// SPDX-License-Identifier: GPL-2.0-only
/*
 * Overview:
 *   This is the generic MTD driver for NAND flash devices. It should be
 *   capable of working with almost all NAND chips currently available.
 *
 *	Additional technical information is available on
 *	http://www.linux-mtd.infradead.org/doc/nand.html
 *
 *	Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
 *		  2002-2006 Thomas Gleixner (tglx@linutronix.de)
 *
 *  Credits:
 *	David Woodhouse for adding multichip support
 *
 *	Aleph One Ltd. and Toby Churchill Ltd. for supporting the
 *	rework for 2K page size chips
 *
 *  TODO:
 *	Enable cached programming for 2k page size chips
 *	Check, if mtd->ecctype should be set to MTD_ECC_HW
 *	if we have HW ECC support.
 *	BBT table is not serialized, has to be fixed
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand-ecc-sw-hamming.h>
#include <linux/mtd/nand-ecc-sw-bch.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/gpio/consumer.h>

#include "internals.h"

/*
 * Distance-3 pairing scheme: compute the (pair, group) coordinates of @page
 * for chips whose paired pages sit 3 program operations apart (2 for the
 * last page of a block).
 */
static int nand_pairing_dist3_get_info(struct mtd_info *mtd, int page,
				       struct mtd_pairing_info *info)
{
	/* Index of the last page in an erase block */
	int lastpage = (mtd->erasesize / mtd->writesize) - 1;
	int dist = 3;

	/* The very last page is only 2 operations away from its pair */
	if (page == lastpage)
		dist = 2;

	if (!page || (page & 1)) {
		info->group = 0;
		info->pair = (page + 1) / 2;
	} else {
		info->group = 1;
		info->pair = (page + 1 - dist) / 2;
	}

	return 0;
}

/*
 * Inverse of nand_pairing_dist3_get_info(): convert a (pair, group)
 * description back into a write-unit (page) index, or return -EINVAL when
 * the coordinates point past the end of the block.
 */
static int nand_pairing_dist3_get_wunit(struct mtd_info *mtd,
					const struct mtd_pairing_info *info)
{
	int lastpair = ((mtd->erasesize / mtd->writesize) - 1) / 2;
	int page = info->pair * 2;
	int dist = 3;

	if (!info->group && !info->pair)
		return 0;

	if (info->pair == lastpair && info->group)
		dist = 2;

	if (!info->group)
		page--;
	else if (info->pair)
		page += dist - 1;

	/* Reject coordinates that map beyond the last page of the block */
	if (page >= mtd->erasesize / mtd->writesize)
		return -EINVAL;

	return page;
}

const struct mtd_pairing_scheme dist3_pairing_scheme = {
	.ngroups = 2,
	.get_info = nand_pairing_dist3_get_info,
	.get_wunit = nand_pairing_dist3_get_wunit,
};

/* Validate that both @ofs and @len are aligned on an erase-block boundary. */
static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len)
{
	int ret = 0;

	/* Start address must align on block boundary */
	if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
		pr_debug("%s: unaligned address\n", __func__);
		ret = -EINVAL;
	}

	/* Length must align on block boundary */
	if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
		pr_debug("%s: length not block aligned\n", __func__);
		ret = -EINVAL;
	}

	return ret;
}

/**
 * nand_extract_bits - Copy unaligned bits from one buffer to another one
 * @dst: destination buffer
 * @dst_off: bit offset at which the writing starts
 * @src: source buffer
 * @src_off: bit offset at which the reading starts
 * @nbits: number of bits to copy from @src to @dst
 *
 * Copy bits from one memory region to another (overlap authorized).
 */
void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src,
		       unsigned int src_off, unsigned int nbits)
{
	unsigned int tmp, n;

	/* Normalize both cursors: byte pointer plus sub-byte bit offset */
	dst += dst_off / 8;
	dst_off %= 8;
	src += src_off / 8;
	src_off %= 8;

	while (nbits) {
		/*
		 * Copy at most up to the next byte boundary of either buffer,
		 * so a single read-modify-write per destination byte suffices.
		 */
		n = min3(8 - dst_off, 8 - src_off, nbits);

		tmp = (*src >> src_off) & GENMASK(n - 1, 0);
		*dst &= ~GENMASK(n - 1 + dst_off, dst_off);
		*dst |= tmp << dst_off;

		dst_off += n;
		if (dst_off >= 8) {
			dst++;
			dst_off -= 8;
		}

		src_off += n;
		if (src_off >= 8) {
			src++;
			src_off -= 8;
		}

		nbits -= n;
	}
}
EXPORT_SYMBOL_GPL(nand_extract_bits);

/**
 * nand_select_target() - Select a NAND target (A.K.A. die)
 * @chip: NAND chip object
 * @cs: the CS line to select. Note that this CS id is always from the chip
 *	PoV, not the controller one
 *
 * Select a NAND target so that further operations executed on @chip go to the
 * selected NAND target.
 */
void nand_select_target(struct nand_chip *chip, unsigned int cs)
{
	/*
	 * cs should always lie between 0 and nanddev_ntargets(), when that's
	 * not the case it's a bug and the caller should be fixed.
	 */
	if (WARN_ON(cs > nanddev_ntargets(&chip->base)))
		return;

	chip->cur_cs = cs;

	if (chip->legacy.select_chip)
		chip->legacy.select_chip(chip, cs);
}
EXPORT_SYMBOL_GPL(nand_select_target);

/**
 * nand_deselect_target() - Deselect the currently selected target
 * @chip: NAND chip object
 *
 * Deselect the currently selected NAND target. The result of operations
 * executed on @chip after the target has been deselected is undefined.
194 */ 195 void nand_deselect_target(struct nand_chip *chip) 196 { 197 if (chip->legacy.select_chip) 198 chip->legacy.select_chip(chip, -1); 199 200 chip->cur_cs = -1; 201 } 202 EXPORT_SYMBOL_GPL(nand_deselect_target); 203 204 /** 205 * nand_release_device - [GENERIC] release chip 206 * @chip: NAND chip object 207 * 208 * Release chip lock and wake up anyone waiting on the device. 209 */ 210 static void nand_release_device(struct nand_chip *chip) 211 { 212 /* Release the controller and the chip */ 213 mutex_unlock(&chip->controller->lock); 214 mutex_unlock(&chip->lock); 215 } 216 217 /** 218 * nand_bbm_get_next_page - Get the next page for bad block markers 219 * @chip: NAND chip object 220 * @page: First page to start checking for bad block marker usage 221 * 222 * Returns an integer that corresponds to the page offset within a block, for 223 * a page that is used to store bad block markers. If no more pages are 224 * available, -EINVAL is returned. 225 */ 226 int nand_bbm_get_next_page(struct nand_chip *chip, int page) 227 { 228 struct mtd_info *mtd = nand_to_mtd(chip); 229 int last_page = ((mtd->erasesize - mtd->writesize) >> 230 chip->page_shift) & chip->pagemask; 231 unsigned int bbm_flags = NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE 232 | NAND_BBM_LASTPAGE; 233 234 if (page == 0 && !(chip->options & bbm_flags)) 235 return 0; 236 if (page == 0 && chip->options & NAND_BBM_FIRSTPAGE) 237 return 0; 238 if (page <= 1 && chip->options & NAND_BBM_SECONDPAGE) 239 return 1; 240 if (page <= last_page && chip->options & NAND_BBM_LASTPAGE) 241 return last_page; 242 243 return -EINVAL; 244 } 245 246 /** 247 * nand_block_bad - [DEFAULT] Read bad block marker from the chip 248 * @chip: NAND chip object 249 * @ofs: offset from device start 250 * 251 * Check, if the block is bad. 
 */
static int nand_block_bad(struct nand_chip *chip, loff_t ofs)
{
	int first_page, page_offset;
	int res;
	u8 bad;

	/* First page of the block @ofs lives in */
	first_page = (int)(ofs >> chip->page_shift) & chip->pagemask;
	page_offset = nand_bbm_get_next_page(chip, 0);

	/* Walk every page that may carry a bad block marker */
	while (page_offset >= 0) {
		res = chip->ecc.read_oob(chip, first_page + page_offset);
		if (res < 0)
			return res;

		bad = chip->oob_poi[chip->badblockpos];

		/*
		 * With the default 8 marker bits any non-0xFF byte means bad;
		 * otherwise count the set bits against the threshold.
		 */
		if (likely(chip->badblockbits == 8))
			res = bad != 0xFF;
		else
			res = hweight8(bad) < chip->badblockbits;
		if (res)
			return res;

		page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
	}

	return 0;
}

/**
 * nand_region_is_secured() - Check if the region is secured
 * @chip: NAND chip object
 * @offset: Offset of the region to check
 * @size: Size of the region to check
 *
 * Checks if the region is secured by comparing the offset and size with the
 * list of secure regions obtained from DT. Returns true if the region is
 * secured else false.
291 */ 292 static bool nand_region_is_secured(struct nand_chip *chip, loff_t offset, u64 size) 293 { 294 int i; 295 296 /* Skip touching the secure regions if present */ 297 for (i = 0; i < chip->nr_secure_regions; i++) { 298 const struct nand_secure_region *region = &chip->secure_regions[i]; 299 300 if (offset + size <= region->offset || 301 offset >= region->offset + region->size) 302 continue; 303 304 pr_debug("%s: Region 0x%llx - 0x%llx is secured!", 305 __func__, offset, offset + size); 306 307 return true; 308 } 309 310 return false; 311 } 312 313 static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs) 314 { 315 struct mtd_info *mtd = nand_to_mtd(chip); 316 317 if (chip->options & NAND_NO_BBM_QUIRK) 318 return 0; 319 320 /* Check if the region is secured */ 321 if (nand_region_is_secured(chip, ofs, mtd->erasesize)) 322 return -EIO; 323 324 if (chip->legacy.block_bad) 325 return chip->legacy.block_bad(chip, ofs); 326 327 return nand_block_bad(chip, ofs); 328 } 329 330 /** 331 * nand_get_device - [GENERIC] Get chip for selected access 332 * @chip: NAND chip structure 333 * 334 * Lock the device and its controller for exclusive access 335 * 336 * Return: -EBUSY if the chip has been suspended, 0 otherwise 337 */ 338 static int nand_get_device(struct nand_chip *chip) 339 { 340 mutex_lock(&chip->lock); 341 if (chip->suspended) { 342 mutex_unlock(&chip->lock); 343 return -EBUSY; 344 } 345 mutex_lock(&chip->controller->lock); 346 347 return 0; 348 } 349 350 /** 351 * nand_check_wp - [GENERIC] check if the chip is write protected 352 * @chip: NAND chip object 353 * 354 * Check, if the device is write protected. The function expects, that the 355 * device is already selected. 
356 */ 357 static int nand_check_wp(struct nand_chip *chip) 358 { 359 u8 status; 360 int ret; 361 362 /* Broken xD cards report WP despite being writable */ 363 if (chip->options & NAND_BROKEN_XD) 364 return 0; 365 366 /* Check the WP bit */ 367 ret = nand_status_op(chip, &status); 368 if (ret) 369 return ret; 370 371 return status & NAND_STATUS_WP ? 0 : 1; 372 } 373 374 /** 375 * nand_fill_oob - [INTERN] Transfer client buffer to oob 376 * @chip: NAND chip object 377 * @oob: oob data buffer 378 * @len: oob data write length 379 * @ops: oob ops structure 380 */ 381 static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len, 382 struct mtd_oob_ops *ops) 383 { 384 struct mtd_info *mtd = nand_to_mtd(chip); 385 int ret; 386 387 /* 388 * Initialise to all 0xFF, to avoid the possibility of left over OOB 389 * data from a previous OOB read. 390 */ 391 memset(chip->oob_poi, 0xff, mtd->oobsize); 392 393 switch (ops->mode) { 394 395 case MTD_OPS_PLACE_OOB: 396 case MTD_OPS_RAW: 397 memcpy(chip->oob_poi + ops->ooboffs, oob, len); 398 return oob + len; 399 400 case MTD_OPS_AUTO_OOB: 401 ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi, 402 ops->ooboffs, len); 403 BUG_ON(ret); 404 return oob + len; 405 406 default: 407 BUG(); 408 } 409 return NULL; 410 } 411 412 /** 413 * nand_do_write_oob - [MTD Interface] NAND write out-of-band 414 * @chip: NAND chip object 415 * @to: offset to write to 416 * @ops: oob operation description structure 417 * 418 * NAND write out-of-band. 
 */
static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chipnr, page, status, len, ret;

	pr_debug("%s: to = 0x%08x, len = %i\n",
		 __func__, (unsigned int)to, (int)ops->ooblen);

	/* Number of OOB bytes available in the requested mode */
	len = mtd_oobavail(mtd, ops);

	/* Do not allow write past end of page */
	if ((ops->ooboffs + ops->ooblen) > len) {
		pr_debug("%s: attempt to write past end of page\n",
			 __func__);
		return -EINVAL;
	}

	/* Check if the region is secured */
	if (nand_region_is_secured(chip, to, ops->ooblen))
		return -EIO;

	/* Die the target offset lives on */
	chipnr = (int)(to >> chip->chip_shift);

	/*
	 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
	 * of my DiskOnChip 2000 test units) will clear the whole data page too
	 * if we don't do this. I have no clue why, but I seem to have 'fixed'
	 * it in the doc2000 driver in August 1999.  dwmw2.
	 */
	ret = nand_reset(chip, chipnr);
	if (ret)
		return ret;

	nand_select_target(chip, chipnr);

	/* Shift to get page */
	page = (int)(to >> chip->page_shift);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		nand_deselect_target(chip);
		return -EROFS;
	}

	/* Invalidate the page cache, if we write to the cached page */
	if (page == chip->pagecache.page)
		chip->pagecache.page = -1;

	nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);

	if (ops->mode == MTD_OPS_RAW)
		status = chip->ecc.write_oob_raw(chip, page & chip->pagemask);
	else
		status = chip->ecc.write_oob(chip, page & chip->pagemask);

	nand_deselect_target(chip);

	if (status)
		return status;

	ops->oobretlen = ops->ooblen;

	return 0;
}

/**
 * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * This is
the default implementation, which can be overridden by a hardware
 * specific driver. It provides the details for writing a bad block marker to a
 * block.
 */
static int nand_default_block_markbad(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct mtd_oob_ops ops;
	uint8_t buf[2] = { 0, 0 };
	int ret = 0, res, page_offset;

	memset(&ops, 0, sizeof(ops));
	ops.oobbuf = buf;
	ops.ooboffs = chip->badblockpos;
	if (chip->options & NAND_BUSWIDTH_16) {
		/* 16-bit bus: marker must start on an even offset, 2 bytes */
		ops.ooboffs &= ~0x01;
		ops.len = ops.ooblen = 2;
	} else {
		ops.len = ops.ooblen = 1;
	}
	ops.mode = MTD_OPS_PLACE_OOB;

	/* Write the marker to every page the chip uses for BBMs */
	page_offset = nand_bbm_get_next_page(chip, 0);

	while (page_offset >= 0) {
		res = nand_do_write_oob(chip,
					ofs + (page_offset * mtd->writesize),
					&ops);

		/* Keep the first error but still try the remaining pages */
		if (!ret)
			ret = res;

		page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
	}

	return ret;
}

/**
 * nand_markbad_bbm - mark a block by updating the BBM
 * @chip: NAND chip object
 * @ofs: offset of the block to mark bad
 */
int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs)
{
	/* Prefer the driver-specific hook when one is provided */
	if (chip->legacy.block_markbad)
		return chip->legacy.block_markbad(chip, ofs);

	return nand_default_block_markbad(chip, ofs);
}

/**
 * nand_block_markbad_lowlevel - mark a block bad
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * This function performs the generic NAND bad block marking steps (i.e., bad
 * block table(s) and/or marker(s)). We only allow the hardware driver to
 * specify how to write bad block markers to OOB (chip->legacy.block_markbad).
 *
 * We try operations in the following order:
 *
 *  (1) erase the affected block, to allow OOB marker to be written cleanly
 *  (2) write bad block marker to OOB area of affected block (unless flag
 *      NAND_BBT_NO_OOB_BBM is present)
 *  (3) update the BBT
 *
 * Note that we retain the first error encountered in (2) or (3), finish the
 * procedures, and dump the error in the end.
 */
static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int res, ret = 0;

	if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
		struct erase_info einfo;

		/* Attempt erase before marking OOB */
		memset(&einfo, 0, sizeof(einfo));
		einfo.addr = ofs;
		einfo.len = 1ULL << chip->phys_erase_shift;
		/* Best effort: the erase result is deliberately ignored */
		nand_erase_nand(chip, &einfo, 0);

		/* Write bad block marker to OOB */
		ret = nand_get_device(chip);
		if (ret)
			return ret;

		ret = nand_markbad_bbm(chip, ofs);
		nand_release_device(chip);
	}

	/* Mark block bad in BBT */
	if (chip->bbt) {
		res = nand_markbad_bbt(chip, ofs);
		/* Retain the first error encountered */
		if (!ret)
			ret = res;
	}

	if (!ret)
		mtd->ecc_stats.badblocks++;

	return ret;
}

/**
 * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
 * @mtd: MTD device structure
 * @ofs: offset from device start
 *
 * Check if the block is marked as reserved.
 */
static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (!chip->bbt)
		return 0;
	/* Return info from the table */
	return nand_isreserved_bbt(chip, ofs);
}

/**
 * nand_block_checkbad - [GENERIC] Check if a block is marked bad
 * @chip: NAND chip object
 * @ofs: offset from device start
 * @allowbbt: 1, if its allowed to access the bbt area
 *
 * Check, if the block is bad.
Either by reading the bad block table or 621 * calling of the scan function. 622 */ 623 static int nand_block_checkbad(struct nand_chip *chip, loff_t ofs, int allowbbt) 624 { 625 /* Return info from the table */ 626 if (chip->bbt) 627 return nand_isbad_bbt(chip, ofs, allowbbt); 628 629 return nand_isbad_bbm(chip, ofs); 630 } 631 632 /** 633 * nand_soft_waitrdy - Poll STATUS reg until RDY bit is set to 1 634 * @chip: NAND chip structure 635 * @timeout_ms: Timeout in ms 636 * 637 * Poll the STATUS register using ->exec_op() until the RDY bit becomes 1. 638 * If that does not happen whitin the specified timeout, -ETIMEDOUT is 639 * returned. 640 * 641 * This helper is intended to be used when the controller does not have access 642 * to the NAND R/B pin. 643 * 644 * Be aware that calling this helper from an ->exec_op() implementation means 645 * ->exec_op() must be re-entrant. 646 * 647 * Return 0 if the NAND chip is ready, a negative error otherwise. 648 */ 649 int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms) 650 { 651 const struct nand_interface_config *conf; 652 u8 status = 0; 653 int ret; 654 655 if (!nand_has_exec_op(chip)) 656 return -ENOTSUPP; 657 658 /* Wait tWB before polling the STATUS reg. */ 659 conf = nand_get_interface_config(chip); 660 ndelay(NAND_COMMON_TIMING_NS(conf, tWB_max)); 661 662 ret = nand_status_op(chip, NULL); 663 if (ret) 664 return ret; 665 666 /* 667 * +1 below is necessary because if we are now in the last fraction 668 * of jiffy and msecs_to_jiffies is 1 then we will wait only that 669 * small jiffy fraction - possibly leading to false timeout 670 */ 671 timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1; 672 do { 673 ret = nand_read_data_op(chip, &status, sizeof(status), true, 674 false); 675 if (ret) 676 break; 677 678 if (status & NAND_STATUS_READY) 679 break; 680 681 /* 682 * Typical lowest execution time for a tR on most NANDs is 10us, 683 * use this as polling delay before doing something smarter (ie. 
684 * deriving a delay from the timeout value, timeout_ms/ratio). 685 */ 686 udelay(10); 687 } while (time_before(jiffies, timeout_ms)); 688 689 /* 690 * We have to exit READ_STATUS mode in order to read real data on the 691 * bus in case the WAITRDY instruction is preceding a DATA_IN 692 * instruction. 693 */ 694 nand_exit_status_op(chip); 695 696 if (ret) 697 return ret; 698 699 return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT; 700 }; 701 EXPORT_SYMBOL_GPL(nand_soft_waitrdy); 702 703 /** 704 * nand_gpio_waitrdy - Poll R/B GPIO pin until ready 705 * @chip: NAND chip structure 706 * @gpiod: GPIO descriptor of R/B pin 707 * @timeout_ms: Timeout in ms 708 * 709 * Poll the R/B GPIO pin until it becomes ready. If that does not happen 710 * whitin the specified timeout, -ETIMEDOUT is returned. 711 * 712 * This helper is intended to be used when the controller has access to the 713 * NAND R/B pin over GPIO. 714 * 715 * Return 0 if the R/B pin indicates chip is ready, a negative error otherwise. 716 */ 717 int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod, 718 unsigned long timeout_ms) 719 { 720 721 /* 722 * Wait until R/B pin indicates chip is ready or timeout occurs. 723 * +1 below is necessary because if we are now in the last fraction 724 * of jiffy and msecs_to_jiffies is 1 then we will wait only that 725 * small jiffy fraction - possibly leading to false timeout. 726 */ 727 timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1; 728 do { 729 if (gpiod_get_value_cansleep(gpiod)) 730 return 0; 731 732 cond_resched(); 733 } while (time_before(jiffies, timeout_ms)); 734 735 return gpiod_get_value_cansleep(gpiod) ? 0 : -ETIMEDOUT; 736 }; 737 EXPORT_SYMBOL_GPL(nand_gpio_waitrdy); 738 739 /** 740 * panic_nand_wait - [GENERIC] wait until the command is done 741 * @chip: NAND chip structure 742 * @timeo: timeout 743 * 744 * Wait for command done. This is a helper function for nand_wait used when 745 * we are in interrupt context. 
May happen when in panic and trying to write 746 * an oops through mtdoops. 747 */ 748 void panic_nand_wait(struct nand_chip *chip, unsigned long timeo) 749 { 750 int i; 751 for (i = 0; i < timeo; i++) { 752 if (chip->legacy.dev_ready) { 753 if (chip->legacy.dev_ready(chip)) 754 break; 755 } else { 756 int ret; 757 u8 status; 758 759 ret = nand_read_data_op(chip, &status, sizeof(status), 760 true, false); 761 if (ret) 762 return; 763 764 if (status & NAND_STATUS_READY) 765 break; 766 } 767 mdelay(1); 768 } 769 } 770 771 static bool nand_supports_get_features(struct nand_chip *chip, int addr) 772 { 773 return (chip->parameters.supports_set_get_features && 774 test_bit(addr, chip->parameters.get_feature_list)); 775 } 776 777 static bool nand_supports_set_features(struct nand_chip *chip, int addr) 778 { 779 return (chip->parameters.supports_set_get_features && 780 test_bit(addr, chip->parameters.set_feature_list)); 781 } 782 783 /** 784 * nand_reset_interface - Reset data interface and timings 785 * @chip: The NAND chip 786 * @chipnr: Internal die id 787 * 788 * Reset the Data interface and timings to ONFI mode 0. 789 * 790 * Returns 0 for success or negative error code otherwise. 791 */ 792 static int nand_reset_interface(struct nand_chip *chip, int chipnr) 793 { 794 const struct nand_controller_ops *ops = chip->controller->ops; 795 int ret; 796 797 if (!nand_controller_can_setup_interface(chip)) 798 return 0; 799 800 /* 801 * The ONFI specification says: 802 * " 803 * To transition from NV-DDR or NV-DDR2 to the SDR data 804 * interface, the host shall use the Reset (FFh) command 805 * using SDR timing mode 0. A device in any timing mode is 806 * required to recognize Reset (FFh) command issued in SDR 807 * timing mode 0. 808 * " 809 * 810 * Configure the data interface in SDR mode and set the 811 * timings to timing mode 0. 
812 */ 813 814 chip->current_interface_config = nand_get_reset_interface_config(); 815 ret = ops->setup_interface(chip, chipnr, 816 chip->current_interface_config); 817 if (ret) 818 pr_err("Failed to configure data interface to SDR timing mode 0\n"); 819 820 return ret; 821 } 822 823 /** 824 * nand_setup_interface - Setup the best data interface and timings 825 * @chip: The NAND chip 826 * @chipnr: Internal die id 827 * 828 * Configure what has been reported to be the best data interface and NAND 829 * timings supported by the chip and the driver. 830 * 831 * Returns 0 for success or negative error code otherwise. 832 */ 833 static int nand_setup_interface(struct nand_chip *chip, int chipnr) 834 { 835 const struct nand_controller_ops *ops = chip->controller->ops; 836 u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { }, request; 837 int ret; 838 839 if (!nand_controller_can_setup_interface(chip)) 840 return 0; 841 842 /* 843 * A nand_reset_interface() put both the NAND chip and the NAND 844 * controller in timings mode 0. If the default mode for this chip is 845 * also 0, no need to proceed to the change again. Plus, at probe time, 846 * nand_setup_interface() uses ->set/get_features() which would 847 * fail anyway as the parameter page is not available yet. 
848 */ 849 if (!chip->best_interface_config) 850 return 0; 851 852 request = chip->best_interface_config->timings.mode; 853 if (nand_interface_is_sdr(chip->best_interface_config)) 854 request |= ONFI_DATA_INTERFACE_SDR; 855 else 856 request |= ONFI_DATA_INTERFACE_NVDDR; 857 tmode_param[0] = request; 858 859 /* Change the mode on the chip side (if supported by the NAND chip) */ 860 if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) { 861 nand_select_target(chip, chipnr); 862 ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE, 863 tmode_param); 864 nand_deselect_target(chip); 865 if (ret) 866 return ret; 867 } 868 869 /* Change the mode on the controller side */ 870 ret = ops->setup_interface(chip, chipnr, chip->best_interface_config); 871 if (ret) 872 return ret; 873 874 /* Check the mode has been accepted by the chip, if supported */ 875 if (!nand_supports_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) 876 goto update_interface_config; 877 878 memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN); 879 nand_select_target(chip, chipnr); 880 ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE, 881 tmode_param); 882 nand_deselect_target(chip); 883 if (ret) 884 goto err_reset_chip; 885 886 if (request != tmode_param[0]) { 887 pr_warn("%s timing mode %d not acknowledged by the NAND chip\n", 888 nand_interface_is_nvddr(chip->best_interface_config) ? "NV-DDR" : "SDR", 889 chip->best_interface_config->timings.mode); 890 pr_debug("NAND chip would work in %s timing mode %d\n", 891 tmode_param[0] & ONFI_DATA_INTERFACE_NVDDR ? "NV-DDR" : "SDR", 892 (unsigned int)ONFI_TIMING_MODE_PARAM(tmode_param[0])); 893 goto err_reset_chip; 894 } 895 896 update_interface_config: 897 chip->current_interface_config = chip->best_interface_config; 898 899 return 0; 900 901 err_reset_chip: 902 /* 903 * Fallback to mode 0 if the chip explicitly did not ack the chosen 904 * timing mode. 
905 */ 906 nand_reset_interface(chip, chipnr); 907 nand_select_target(chip, chipnr); 908 nand_reset_op(chip); 909 nand_deselect_target(chip); 910 911 return ret; 912 } 913 914 /** 915 * nand_choose_best_sdr_timings - Pick up the best SDR timings that both the 916 * NAND controller and the NAND chip support 917 * @chip: the NAND chip 918 * @iface: the interface configuration (can eventually be updated) 919 * @spec_timings: specific timings, when not fitting the ONFI specification 920 * 921 * If specific timings are provided, use them. Otherwise, retrieve supported 922 * timing modes from ONFI information. 923 */ 924 int nand_choose_best_sdr_timings(struct nand_chip *chip, 925 struct nand_interface_config *iface, 926 struct nand_sdr_timings *spec_timings) 927 { 928 const struct nand_controller_ops *ops = chip->controller->ops; 929 int best_mode = 0, mode, ret; 930 931 iface->type = NAND_SDR_IFACE; 932 933 if (spec_timings) { 934 iface->timings.sdr = *spec_timings; 935 iface->timings.mode = onfi_find_closest_sdr_mode(spec_timings); 936 937 /* Verify the controller supports the requested interface */ 938 ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY, 939 iface); 940 if (!ret) { 941 chip->best_interface_config = iface; 942 return ret; 943 } 944 945 /* Fallback to slower modes */ 946 best_mode = iface->timings.mode; 947 } else if (chip->parameters.onfi) { 948 best_mode = fls(chip->parameters.onfi->sdr_timing_modes) - 1; 949 } 950 951 for (mode = best_mode; mode >= 0; mode--) { 952 onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, mode); 953 954 ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY, 955 iface); 956 if (!ret) { 957 chip->best_interface_config = iface; 958 break; 959 } 960 } 961 962 return ret; 963 } 964 965 /** 966 * nand_choose_best_nvddr_timings - Pick up the best NVDDR timings that both the 967 * NAND controller and the NAND chip support 968 * @chip: the NAND chip 969 * @iface: the interface configuration (can eventually be 
updated)
 * @spec_timings: specific timings, when not fitting the ONFI specification
 *
 * If specific timings are provided, use them. Otherwise, retrieve supported
 * timing modes from ONFI information.
 */
int nand_choose_best_nvddr_timings(struct nand_chip *chip,
				   struct nand_interface_config *iface,
				   struct nand_nvddr_timings *spec_timings)
{
	const struct nand_controller_ops *ops = chip->controller->ops;
	/*
	 * Initialize ret so that a sane error is returned even when the
	 * fallback loop below never runs (e.g. no ONFI NV-DDR timing mode bit
	 * set, making best_mode negative).
	 */
	int best_mode = 0, mode, ret = -EOPNOTSUPP;

	iface->type = NAND_NVDDR_IFACE;

	if (spec_timings) {
		iface->timings.nvddr = *spec_timings;
		iface->timings.mode = onfi_find_closest_nvddr_mode(spec_timings);

		/* Verify the controller supports the requested interface */
		ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
					   iface);
		if (!ret) {
			chip->best_interface_config = iface;
			return ret;
		}

		/* Fallback to slower modes */
		best_mode = iface->timings.mode;
	} else if (chip->parameters.onfi) {
		best_mode = fls(chip->parameters.onfi->nvddr_timing_modes) - 1;
	}

	/* Try each mode from fastest to slowest until the controller agrees */
	for (mode = best_mode; mode >= 0; mode--) {
		onfi_fill_interface_config(chip, iface, NAND_NVDDR_IFACE, mode);

		ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
					   iface);
		if (!ret) {
			chip->best_interface_config = iface;
			break;
		}
	}

	return ret;
}

/**
 * nand_choose_best_timings - Pick up the best NVDDR or SDR timings that both
 *                            NAND controller and the NAND chip support
 * @chip: the NAND chip
 * @iface: the interface configuration (can eventually be updated)
 *
 * If specific timings are provided, use them. Otherwise, retrieve supported
 * timing modes from ONFI information.
1024 */ 1025 static int nand_choose_best_timings(struct nand_chip *chip, 1026 struct nand_interface_config *iface) 1027 { 1028 int ret; 1029 1030 /* Try the fastest timings: NV-DDR */ 1031 ret = nand_choose_best_nvddr_timings(chip, iface, NULL); 1032 if (!ret) 1033 return 0; 1034 1035 /* Fallback to SDR timings otherwise */ 1036 return nand_choose_best_sdr_timings(chip, iface, NULL); 1037 } 1038 1039 /** 1040 * nand_choose_interface_config - find the best data interface and timings 1041 * @chip: The NAND chip 1042 * 1043 * Find the best data interface and NAND timings supported by the chip 1044 * and the driver. Eventually let the NAND manufacturer driver propose his own 1045 * set of timings. 1046 * 1047 * After this function nand_chip->interface_config is initialized with the best 1048 * timing mode available. 1049 * 1050 * Returns 0 for success or negative error code otherwise. 1051 */ 1052 static int nand_choose_interface_config(struct nand_chip *chip) 1053 { 1054 struct nand_interface_config *iface; 1055 int ret; 1056 1057 if (!nand_controller_can_setup_interface(chip)) 1058 return 0; 1059 1060 iface = kzalloc(sizeof(*iface), GFP_KERNEL); 1061 if (!iface) 1062 return -ENOMEM; 1063 1064 if (chip->ops.choose_interface_config) 1065 ret = chip->ops.choose_interface_config(chip, iface); 1066 else 1067 ret = nand_choose_best_timings(chip, iface); 1068 1069 if (ret) 1070 kfree(iface); 1071 1072 return ret; 1073 } 1074 1075 /** 1076 * nand_fill_column_cycles - fill the column cycles of an address 1077 * @chip: The NAND chip 1078 * @addrs: Array of address cycles to fill 1079 * @offset_in_page: The offset in the page 1080 * 1081 * Fills the first or the first two bytes of the @addrs field depending 1082 * on the NAND bus width and the page size. 1083 * 1084 * Returns the number of cycles needed to encode the column, or a negative 1085 * error code in case one of the arguments is invalid. 
 */
static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
				   unsigned int offset_in_page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* Make sure the offset is less than the actual page size. */
	if (offset_in_page > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/*
	 * On small page NANDs, there's a dedicated command to access the OOB
	 * area, and the column address is relative to the start of the OOB
	 * area, not the start of the page. Adjust the address accordingly.
	 */
	if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
		offset_in_page -= mtd->writesize;

	/*
	 * The offset in page is expressed in bytes, if the NAND bus is 16-bit
	 * wide, then it must be divided by 2.
	 */
	if (chip->options & NAND_BUSWIDTH_16) {
		/* 16-bit chips cannot address an odd byte boundary. */
		if (WARN_ON(offset_in_page % 2))
			return -EINVAL;

		offset_in_page /= 2;
	}

	addrs[0] = offset_in_page;

	/*
	 * Small page NANDs use 1 cycle for the columns, while large page NANDs
	 * need 2
	 */
	if (mtd->writesize <= 512)
		return 1;

	addrs[1] = offset_in_page >> 8;

	return 2;
}

/*
 * READ PAGE for small page (<= 512 bytes) chips through ->exec_op(). The
 * page region (lower half, upper half or OOB) is selected by the read
 * opcode itself (READ0/READ1/READOOB).
 */
static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
				     unsigned int offset_in_page, void *buf,
				     unsigned int len)
{
	const struct nand_interface_config *conf =
		nand_get_interface_config(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 addrs[4];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(3, addrs, NAND_COMMON_TIMING_NS(conf, tWB_max)),
		NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
				 NAND_COMMON_TIMING_NS(conf, tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int ret;

	/* Drop the DATA_IN instruction if len is set to 0. */
	if (!len)
		op.ninstrs--;

	/* Pick the opcode matching the region addressed by @offset_in_page. */
	if (offset_in_page >= mtd->writesize)
		instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
	else if (offset_in_page >= 256 &&
		 !(chip->options & NAND_BUSWIDTH_16))
		instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;

	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	/* One column cycle (addrs[0]), then two or three row cycles. */
	addrs[1] = page;
	addrs[2] = page >> 8;

	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[3] = page >> 16;
		instrs[1].ctx.addr.naddrs++;
	}

	return nand_exec_op(chip, &op);
}

/*
 * READ PAGE for large page chips through ->exec_op():
 * READ0 + 2 column cycles + 2/3 row cycles + READSTART.
 */
static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
				     unsigned int offset_in_page, void *buf,
				     unsigned int len)
{
	const struct nand_interface_config *conf =
		nand_get_interface_config(chip);
	u8 addrs[5];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(4, addrs, 0),
		NAND_OP_CMD(NAND_CMD_READSTART, NAND_COMMON_TIMING_NS(conf, tWB_max)),
		NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
				 NAND_COMMON_TIMING_NS(conf, tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int ret;

	/* Drop the DATA_IN instruction if len is set to 0. */
	if (!len)
		op.ninstrs--;

	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	/* Two column cycles (addrs[0-1]), then two or three row cycles. */
	addrs[2] = page;
	addrs[3] = page >> 8;

	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[4] = page >> 16;
		instrs[1].ctx.addr.naddrs++;
	}

	return nand_exec_op(chip, &op);
}

/**
 * nand_read_page_op - Do a READ PAGE operation
 * @chip: The NAND chip
 * @page: page to read
 * @offset_in_page: offset within the page
 * @buf: buffer used to store the data
 * @len: length of the buffer
 *
 * This function issues a READ PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_page_op(struct nand_chip *chip, unsigned int page,
		      unsigned int offset_in_page, void *buf, unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		if (mtd->writesize > 512)
			return nand_lp_exec_read_page_op(chip, page,
							 offset_in_page, buf,
							 len);

		return nand_sp_exec_read_page_op(chip, page, offset_in_page,
						 buf, len);
	}

	/* Legacy path for drivers without ->exec_op() support. */
	chip->legacy.cmdfunc(chip, NAND_CMD_READ0, offset_in_page, page);
	if (len)
		chip->legacy.read_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_read_page_op);

/**
 * nand_read_param_page_op - Do a READ PARAMETER PAGE operation
 * @chip: The NAND chip
 * @page: parameter page to read
 * @buf: buffer used to store the data
 * @len: length of the buffer
 *
 * This function issues a READ PARAMETER PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
			    unsigned int len)
{
	unsigned int i;
	u8 *p = buf;

	if (len && !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_PARAM, 0),
			NAND_OP_ADDR(1, &page,
				     NAND_COMMON_TIMING_NS(conf, tWB_max)),
			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
					 NAND_COMMON_TIMING_NS(conf, tRR_min)),
			NAND_OP_8BIT_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	/* Legacy path: byte-wise reads through ->read_byte(). */
	chip->legacy.cmdfunc(chip, NAND_CMD_PARAM, page, -1);
	for (i = 0; i < len; i++)
		p[i] = chip->legacy.read_byte(chip);

	return 0;
}

/**
 * nand_change_read_column_op - Do a CHANGE READ COLUMN operation
 * @chip: The NAND chip
 * @offset_in_page: offset within the page
 * @buf: buffer used to store the data
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 *
 * This function issues a CHANGE READ COLUMN operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_change_read_column_op(struct nand_chip *chip,
			       unsigned int offset_in_page, void *buf,
			       unsigned int len, bool force_8bit)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/* Small page NANDs do not support column change. */
	if (mtd->writesize <= 512)
		return -ENOTSUPP;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		u8 addrs[2] = {};
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
			NAND_OP_ADDR(2, addrs, 0),
			NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
				    NAND_COMMON_TIMING_NS(conf, tCCS_min)),
			NAND_OP_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
		if (ret < 0)
			return ret;

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		instrs[3].ctx.data.force_8bit = force_8bit;

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_RNDOUT, offset_in_page, -1);
	if (len)
		chip->legacy.read_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_change_read_column_op);

/**
 * nand_read_oob_op - Do a READ OOB operation
 * @chip: The NAND chip
 * @page: page to read
 * @offset_in_oob: offset within the OOB area
 * @buf: buffer used to store the data
 * @len: length of the buffer
 *
 * This function issues a READ OOB operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
		     unsigned int offset_in_oob, void *buf, unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_oob + len > mtd->oobsize)
		return -EINVAL;

	/* For ->exec_op() drivers, OOB is just the tail of the page. */
	if (nand_has_exec_op(chip))
		return nand_read_page_op(chip, page,
					 mtd->writesize + offset_in_oob,
					 buf, len);

	chip->legacy.cmdfunc(chip, NAND_CMD_READOOB, offset_in_oob, page);
	if (len)
		chip->legacy.read_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_read_oob_op);

/*
 * Common PROG PAGE helper: builds the SEQIN + address + data (+ PAGEPROG)
 * sequence. @prog selects whether the page is actually programmed or only
 * the first half of the operation is sent.
 */
static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
				  unsigned int offset_in_page, const void *buf,
				  unsigned int len, bool prog)
{
	const struct nand_interface_config *conf =
		nand_get_interface_config(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 addrs[5] = {};
	struct nand_op_instr instrs[] = {
		/*
		 * The first instruction will be dropped if we're dealing
		 * with a large page NAND and adjusted if we're dealing
		 * with a small page NAND and the page offset is > 255.
1413 */ 1414 NAND_OP_CMD(NAND_CMD_READ0, 0), 1415 NAND_OP_CMD(NAND_CMD_SEQIN, 0), 1416 NAND_OP_ADDR(0, addrs, NAND_COMMON_TIMING_NS(conf, tADL_min)), 1417 NAND_OP_DATA_OUT(len, buf, 0), 1418 NAND_OP_CMD(NAND_CMD_PAGEPROG, 1419 NAND_COMMON_TIMING_NS(conf, tWB_max)), 1420 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tPROG_max), 0), 1421 }; 1422 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); 1423 int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page); 1424 1425 if (naddrs < 0) 1426 return naddrs; 1427 1428 addrs[naddrs++] = page; 1429 addrs[naddrs++] = page >> 8; 1430 if (chip->options & NAND_ROW_ADDR_3) 1431 addrs[naddrs++] = page >> 16; 1432 1433 instrs[2].ctx.addr.naddrs = naddrs; 1434 1435 /* Drop the last two instructions if we're not programming the page. */ 1436 if (!prog) { 1437 op.ninstrs -= 2; 1438 /* Also drop the DATA_OUT instruction if empty. */ 1439 if (!len) 1440 op.ninstrs--; 1441 } 1442 1443 if (mtd->writesize <= 512) { 1444 /* 1445 * Small pages need some more tweaking: we have to adjust the 1446 * first instruction depending on the page offset we're trying 1447 * to access. 1448 */ 1449 if (offset_in_page >= mtd->writesize) 1450 instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB; 1451 else if (offset_in_page >= 256 && 1452 !(chip->options & NAND_BUSWIDTH_16)) 1453 instrs[0].ctx.cmd.opcode = NAND_CMD_READ1; 1454 } else { 1455 /* 1456 * Drop the first command if we're dealing with a large page 1457 * NAND. 1458 */ 1459 op.instrs++; 1460 op.ninstrs--; 1461 } 1462 1463 return nand_exec_op(chip, &op); 1464 } 1465 1466 /** 1467 * nand_prog_page_begin_op - starts a PROG PAGE operation 1468 * @chip: The NAND chip 1469 * @page: page to write 1470 * @offset_in_page: offset within the page 1471 * @buf: buffer containing the data to write to the page 1472 * @len: length of the buffer 1473 * 1474 * This function issues the first half of a PROG PAGE operation. 1475 * This function does not select/unselect the CS line. 
1476 * 1477 * Returns 0 on success, a negative error code otherwise. 1478 */ 1479 int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page, 1480 unsigned int offset_in_page, const void *buf, 1481 unsigned int len) 1482 { 1483 struct mtd_info *mtd = nand_to_mtd(chip); 1484 1485 if (len && !buf) 1486 return -EINVAL; 1487 1488 if (offset_in_page + len > mtd->writesize + mtd->oobsize) 1489 return -EINVAL; 1490 1491 if (nand_has_exec_op(chip)) 1492 return nand_exec_prog_page_op(chip, page, offset_in_page, buf, 1493 len, false); 1494 1495 chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page, page); 1496 1497 if (buf) 1498 chip->legacy.write_buf(chip, buf, len); 1499 1500 return 0; 1501 } 1502 EXPORT_SYMBOL_GPL(nand_prog_page_begin_op); 1503 1504 /** 1505 * nand_prog_page_end_op - ends a PROG PAGE operation 1506 * @chip: The NAND chip 1507 * 1508 * This function issues the second half of a PROG PAGE operation. 1509 * This function does not select/unselect the CS line. 1510 * 1511 * Returns 0 on success, a negative error code otherwise. 
1512 */ 1513 int nand_prog_page_end_op(struct nand_chip *chip) 1514 { 1515 int ret; 1516 u8 status; 1517 1518 if (nand_has_exec_op(chip)) { 1519 const struct nand_interface_config *conf = 1520 nand_get_interface_config(chip); 1521 struct nand_op_instr instrs[] = { 1522 NAND_OP_CMD(NAND_CMD_PAGEPROG, 1523 NAND_COMMON_TIMING_NS(conf, tWB_max)), 1524 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tPROG_max), 1525 0), 1526 }; 1527 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); 1528 1529 ret = nand_exec_op(chip, &op); 1530 if (ret) 1531 return ret; 1532 1533 ret = nand_status_op(chip, &status); 1534 if (ret) 1535 return ret; 1536 } else { 1537 chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1); 1538 ret = chip->legacy.waitfunc(chip); 1539 if (ret < 0) 1540 return ret; 1541 1542 status = ret; 1543 } 1544 1545 if (status & NAND_STATUS_FAIL) 1546 return -EIO; 1547 1548 return 0; 1549 } 1550 EXPORT_SYMBOL_GPL(nand_prog_page_end_op); 1551 1552 /** 1553 * nand_prog_page_op - Do a full PROG PAGE operation 1554 * @chip: The NAND chip 1555 * @page: page to write 1556 * @offset_in_page: offset within the page 1557 * @buf: buffer containing the data to write to the page 1558 * @len: length of the buffer 1559 * 1560 * This function issues a full PROG PAGE operation. 1561 * This function does not select/unselect the CS line. 1562 * 1563 * Returns 0 on success, a negative error code otherwise. 
1564 */ 1565 int nand_prog_page_op(struct nand_chip *chip, unsigned int page, 1566 unsigned int offset_in_page, const void *buf, 1567 unsigned int len) 1568 { 1569 struct mtd_info *mtd = nand_to_mtd(chip); 1570 u8 status; 1571 int ret; 1572 1573 if (!len || !buf) 1574 return -EINVAL; 1575 1576 if (offset_in_page + len > mtd->writesize + mtd->oobsize) 1577 return -EINVAL; 1578 1579 if (nand_has_exec_op(chip)) { 1580 ret = nand_exec_prog_page_op(chip, page, offset_in_page, buf, 1581 len, true); 1582 if (ret) 1583 return ret; 1584 1585 ret = nand_status_op(chip, &status); 1586 if (ret) 1587 return ret; 1588 } else { 1589 chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page, 1590 page); 1591 chip->legacy.write_buf(chip, buf, len); 1592 chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1); 1593 ret = chip->legacy.waitfunc(chip); 1594 if (ret < 0) 1595 return ret; 1596 1597 status = ret; 1598 } 1599 1600 if (status & NAND_STATUS_FAIL) 1601 return -EIO; 1602 1603 return 0; 1604 } 1605 EXPORT_SYMBOL_GPL(nand_prog_page_op); 1606 1607 /** 1608 * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation 1609 * @chip: The NAND chip 1610 * @offset_in_page: offset within the page 1611 * @buf: buffer containing the data to send to the NAND 1612 * @len: length of the buffer 1613 * @force_8bit: force 8-bit bus access 1614 * 1615 * This function issues a CHANGE WRITE COLUMN operation. 1616 * This function does not select/unselect the CS line. 1617 * 1618 * Returns 0 on success, a negative error code otherwise. 1619 */ 1620 int nand_change_write_column_op(struct nand_chip *chip, 1621 unsigned int offset_in_page, 1622 const void *buf, unsigned int len, 1623 bool force_8bit) 1624 { 1625 struct mtd_info *mtd = nand_to_mtd(chip); 1626 1627 if (len && !buf) 1628 return -EINVAL; 1629 1630 if (offset_in_page + len > mtd->writesize + mtd->oobsize) 1631 return -EINVAL; 1632 1633 /* Small page NANDs do not support column change. 
 */
	if (mtd->writesize <= 512)
		return -ENOTSUPP;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		u8 addrs[2];
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RNDIN, 0),
			NAND_OP_ADDR(2, addrs, NAND_COMMON_TIMING_NS(conf, tCCS_min)),
			NAND_OP_DATA_OUT(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
		if (ret < 0)
			return ret;

		instrs[2].ctx.data.force_8bit = force_8bit;

		/* Drop the DATA_OUT instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_RNDIN, offset_in_page, -1);
	if (len)
		chip->legacy.write_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_change_write_column_op);

/**
 * nand_readid_op - Do a READID operation
 * @chip: The NAND chip
 * @addr: address cycle to pass after the READID command
 * @buf: buffer used to store the ID
 * @len: length of the buffer
 *
 * This function sends a READID command and reads back the ID returned by the
 * NAND.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
		   unsigned int len)
{
	unsigned int i;
	u8 *id = buf, *ddrbuf = NULL;

	if (len && !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_READID, 0),
			NAND_OP_ADDR(1, &addr,
				     NAND_COMMON_TIMING_NS(conf, tADL_min)),
			NAND_OP_8BIT_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		/* READ_ID data bytes are received twice in NV-DDR mode */
		if (len && nand_interface_is_nvddr(conf)) {
			ddrbuf = kzalloc(len * 2, GFP_KERNEL);
			if (!ddrbuf)
				return -ENOMEM;

			instrs[2].ctx.data.len *= 2;
			instrs[2].ctx.data.buf.in = ddrbuf;
		}

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		ret = nand_exec_op(chip, &op);
		if (!ret && len && nand_interface_is_nvddr(conf)) {
			/* Keep one byte out of each duplicated pair. */
			for (i = 0; i < len; i++)
				id[i] = ddrbuf[i * 2];
		}

		kfree(ddrbuf);

		return ret;
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_READID, addr, -1);

	for (i = 0; i < len; i++)
		id[i] = chip->legacy.read_byte(chip);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_readid_op);

/**
 * nand_status_op - Do a STATUS operation
 * @chip: The NAND chip
 * @status: out variable to store the NAND status
 *
 * This function sends a STATUS command and reads back the status returned by
 * the NAND.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_status_op(struct nand_chip *chip, u8 *status)
{
	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		u8 ddrstatus[2];
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_STATUS,
				    NAND_COMMON_TIMING_NS(conf, tADL_min)),
			NAND_OP_8BIT_DATA_IN(1, status, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		/* The status data byte will be received twice in NV-DDR mode */
		if (status && nand_interface_is_nvddr(conf)) {
			instrs[1].ctx.data.len *= 2;
			instrs[1].ctx.data.buf.in = ddrstatus;
		}

		/* Drop the DATA_IN instruction when no out variable is given. */
		if (!status)
			op.ninstrs--;

		ret = nand_exec_op(chip, &op);
		if (!ret && status && nand_interface_is_nvddr(conf))
			*status = ddrstatus[0];

		return ret;
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_STATUS, -1, -1);
	if (status)
		*status = chip->legacy.read_byte(chip);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_status_op);

/**
 * nand_exit_status_op - Exit a STATUS operation
 * @chip: The NAND chip
 *
 * This function sends a READ0 command to cancel the effect of the STATUS
 * command to avoid reading only the status until a new read command is sent.
 *
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
1797 */ 1798 int nand_exit_status_op(struct nand_chip *chip) 1799 { 1800 if (nand_has_exec_op(chip)) { 1801 struct nand_op_instr instrs[] = { 1802 NAND_OP_CMD(NAND_CMD_READ0, 0), 1803 }; 1804 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); 1805 1806 return nand_exec_op(chip, &op); 1807 } 1808 1809 chip->legacy.cmdfunc(chip, NAND_CMD_READ0, -1, -1); 1810 1811 return 0; 1812 } 1813 1814 /** 1815 * nand_erase_op - Do an erase operation 1816 * @chip: The NAND chip 1817 * @eraseblock: block to erase 1818 * 1819 * This function sends an ERASE command and waits for the NAND to be ready 1820 * before returning. 1821 * This function does not select/unselect the CS line. 1822 * 1823 * Returns 0 on success, a negative error code otherwise. 1824 */ 1825 int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock) 1826 { 1827 unsigned int page = eraseblock << 1828 (chip->phys_erase_shift - chip->page_shift); 1829 int ret; 1830 u8 status; 1831 1832 if (nand_has_exec_op(chip)) { 1833 const struct nand_interface_config *conf = 1834 nand_get_interface_config(chip); 1835 u8 addrs[3] = { page, page >> 8, page >> 16 }; 1836 struct nand_op_instr instrs[] = { 1837 NAND_OP_CMD(NAND_CMD_ERASE1, 0), 1838 NAND_OP_ADDR(2, addrs, 0), 1839 NAND_OP_CMD(NAND_CMD_ERASE2, 1840 NAND_COMMON_TIMING_MS(conf, tWB_max)), 1841 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tBERS_max), 1842 0), 1843 }; 1844 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); 1845 1846 if (chip->options & NAND_ROW_ADDR_3) 1847 instrs[1].ctx.addr.naddrs++; 1848 1849 ret = nand_exec_op(chip, &op); 1850 if (ret) 1851 return ret; 1852 1853 ret = nand_status_op(chip, &status); 1854 if (ret) 1855 return ret; 1856 } else { 1857 chip->legacy.cmdfunc(chip, NAND_CMD_ERASE1, -1, page); 1858 chip->legacy.cmdfunc(chip, NAND_CMD_ERASE2, -1, -1); 1859 1860 ret = chip->legacy.waitfunc(chip); 1861 if (ret < 0) 1862 return ret; 1863 1864 status = ret; 1865 } 1866 1867 if (status & NAND_STATUS_FAIL) 1868 
return -EIO; 1869 1870 return 0; 1871 } 1872 EXPORT_SYMBOL_GPL(nand_erase_op); 1873 1874 /** 1875 * nand_set_features_op - Do a SET FEATURES operation 1876 * @chip: The NAND chip 1877 * @feature: feature id 1878 * @data: 4 bytes of data 1879 * 1880 * This function sends a SET FEATURES command and waits for the NAND to be 1881 * ready before returning. 1882 * This function does not select/unselect the CS line. 1883 * 1884 * Returns 0 on success, a negative error code otherwise. 1885 */ 1886 static int nand_set_features_op(struct nand_chip *chip, u8 feature, 1887 const void *data) 1888 { 1889 const u8 *params = data; 1890 int i, ret; 1891 1892 if (nand_has_exec_op(chip)) { 1893 const struct nand_interface_config *conf = 1894 nand_get_interface_config(chip); 1895 struct nand_op_instr instrs[] = { 1896 NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0), 1897 NAND_OP_ADDR(1, &feature, NAND_COMMON_TIMING_NS(conf, 1898 tADL_min)), 1899 NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data, 1900 NAND_COMMON_TIMING_NS(conf, 1901 tWB_max)), 1902 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tFEAT_max), 1903 0), 1904 }; 1905 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); 1906 1907 return nand_exec_op(chip, &op); 1908 } 1909 1910 chip->legacy.cmdfunc(chip, NAND_CMD_SET_FEATURES, feature, -1); 1911 for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i) 1912 chip->legacy.write_byte(chip, params[i]); 1913 1914 ret = chip->legacy.waitfunc(chip); 1915 if (ret < 0) 1916 return ret; 1917 1918 if (ret & NAND_STATUS_FAIL) 1919 return -EIO; 1920 1921 return 0; 1922 } 1923 1924 /** 1925 * nand_get_features_op - Do a GET FEATURES operation 1926 * @chip: The NAND chip 1927 * @feature: feature id 1928 * @data: 4 bytes of data 1929 * 1930 * This function sends a GET FEATURES command and waits for the NAND to be 1931 * ready before returning. 1932 * This function does not select/unselect the CS line. 1933 * 1934 * Returns 0 on success, a negative error code otherwise. 
1935 */ 1936 static int nand_get_features_op(struct nand_chip *chip, u8 feature, 1937 void *data) 1938 { 1939 u8 *params = data, ddrbuf[ONFI_SUBFEATURE_PARAM_LEN * 2]; 1940 int i; 1941 1942 if (nand_has_exec_op(chip)) { 1943 const struct nand_interface_config *conf = 1944 nand_get_interface_config(chip); 1945 struct nand_op_instr instrs[] = { 1946 NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0), 1947 NAND_OP_ADDR(1, &feature, 1948 NAND_COMMON_TIMING_NS(conf, tWB_max)), 1949 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tFEAT_max), 1950 NAND_COMMON_TIMING_NS(conf, tRR_min)), 1951 NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN, 1952 data, 0), 1953 }; 1954 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); 1955 int ret; 1956 1957 /* GET_FEATURE data bytes are received twice in NV-DDR mode */ 1958 if (nand_interface_is_nvddr(conf)) { 1959 instrs[3].ctx.data.len *= 2; 1960 instrs[3].ctx.data.buf.in = ddrbuf; 1961 } 1962 1963 ret = nand_exec_op(chip, &op); 1964 if (nand_interface_is_nvddr(conf)) { 1965 for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; i++) 1966 params[i] = ddrbuf[i * 2]; 1967 } 1968 1969 return ret; 1970 } 1971 1972 chip->legacy.cmdfunc(chip, NAND_CMD_GET_FEATURES, feature, -1); 1973 for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i) 1974 params[i] = chip->legacy.read_byte(chip); 1975 1976 return 0; 1977 } 1978 1979 static int nand_wait_rdy_op(struct nand_chip *chip, unsigned int timeout_ms, 1980 unsigned int delay_ns) 1981 { 1982 if (nand_has_exec_op(chip)) { 1983 struct nand_op_instr instrs[] = { 1984 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(timeout_ms), 1985 PSEC_TO_NSEC(delay_ns)), 1986 }; 1987 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); 1988 1989 return nand_exec_op(chip, &op); 1990 } 1991 1992 /* Apply delay or wait for ready/busy pin */ 1993 if (!chip->legacy.dev_ready) 1994 udelay(chip->legacy.chip_delay); 1995 else 1996 nand_wait_ready(chip); 1997 1998 return 0; 1999 } 2000 2001 /** 2002 * nand_reset_op - Do a reset operation 2003 * @chip: 
The NAND chip
 *
 * This function sends a RESET command and waits for the NAND to be ready
 * before returning.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_reset_op(struct nand_chip *chip)
{
	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RESET,
				    NAND_COMMON_TIMING_NS(conf, tWB_max)),
			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tRST_max),
					 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_RESET, -1, -1);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_reset_op);

/**
 * nand_read_data_op - Read data from the NAND
 * @chip: The NAND chip
 * @buf: buffer used to store the data
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 * @check_only: do not actually run the command, only checks if the
 *              controller driver supports it
 *
 * This function does a raw data read on the bus. Usually used after launching
 * another NAND operation like nand_read_page_op().
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
		      bool force_8bit, bool check_only)
{
	if (!len || !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		u8 *ddrbuf = NULL;
		int ret, i;

		instrs[0].ctx.data.force_8bit = force_8bit;

		/*
		 * Parameter payloads (ID, status, features, etc) do not go
		 * through the same pipeline as regular data, hence the
		 * force_8bit flag must be set and this also indicates that in
		 * case NV-DDR timings are being used the data will be received
		 * twice.
		 */
		if (force_8bit && nand_interface_is_nvddr(conf)) {
			ddrbuf = kzalloc(len * 2, GFP_KERNEL);
			if (!ddrbuf)
				return -ENOMEM;

			instrs[0].ctx.data.len *= 2;
			instrs[0].ctx.data.buf.in = ddrbuf;
		}

		/* Only ask the controller whether it supports the operation. */
		if (check_only) {
			ret = nand_check_op(chip, &op);
			kfree(ddrbuf);
			return ret;
		}

		ret = nand_exec_op(chip, &op);
		if (!ret && force_8bit && nand_interface_is_nvddr(conf)) {
			u8 *dst = buf;

			/* Keep one byte out of each duplicated pair. */
			for (i = 0; i < len; i++)
				dst[i] = ddrbuf[i * 2];
		}

		kfree(ddrbuf);

		return ret;
	}

	/* The legacy path cannot fail, so there is nothing to check. */
	if (check_only)
		return 0;

	if (force_8bit) {
		u8 *p = buf;
		unsigned int i;

		for (i = 0; i < len; i++)
			p[i] = chip->legacy.read_byte(chip);
	} else {
		chip->legacy.read_buf(chip, buf, len);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nand_read_data_op);

/**
 * nand_write_data_op - Write data from the NAND
 * @chip: The NAND chip
 * @buf: buffer containing the data to send on the bus
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 *
 *
This function does a raw data write on the bus. Usually used after launching
 * another NAND operation like nand_write_page_begin_op().
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_write_data_op(struct nand_chip *chip, const void *buf,
		       unsigned int len, bool force_8bit)
{
	if (!len || !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		struct nand_op_instr instrs[] = {
			NAND_OP_DATA_OUT(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		instrs[0].ctx.data.force_8bit = force_8bit;

		return nand_exec_op(chip, &op);
	}

	if (force_8bit) {
		const u8 *p = buf;
		unsigned int i;

		for (i = 0; i < len; i++)
			chip->legacy.write_byte(chip, p[i]);
	} else {
		chip->legacy.write_buf(chip, buf, len);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nand_write_data_op);

/**
 * struct nand_op_parser_ctx - Context used by the parser
 * @instrs: array of all the instructions that must be addressed
 * @ninstrs: length of the @instrs array
 * @subop: Sub-operation to be passed to the NAND controller
 *
 * This structure is used by the core to split NAND operations into
 * sub-operations that can be handled by the NAND controller.
 */
struct nand_op_parser_ctx {
	const struct nand_op_instr *instrs;
	unsigned int ninstrs;
	struct nand_subop subop;
};

/**
 * nand_op_parser_must_split_instr - Checks if an instruction must be split
 * @pat: the parser pattern element that matches @instr
 * @instr: pointer to the instruction to check
 * @start_offset: this is an in/out parameter. If @instr has already been
 *		  split, then @start_offset is the offset from which to start
 *		  (either an address cycle or an offset in the data buffer).
 *		  Conversely, if the function returns true (ie. instr must be
 *		  split), this parameter is updated to point to the first
 *		  data/address cycle that has not been taken care of.
 *
 * Some NAND controllers are limited and cannot send X address cycles with a
 * unique operation, or cannot read/write more than Y bytes at the same time.
 * In this case, split the instruction that does not fit in a single
 * controller-operation into two or more chunks.
 *
 * Returns true if the instruction must be split, false otherwise.
 * The @start_offset parameter is also updated to the offset at which the next
 * bundle of instruction must start (if an address or a data instruction).
 */
static bool
nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat,
				const struct nand_op_instr *instr,
				unsigned int *start_offset)
{
	switch (pat->type) {
	case NAND_OP_ADDR_INSTR:
		/* maxcycles == 0 means "no limit": never split. */
		if (!pat->ctx.addr.maxcycles)
			break;

		if (instr->ctx.addr.naddrs - *start_offset >
		    pat->ctx.addr.maxcycles) {
			*start_offset += pat->ctx.addr.maxcycles;
			return true;
		}
		break;

	case NAND_OP_DATA_IN_INSTR:
	case NAND_OP_DATA_OUT_INSTR:
		/* maxlen == 0 means "no limit": never split. */
		if (!pat->ctx.data.maxlen)
			break;

		if (instr->ctx.data.len - *start_offset >
		    pat->ctx.data.maxlen) {
			*start_offset += pat->ctx.data.maxlen;
			return true;
		}
		break;

	default:
		break;
	}

	return false;
}

/**
 * nand_op_parser_match_pat - Checks if a pattern matches the instructions
 *			      remaining in the parser context
 * @pat: the pattern to test
 * @ctx: the parser context structure to match with the pattern @pat
 *
 * Check if @pat matches the set or a sub-set of instructions remaining in @ctx.
 * Returns true if this is the case, false otherwise.
When true is returned,
 * @ctx->subop is updated with the set of instructions to be passed to the
 * controller driver.
 */
static bool
nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat,
			 struct nand_op_parser_ctx *ctx)
{
	unsigned int instr_offset = ctx->subop.first_instr_start_off;
	const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs;
	const struct nand_op_instr *instr = ctx->subop.instrs;
	unsigned int i, ninstrs;

	/* Walk pattern elements and remaining instructions in lock-step. */
	for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) {
		/*
		 * The pattern instruction does not match the operation
		 * instruction. If the instruction is marked optional in the
		 * pattern definition, we skip the pattern element and continue
		 * to the next one. If the element is mandatory, there's no
		 * match and we can return false directly.
		 */
		if (instr->type != pat->elems[i].type) {
			if (!pat->elems[i].optional)
				return false;

			continue;
		}

		/*
		 * Now check the pattern element constraints. If the pattern is
		 * not able to handle the whole instruction in a single step,
		 * we have to split it.
		 * The last_instr_end_off value comes back updated to point to
		 * the position where we have to split the instruction (the
		 * start of the next subop chunk).
		 */
		if (nand_op_parser_must_split_instr(&pat->elems[i], instr,
						    &instr_offset)) {
			ninstrs++;
			i++;
			break;
		}

		instr++;
		ninstrs++;
		instr_offset = 0;
	}

	/*
	 * This can happen if all instructions of a pattern are optional.
	 * Still, if there's not at least one instruction handled by this
	 * pattern, this is not a match, and we should try the next one (if
	 * any).
	 */
	if (!ninstrs)
		return false;

	/*
	 * We had a match on the pattern head, but the pattern may be longer
	 * than the instructions we're asked to execute. We need to make sure
	 * there's no mandatory elements in the pattern tail.
	 */
	for (; i < pat->nelems; i++) {
		if (!pat->elems[i].optional)
			return false;
	}

	/*
	 * We have a match: update the subop structure accordingly and return
	 * true.
	 */
	ctx->subop.ninstrs = ninstrs;
	ctx->subop.last_instr_end_off = instr_offset;

	return true;
}

#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
	const struct nand_op_instr *instr;
	char *prefix = "      ";
	unsigned int i;

	pr_debug("executing subop (CS%d):\n", ctx->subop.cs);

	for (i = 0; i < ctx->ninstrs; i++) {
		instr = &ctx->instrs[i];

		/* Mark the instructions that belong to the sub-operation. */
		if (instr == &ctx->subop.instrs[0])
			prefix = "    ->";

		nand_op_trace(prefix, instr);

		if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
			prefix = "      ";
	}
}
#else
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
	/* NOP */
}
#endif

/*
 * Compare two candidate sub-operations: the one covering more instructions
 * (or more of the last, split instruction) wins (> 0).
 */
static int nand_op_parser_cmp_ctx(const struct nand_op_parser_ctx *a,
				  const struct nand_op_parser_ctx *b)
{
	if (a->subop.ninstrs < b->subop.ninstrs)
		return -1;
	else if (a->subop.ninstrs > b->subop.ninstrs)
		return 1;

	if (a->subop.last_instr_end_off < b->subop.last_instr_end_off)
		return -1;
	else if (a->subop.last_instr_end_off > b->subop.last_instr_end_off)
		return 1;

	return 0;
}

/**
 * nand_op_parser_exec_op - exec_op parser
 * @chip: the NAND chip
 * @parser: patterns description provided by the controller driver
 * @op: the NAND operation to address
 * @check_only: when true, the function only checks if @op can be handled but
 *		does not execute the operation
 *
 * Helper function designed to ease integration of NAND controller drivers that
only support a limited set of instruction sequences. The supported sequences
 * are described in @parser, and the framework takes care of splitting @op into
 * multiple sub-operations (if required) and pass them back to the ->exec()
 * callback of the matching pattern if @check_only is set to false.
 *
 * NAND controller drivers should call this function from their own ->exec_op()
 * implementation.
 *
 * Returns 0 on success, a negative error code otherwise. A failure can be
 * caused by an unsupported operation (none of the supported patterns is able
 * to handle the requested operation), or an error returned by one of the
 * matching pattern->exec() hook.
 */
int nand_op_parser_exec_op(struct nand_chip *chip,
			   const struct nand_op_parser *parser,
			   const struct nand_operation *op, bool check_only)
{
	struct nand_op_parser_ctx ctx = {
		.subop.cs = op->cs,
		.subop.instrs = op->instrs,
		.instrs = op->instrs,
		.ninstrs = op->ninstrs,
	};
	unsigned int i;

	/* Consume the operation sub-operation by sub-operation. */
	while (ctx.subop.instrs < op->instrs + op->ninstrs) {
		const struct nand_op_parser_pattern *pattern;
		struct nand_op_parser_ctx best_ctx;
		int ret, best_pattern = -1;

		/* Try every pattern and keep the best (largest) match. */
		for (i = 0; i < parser->npatterns; i++) {
			struct nand_op_parser_ctx test_ctx = ctx;

			pattern = &parser->patterns[i];
			if (!nand_op_parser_match_pat(pattern, &test_ctx))
				continue;

			if (best_pattern >= 0 &&
			    nand_op_parser_cmp_ctx(&test_ctx, &best_ctx) <= 0)
				continue;

			best_pattern = i;
			best_ctx = test_ctx;
		}

		if (best_pattern < 0) {
			pr_debug("->exec_op() parser: pattern not found!\n");
			return -ENOTSUPP;
		}

		ctx = best_ctx;
		nand_op_parser_trace(&ctx);

		if (!check_only) {
			pattern = &parser->patterns[best_pattern];
			ret = pattern->exec(chip, &ctx.subop);
			if (ret)
				return ret;
		}

		/*
		 * Update the context structure by pointing to the start of the
		 * next subop.
		 */
		ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs;
		/*
		 * If the last instruction was split, re-visit it as the head
		 * of the next sub-operation.
		 */
		if (ctx.subop.last_instr_end_off)
			ctx.subop.instrs -= 1;

		ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nand_op_parser_exec_op);

/* True when @instr transfers data (either direction) on the bus. */
static bool nand_instr_is_data(const struct nand_op_instr *instr)
{
	return instr && (instr->type == NAND_OP_DATA_IN_INSTR ||
			 instr->type == NAND_OP_DATA_OUT_INSTR);
}

/* Bound-check @instr_idx against the sub-operation instruction count. */
static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
				      unsigned int instr_idx)
{
	return subop && instr_idx < subop->ninstrs;
}

/*
 * Only the first instruction of a sub-operation can start at a non-zero
 * offset (when the previous sub-operation split it).
 */
static unsigned int nand_subop_get_start_off(const struct nand_subop *subop,
					     unsigned int instr_idx)
{
	if (instr_idx)
		return 0;

	return subop->first_instr_start_off;
}

/**
 * nand_subop_get_addr_start_off - Get the start offset in an address array
 * @subop: The entire sub-operation
 * @instr_idx: Index of the instruction inside the sub-operation
 *
 * During driver development, one could be tempted to directly use the
 * ->addr.addrs field of address instructions. This is wrong as address
 * instructions might be split.
 *
 * Given an address instruction, returns the offset of the first cycle to issue.
2476 */ 2477 unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop, 2478 unsigned int instr_idx) 2479 { 2480 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) || 2481 subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR)) 2482 return 0; 2483 2484 return nand_subop_get_start_off(subop, instr_idx); 2485 } 2486 EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off); 2487 2488 /** 2489 * nand_subop_get_num_addr_cyc - Get the remaining address cycles to assert 2490 * @subop: The entire sub-operation 2491 * @instr_idx: Index of the instruction inside the sub-operation 2492 * 2493 * During driver development, one could be tempted to directly use the 2494 * ->addr->naddrs field of a data instruction. This is wrong as instructions 2495 * might be split. 2496 * 2497 * Given an address instruction, returns the number of address cycle to issue. 2498 */ 2499 unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop, 2500 unsigned int instr_idx) 2501 { 2502 int start_off, end_off; 2503 2504 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) || 2505 subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR)) 2506 return 0; 2507 2508 start_off = nand_subop_get_addr_start_off(subop, instr_idx); 2509 2510 if (instr_idx == subop->ninstrs - 1 && 2511 subop->last_instr_end_off) 2512 end_off = subop->last_instr_end_off; 2513 else 2514 end_off = subop->instrs[instr_idx].ctx.addr.naddrs; 2515 2516 return end_off - start_off; 2517 } 2518 EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc); 2519 2520 /** 2521 * nand_subop_get_data_start_off - Get the start offset in a data array 2522 * @subop: The entire sub-operation 2523 * @instr_idx: Index of the instruction inside the sub-operation 2524 * 2525 * During driver development, one could be tempted to directly use the 2526 * ->data->buf.{in,out} field of data instructions. This is wrong as data 2527 * instructions might be split. 2528 * 2529 * Given a data instruction, returns the offset to start from. 
2530 */ 2531 unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop, 2532 unsigned int instr_idx) 2533 { 2534 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) || 2535 !nand_instr_is_data(&subop->instrs[instr_idx]))) 2536 return 0; 2537 2538 return nand_subop_get_start_off(subop, instr_idx); 2539 } 2540 EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off); 2541 2542 /** 2543 * nand_subop_get_data_len - Get the number of bytes to retrieve 2544 * @subop: The entire sub-operation 2545 * @instr_idx: Index of the instruction inside the sub-operation 2546 * 2547 * During driver development, one could be tempted to directly use the 2548 * ->data->len field of a data instruction. This is wrong as data instructions 2549 * might be split. 2550 * 2551 * Returns the length of the chunk of data to send/receive. 2552 */ 2553 unsigned int nand_subop_get_data_len(const struct nand_subop *subop, 2554 unsigned int instr_idx) 2555 { 2556 int start_off = 0, end_off; 2557 2558 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) || 2559 !nand_instr_is_data(&subop->instrs[instr_idx]))) 2560 return 0; 2561 2562 start_off = nand_subop_get_data_start_off(subop, instr_idx); 2563 2564 if (instr_idx == subop->ninstrs - 1 && 2565 subop->last_instr_end_off) 2566 end_off = subop->last_instr_end_off; 2567 else 2568 end_off = subop->instrs[instr_idx].ctx.data.len; 2569 2570 return end_off - start_off; 2571 } 2572 EXPORT_SYMBOL_GPL(nand_subop_get_data_len); 2573 2574 /** 2575 * nand_reset - Reset and initialize a NAND device 2576 * @chip: The NAND chip 2577 * @chipnr: Internal die id 2578 * 2579 * Save the timings data structure, then apply SDR timings mode 0 (see 2580 * nand_reset_interface for details), do the reset operation, and apply 2581 * back the previous timings. 2582 * 2583 * Returns 0 on success, a negative error code otherwise. 
2584 */ 2585 int nand_reset(struct nand_chip *chip, int chipnr) 2586 { 2587 int ret; 2588 2589 ret = nand_reset_interface(chip, chipnr); 2590 if (ret) 2591 return ret; 2592 2593 /* 2594 * The CS line has to be released before we can apply the new NAND 2595 * interface settings, hence this weird nand_select_target() 2596 * nand_deselect_target() dance. 2597 */ 2598 nand_select_target(chip, chipnr); 2599 ret = nand_reset_op(chip); 2600 nand_deselect_target(chip); 2601 if (ret) 2602 return ret; 2603 2604 ret = nand_setup_interface(chip, chipnr); 2605 if (ret) 2606 return ret; 2607 2608 return 0; 2609 } 2610 EXPORT_SYMBOL_GPL(nand_reset); 2611 2612 /** 2613 * nand_get_features - wrapper to perform a GET_FEATURE 2614 * @chip: NAND chip info structure 2615 * @addr: feature address 2616 * @subfeature_param: the subfeature parameters, a four bytes array 2617 * 2618 * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the 2619 * operation cannot be handled. 2620 */ 2621 int nand_get_features(struct nand_chip *chip, int addr, 2622 u8 *subfeature_param) 2623 { 2624 if (!nand_supports_get_features(chip, addr)) 2625 return -ENOTSUPP; 2626 2627 if (chip->legacy.get_features) 2628 return chip->legacy.get_features(chip, addr, subfeature_param); 2629 2630 return nand_get_features_op(chip, addr, subfeature_param); 2631 } 2632 2633 /** 2634 * nand_set_features - wrapper to perform a SET_FEATURE 2635 * @chip: NAND chip info structure 2636 * @addr: feature address 2637 * @subfeature_param: the subfeature parameters, a four bytes array 2638 * 2639 * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the 2640 * operation cannot be handled. 
2641 */ 2642 int nand_set_features(struct nand_chip *chip, int addr, 2643 u8 *subfeature_param) 2644 { 2645 if (!nand_supports_set_features(chip, addr)) 2646 return -ENOTSUPP; 2647 2648 if (chip->legacy.set_features) 2649 return chip->legacy.set_features(chip, addr, subfeature_param); 2650 2651 return nand_set_features_op(chip, addr, subfeature_param); 2652 } 2653 2654 /** 2655 * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data 2656 * @buf: buffer to test 2657 * @len: buffer length 2658 * @bitflips_threshold: maximum number of bitflips 2659 * 2660 * Check if a buffer contains only 0xff, which means the underlying region 2661 * has been erased and is ready to be programmed. 2662 * The bitflips_threshold specify the maximum number of bitflips before 2663 * considering the region is not erased. 2664 * Note: The logic of this function has been extracted from the memweight 2665 * implementation, except that nand_check_erased_buf function exit before 2666 * testing the whole buffer if the number of bitflips exceed the 2667 * bitflips_threshold value. 2668 * 2669 * Returns a positive number of bitflips less than or equal to 2670 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the 2671 * threshold. 
 */
static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
{
	const unsigned char *bitmap = buf;
	int bitflips = 0;
	int weight;

	/* Head: byte-by-byte until the pointer is long-aligned. */
	for (; len && ((uintptr_t)bitmap) % sizeof(long);
	     len--, bitmap++) {
		weight = hweight8(*bitmap);
		bitflips += BITS_PER_BYTE - weight;
		if (unlikely(bitflips > bitflips_threshold))
			return -EBADMSG;
	}

	/* Body: one long at a time; an all-ones word has no bitflips. */
	for (; len >= sizeof(long);
	     len -= sizeof(long), bitmap += sizeof(long)) {
		unsigned long d = *((unsigned long *)bitmap);
		if (d == ~0UL)
			continue;
		weight = hweight_long(d);
		bitflips += BITS_PER_LONG - weight;
		if (unlikely(bitflips > bitflips_threshold))
			return -EBADMSG;
	}

	/* Tail: remaining bytes, fewer than sizeof(long). */
	for (; len > 0; len--, bitmap++) {
		weight = hweight8(*bitmap);
		bitflips += BITS_PER_BYTE - weight;
		if (unlikely(bitflips > bitflips_threshold))
			return -EBADMSG;
	}

	return bitflips;
}

/**
 * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
 *				 0xff data
 * @data: data buffer to test
 * @datalen: data length
 * @ecc: ECC buffer
 * @ecclen: ECC length
 * @extraoob: extra OOB buffer
 * @extraooblen: extra OOB length
 * @bitflips_threshold: maximum number of bitflips
 *
 * Check if a data buffer and its associated ECC and OOB data contains only
 * 0xff pattern, which means the underlying region has been erased and is
 * ready to be programmed.
 * The bitflips_threshold specify the maximum number of bitflips before
 * considering the region as not erased.
 *
 * Note:
 * 1/ ECC algorithms are working on pre-defined block sizes which are usually
 *    different from the NAND page size. When fixing bitflips, ECC engines will
 *    report the number of errors per chunk, and the NAND core infrastructure
 *    expect you to return the maximum number of bitflips for the whole page.
 *    This is why you should always use this function on a single chunk and
 *    not on the whole page. After checking each chunk you should update your
 *    max_bitflips value accordingly.
 * 2/ When checking for bitflips in erased pages you should not only check
 *    the payload data but also their associated ECC data, because a user might
 *    have programmed almost all bits to 1 but a few. In this case, we
 *    shouldn't consider the chunk as erased, and checking ECC bytes prevent
 *    this case.
 * 3/ The extraoob argument is optional, and should be used if some of your OOB
 *    data are protected by the ECC engine.
 *    It could also be used if you support subpages and want to attach some
 *    extra OOB data to an ECC chunk.
 *
 * Returns a positive number of bitflips less than or equal to
 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
 * threshold. In case of success, the passed buffers are filled with 0xff.
 */
int nand_check_erased_ecc_chunk(void *data, int datalen,
				void *ecc, int ecclen,
				void *extraoob, int extraooblen,
				int bitflips_threshold)
{
	int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;

	data_bitflips = nand_check_erased_buf(data, datalen,
					      bitflips_threshold);
	if (data_bitflips < 0)
		return data_bitflips;

	/* Shrink the budget so the total over all areas stays bounded. */
	bitflips_threshold -= data_bitflips;

	ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
	if (ecc_bitflips < 0)
		return ecc_bitflips;

	bitflips_threshold -= ecc_bitflips;

	extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
						  bitflips_threshold);
	if (extraoob_bitflips < 0)
		return extraoob_bitflips;

	/* The chunk counts as erased: hand clean 0xff buffers back. */
	if (data_bitflips)
		memset(data, 0xff, datalen);

	if (ecc_bitflips)
		memset(ecc, 0xff, ecclen);

	if (extraoob_bitflips)
		memset(extraoob, 0xff, extraooblen);

	return data_bitflips + ecc_bitflips + extraoob_bitflips;
}
EXPORT_SYMBOL(nand_check_erased_ecc_chunk);

/**
 * nand_read_page_raw_notsupp - dummy read raw page function
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Returns -ENOTSUPP unconditionally.
 */
int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf,
			       int oob_required, int page)
{
	return -ENOTSUPP;
}

/**
 * nand_read_page_raw - [INTERN] read raw page data without ecc
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Not for syndrome calculating ECC controllers, which use a special oob layout.
 */
int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
		       int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
	if (ret)
		return ret;

	/* OOB is fetched with a second data transfer, only when asked for. */
	if (oob_required) {
		ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
					false, false);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(nand_read_page_raw);

/**
 * nand_monolithic_read_page_raw - Monolithic page read in raw mode
 * @chip: NAND chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * This is a raw page read, ie. without any error detection/correction.
 * Monolithic means we are requesting all the relevant data (main plus
 * eventually OOB) to be loaded in the NAND cache and sent over the
 * bus (from the NAND chip to the NAND controller) in a single
 * operation.
This is an alternative to nand_read_page_raw(), which
 * first reads the main data, and if the OOB data is requested too,
 * then reads more data on the bus.
 */
int nand_monolithic_read_page_raw(struct nand_chip *chip, u8 *buf,
				  int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int size = mtd->writesize;
	u8 *read_buf = buf;
	int ret;

	if (oob_required) {
		size += mtd->oobsize;

		/*
		 * Bounce through the core data buffer so the trailing OOB
		 * bytes end up where chip->oob_poi expects them.
		 * NOTE(review): assumes nand_get_data_buf() returns
		 * chip->data_buf — confirm against its definition.
		 */
		if (buf != chip->data_buf)
			read_buf = nand_get_data_buf(chip);
	}

	ret = nand_read_page_op(chip, page, 0, read_buf, size);
	if (ret)
		return ret;

	/* Copy the main data back to the caller's buffer if we bounced. */
	if (buf != chip->data_buf)
		memcpy(buf, read_buf, mtd->writesize);

	return 0;
}
EXPORT_SYMBOL(nand_monolithic_read_page_raw);

/**
 * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * We need a special oob layout and handling even when OOB isn't used.
 */
static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf,
				       int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	uint8_t *oob = chip->oob_poi;
	int steps, size, ret;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/* Interleaved layout: data / prepad / ECC / postpad per ECC step. */
	for (steps = chip->ecc.steps; steps > 0; steps--) {
		ret = nand_read_data_op(chip, buf, eccsize, false, false);
		if (ret)
			return ret;

		buf += eccsize;

		if (chip->ecc.prepad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		ret = nand_read_data_op(chip, oob, eccbytes, false, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Read whatever OOB bytes are left after the last ECC step. */
	size = mtd->oobsize - (oob - chip->oob_poi);
	if (size) {
		ret = nand_read_data_op(chip, oob, size, false, false);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 */
static int nand_read_page_swecc(struct nand_chip *chip, uint8_t *buf,
				int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	unsigned int max_bitflips =
0; 2953 2954 chip->ecc.read_page_raw(chip, buf, 1, page); 2955 2956 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) 2957 chip->ecc.calculate(chip, p, &ecc_calc[i]); 2958 2959 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, 2960 chip->ecc.total); 2961 if (ret) 2962 return ret; 2963 2964 eccsteps = chip->ecc.steps; 2965 p = buf; 2966 2967 for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { 2968 int stat; 2969 2970 stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]); 2971 if (stat < 0) { 2972 mtd->ecc_stats.failed++; 2973 } else { 2974 mtd->ecc_stats.corrected += stat; 2975 max_bitflips = max_t(unsigned int, max_bitflips, stat); 2976 } 2977 } 2978 return max_bitflips; 2979 } 2980 2981 /** 2982 * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function 2983 * @chip: nand chip info structure 2984 * @data_offs: offset of requested data within the page 2985 * @readlen: data length 2986 * @bufpoi: buffer to store read data 2987 * @page: page number to read 2988 */ 2989 static int nand_read_subpage(struct nand_chip *chip, uint32_t data_offs, 2990 uint32_t readlen, uint8_t *bufpoi, int page) 2991 { 2992 struct mtd_info *mtd = nand_to_mtd(chip); 2993 int start_step, end_step, num_steps, ret; 2994 uint8_t *p; 2995 int data_col_addr, i, gaps = 0; 2996 int datafrag_len, eccfrag_len, aligned_len, aligned_pos; 2997 int busw = (chip->options & NAND_BUSWIDTH_16) ? 
2 : 1; 2998 int index, section = 0; 2999 unsigned int max_bitflips = 0; 3000 struct mtd_oob_region oobregion = { }; 3001 3002 /* Column address within the page aligned to ECC size (256bytes) */ 3003 start_step = data_offs / chip->ecc.size; 3004 end_step = (data_offs + readlen - 1) / chip->ecc.size; 3005 num_steps = end_step - start_step + 1; 3006 index = start_step * chip->ecc.bytes; 3007 3008 /* Data size aligned to ECC ecc.size */ 3009 datafrag_len = num_steps * chip->ecc.size; 3010 eccfrag_len = num_steps * chip->ecc.bytes; 3011 3012 data_col_addr = start_step * chip->ecc.size; 3013 /* If we read not a page aligned data */ 3014 p = bufpoi + data_col_addr; 3015 ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len); 3016 if (ret) 3017 return ret; 3018 3019 /* Calculate ECC */ 3020 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) 3021 chip->ecc.calculate(chip, p, &chip->ecc.calc_buf[i]); 3022 3023 /* 3024 * The performance is faster if we position offsets according to 3025 * ecc.pos. Let's make sure that there are no gaps in ECC positions. 3026 */ 3027 ret = mtd_ooblayout_find_eccregion(mtd, index, §ion, &oobregion); 3028 if (ret) 3029 return ret; 3030 3031 if (oobregion.length < eccfrag_len) 3032 gaps = 1; 3033 3034 if (gaps) { 3035 ret = nand_change_read_column_op(chip, mtd->writesize, 3036 chip->oob_poi, mtd->oobsize, 3037 false); 3038 if (ret) 3039 return ret; 3040 } else { 3041 /* 3042 * Send the command to read the particular ECC bytes take care 3043 * about buswidth alignment in read_buf. 
3044 */ 3045 aligned_pos = oobregion.offset & ~(busw - 1); 3046 aligned_len = eccfrag_len; 3047 if (oobregion.offset & (busw - 1)) 3048 aligned_len++; 3049 if ((oobregion.offset + (num_steps * chip->ecc.bytes)) & 3050 (busw - 1)) 3051 aligned_len++; 3052 3053 ret = nand_change_read_column_op(chip, 3054 mtd->writesize + aligned_pos, 3055 &chip->oob_poi[aligned_pos], 3056 aligned_len, false); 3057 if (ret) 3058 return ret; 3059 } 3060 3061 ret = mtd_ooblayout_get_eccbytes(mtd, chip->ecc.code_buf, 3062 chip->oob_poi, index, eccfrag_len); 3063 if (ret) 3064 return ret; 3065 3066 p = bufpoi + data_col_addr; 3067 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) { 3068 int stat; 3069 3070 stat = chip->ecc.correct(chip, p, &chip->ecc.code_buf[i], 3071 &chip->ecc.calc_buf[i]); 3072 if (stat == -EBADMSG && 3073 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) { 3074 /* check for empty pages with bitflips */ 3075 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size, 3076 &chip->ecc.code_buf[i], 3077 chip->ecc.bytes, 3078 NULL, 0, 3079 chip->ecc.strength); 3080 } 3081 3082 if (stat < 0) { 3083 mtd->ecc_stats.failed++; 3084 } else { 3085 mtd->ecc_stats.corrected += stat; 3086 max_bitflips = max_t(unsigned int, max_bitflips, stat); 3087 } 3088 } 3089 return max_bitflips; 3090 } 3091 3092 /** 3093 * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function 3094 * @chip: nand chip info structure 3095 * @buf: buffer to store read data 3096 * @oob_required: caller requires OOB data read to chip->oob_poi 3097 * @page: page number to read 3098 * 3099 * Not for syndrome calculating ECC controllers which need a special oob layout. 
 */
static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
				int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	unsigned int max_bitflips = 0;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/* Read each ECC step and let the hardware engine compute its code. */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false, false);
		if (ret)
			return ret;

		chip->ecc.calculate(chip, p, &ecc_calc[i]);
	}

	ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false,
				false);
	if (ret)
		return ret;

	/* Pull the on-flash ECC bytes out of the OOB area. */
	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	eccsteps = chip->ecc.steps;
	p = buf;

	/* Correct each step against the stored ECC bytes. */
	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, eccsize,
							   &ecc_code[i], eccbytes,
							   NULL, 0,
							   chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}

/**
 * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * The hw generator calculates the error syndrome automatically. Therefore we
 * need a special oob layout and handling.
 */
static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
				   int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret, i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
	uint8_t *p = buf;
	uint8_t *oob = chip->oob_poi;
	unsigned int max_bitflips = 0;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/* Interleaved layout: data / prepad / ECC / postpad per step. */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false, false);
		if (ret)
			return ret;

		if (chip->ecc.prepad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		/* Switch the engine to syndrome mode for the ECC bytes. */
		chip->ecc.hwctl(chip, NAND_ECC_READSYN);

		ret = nand_read_data_op(chip, oob, eccbytes, false, false);
		if (ret)
			return ret;

		stat = chip->ecc.correct(chip, p, oob, NULL);

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}

		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
							   oob - eccpadbytes,
							   eccpadbytes,
							   NULL, 0,
							   chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}

	/* Calculate remaining oob bytes */
	i = mtd->oobsize - (oob - chip->oob_poi);
	if (i) {
		ret = nand_read_data_op(chip, oob, i, false, false);
		if (ret)
			return ret;
	}

	return max_bitflips;
}

/**
 * nand_transfer_oob - [INTERN] Transfer oob to client buffer
 * @chip: NAND chip object
 * @oob: oob destination address
 * @ops: oob ops structure
 * @len: size of oob to transfer
 */
static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
				  struct mtd_oob_ops *ops, size_t len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	switch (ops->mode) {

	case MTD_OPS_PLACE_OOB:
	case MTD_OPS_RAW:
		/* Raw/place modes: straight copy from the requested offset. */
		memcpy(oob, chip->oob_poi + ops->ooboffs, len);
		return oob + len;

	case MTD_OPS_AUTO_OOB:
		/* Auto mode: only copy the bytes the OOB layout exposes. */
		ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
						  ops->ooboffs, len);
		BUG_ON(ret);
		return oob + len;

	default:
		BUG();
	}
	return NULL;
}

/**
 * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
 * @chip: NAND chip object
 * @retry_mode: the retry mode to use
 *
 * Some vendors supply a special command to shift the Vt threshold, to be used
 * when there are too many bitflips in a page (i.e., ECC error). After setting
 * a new threshold, the host should retry reading the page.
 */
static int nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
{
	pr_debug("setting READ RETRY mode %d\n", retry_mode);

	if (retry_mode >= chip->read_retries)
		return -EINVAL;

	if (!chip->ops.setup_read_retry)
		return -EOPNOTSUPP;

	return chip->ops.setup_read_retry(chip, retry_mode);
}

/* Wait for R/B after a read when the chip requires an explicit ready wait */
static void nand_wait_readrdy(struct nand_chip *chip)
{
	const struct nand_interface_config *conf;

	if (!(chip->options & NAND_NEED_READRDY))
		return;

	conf = nand_get_interface_config(chip);
	WARN_ON(nand_wait_rdy_op(chip, NAND_COMMON_TIMING_MS(conf, tR_max), 0));
}

/**
 * nand_do_read_ops - [INTERN] Read data with ECC
 * @chip: NAND chip object
 * @from: offset to read from
 * @ops: oob ops structure
 *
 * Internal function. Called with chip held.
 */
static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
			    struct mtd_oob_ops *ops)
{
	int chipnr, page, realpage, col, bytes, aligned, oob_required;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret = 0;
	uint32_t readlen = ops->len;
	uint32_t oobreadlen = ops->ooblen;
	uint32_t max_oobsize = mtd_oobavail(mtd, ops);

	uint8_t *bufpoi, *oob, *buf;
	int use_bounce_buf;
	unsigned int max_bitflips = 0;
	int retry_mode = 0;
	bool ecc_fail = false;

	/* Check if the region is secured */
	if (nand_region_is_secured(chip, from, readlen))
		return -EIO;

	chipnr = (int)(from >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	realpage = (int)(from >> chip->page_shift);
	page = realpage & chip->pagemask;

	/* Column offset of the first read within its page */
	col = (int)(from & (mtd->writesize - 1));

	buf = ops->datbuf;
	oob = ops->oobbuf;
	oob_required = oob ? 1 : 0;

	while (1) {
		/* Snapshot stats so per-page ECC failures can be detected */
		struct mtd_ecc_stats ecc_stats = mtd->ecc_stats;

		bytes = min(mtd->writesize - col, readlen);
		aligned = (bytes == mtd->writesize);

		/*
		 * Partial-page reads always go through the bounce buffer;
		 * DMA-capable controllers also need it for unmapped or
		 * misaligned caller buffers.
		 */
		if (!aligned)
			use_bounce_buf = 1;
		else if (chip->options & NAND_USES_DMA)
			use_bounce_buf = !virt_addr_valid(buf) ||
					 !IS_ALIGNED((unsigned long)buf,
						     chip->buf_align);
		else
			use_bounce_buf = 0;

		/* Is the current page in the buffer? */
		if (realpage != chip->pagecache.page || oob) {
			bufpoi = use_bounce_buf ? chip->data_buf : buf;

			if (use_bounce_buf && aligned)
				pr_debug("%s: using read bounce buffer for buf@%p\n",
					 __func__, buf);

read_retry:
			/*
			 * Now read the page into the buffer. Absent an error,
			 * the read methods return max bitflips per ecc step.
			 */
			if (unlikely(ops->mode == MTD_OPS_RAW))
				ret = chip->ecc.read_page_raw(chip, bufpoi,
							      oob_required,
							      page);
			else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
				 !oob)
				ret = chip->ecc.read_subpage(chip, col, bytes,
							     bufpoi, page);
			else
				ret = chip->ecc.read_page(chip, bufpoi,
							  oob_required, page);
			if (ret < 0) {
				if (use_bounce_buf)
					/* Invalidate page cache */
					chip->pagecache.page = -1;
				break;
			}

			/*
			 * Copy back the data in the initial buffer when reading
			 * partial pages or when a bounce buffer is required.
			 */
			if (use_bounce_buf) {
				if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
				    !(mtd->ecc_stats.failed - ecc_stats.failed) &&
				    (ops->mode != MTD_OPS_RAW)) {
					/* Cache a cleanly-read full page */
					chip->pagecache.page = realpage;
					chip->pagecache.bitflips = ret;
				} else {
					/* Invalidate page cache */
					chip->pagecache.page = -1;
				}
				memcpy(buf, bufpoi + col, bytes);
			}

			if (unlikely(oob)) {
				int toread = min(oobreadlen, max_oobsize);

				if (toread) {
					oob = nand_transfer_oob(chip, oob, ops,
								toread);
					oobreadlen -= toread;
				}
			}

			nand_wait_readrdy(chip);

			if (mtd->ecc_stats.failed - ecc_stats.failed) {
				if (retry_mode + 1 < chip->read_retries) {
					retry_mode++;
					ret = nand_setup_read_retry(chip,
								    retry_mode);
					if (ret < 0)
						break;

					/* Reset ecc_stats; retry */
					mtd->ecc_stats = ecc_stats;
					goto read_retry;
				} else {
					/* No more retry modes; real failure */
					ecc_fail = true;
				}
			}

			buf += bytes;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		} else {
			/* Page is cached: serve the read from the cache */
			memcpy(buf, chip->data_buf + col, bytes);
			buf += bytes;
			max_bitflips = max_t(unsigned int, max_bitflips,
					     chip->pagecache.bitflips);
		}

		readlen -= bytes;

		/* Reset to retry mode 0 */
		if (retry_mode) {
			ret = nand_setup_read_retry(chip, 0);
			if (ret < 0)
				break;
			retry_mode = 0;
		}

		if (!readlen)
			break;

		/* For subsequent reads align to page boundary */
		col = 0;
		/* Increment page address */
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}
	nand_deselect_target(chip);

	ops->retlen = ops->len - (size_t) readlen;
	if (oob)
		ops->oobretlen = ops->ooblen - oobreadlen;

	if (ret < 0)
		return ret;

	if (ecc_fail)
		return -EBADMSG;

	return max_bitflips;
}

/**
 * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
 * @chip: nand chip info structure
 * @page: page number to read
 */
int nand_read_oob_std(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
EXPORT_SYMBOL(nand_read_oob_std);

/**
 * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
 *			    with syndromes
 * @chip: nand chip info structure
 * @page: page number to read
 */
static int nand_read_oob_syndrome(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int length = mtd->oobsize;
	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
	int eccsize = chip->ecc.size;
	uint8_t *bufpoi = chip->oob_poi;
	int i, toread, sndrnd = 0, pos, ret;

	/* Position the read at the first OOB chunk (right after the data) */
	ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; i < chip->ecc.steps; i++) {
		if (sndrnd) {
			/* NOTE: inner ret intentionally shadows the outer one */
			int ret;

			pos = eccsize + i * (eccsize + chunk);
			if (mtd->writesize > 512)
				ret = nand_change_read_column_op(chip, pos,
								 NULL, 0,
								 false);
			else
				ret = nand_read_page_op(chip, page, pos, NULL,
							0);

			if (ret)
				return ret;
		} else
			sndrnd = 1;
		toread = min_t(int, length, chunk);

		ret = nand_read_data_op(chip, bufpoi, toread, false, false);
		if (ret)
			return ret;

		bufpoi += toread;
		length -= toread;
	}
	/* Pick up any trailing OOB bytes after the last chunk */
	if (length > 0) {
		ret = nand_read_data_op(chip, bufpoi, length, false, false);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
 * @chip: nand chip info structure
 * @page: page number to write
 */
int nand_write_oob_std(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
				 mtd->oobsize);
}
EXPORT_SYMBOL(nand_write_oob_std);

/**
 * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
 *			     with syndrome - only for large page flash
 * @chip: nand chip info structure
 * @page: page number to write
 */
static int nand_write_oob_syndrome(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
	int eccsize = chip->ecc.size, length = mtd->oobsize;
	int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
	const uint8_t *bufpoi = chip->oob_poi;

	/*
	 * data-ecc-data-ecc ... ecc-oob
	 * or
	 * data-pad-ecc-pad-data-pad .... ecc-pad-oob
	 */
	if (!chip->ecc.prepad && !chip->ecc.postpad) {
		/* No padding: OOB lives in one contiguous area at the end */
		pos = steps * (eccsize + chunk);
		steps = 0;
	} else
		pos = eccsize;

	ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; i < steps; i++) {
		if (sndcmd) {
			if (mtd->writesize <= 512) {
				/*
				 * Small-page chips cannot reposition the
				 * write column: pad the skipped data area
				 * with 0xFF instead.
				 */
				uint32_t fill = 0xFFFFFFFF;

				len = eccsize;
				while (len > 0) {
					int num = min_t(int, len, 4);

					ret = nand_write_data_op(chip, &fill,
								 num, false);
					if (ret)
						return ret;

					len -= num;
				}
			} else {
				pos = eccsize + i * (eccsize + chunk);
				ret = nand_change_write_column_op(chip, pos,
								  NULL, 0,
								  false);
				if (ret)
					return ret;
			}
		} else
			sndcmd = 1;
		len = min_t(int, length, chunk);

		ret = nand_write_data_op(chip, bufpoi, len, false);
		if (ret)
			return ret;

		bufpoi += len;
		length -= len;
	}
	/* Write any trailing OOB bytes after the last chunk */
	if (length > 0) {
		ret = nand_write_data_op(chip, bufpoi, length, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}

/**
 * nand_do_read_oob - [INTERN] NAND read out-of-band
 * @chip: NAND chip object
 * @from: offset to read from
 * @ops: oob operations description structure
 *
 * NAND read out-of-band data from the spare area.
 */
static int nand_do_read_oob(struct nand_chip *chip, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int max_bitflips = 0;
	int page, realpage, chipnr;
	struct mtd_ecc_stats stats;
	int readlen = ops->ooblen;
	int len;
	uint8_t *buf = ops->oobbuf;
	int ret = 0;

	pr_debug("%s: from = 0x%08Lx, len = %i\n",
		 __func__, (unsigned long long)from, readlen);

	/* Check if the region is secured */
	if (nand_region_is_secured(chip, from, readlen))
		return -EIO;

	/* Snapshot stats to detect new ECC failures at the end */
	stats = mtd->ecc_stats;

	len = mtd_oobavail(mtd, ops);

	chipnr = (int)(from >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	/* Shift to get page */
	realpage = (int)(from >> chip->page_shift);
	page = realpage & chip->pagemask;

	while (1) {
		if (ops->mode == MTD_OPS_RAW)
			ret = chip->ecc.read_oob_raw(chip, page);
		else
			ret = chip->ecc.read_oob(chip, page);

		if (ret < 0)
			break;

		len = min(len, readlen);
		buf = nand_transfer_oob(chip, buf, ops, len);

		nand_wait_readrdy(chip);

		max_bitflips = max_t(unsigned int, max_bitflips, ret);

		readlen -= len;
		if (!readlen)
			break;

		/* Increment page address */
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}
	nand_deselect_target(chip);

	ops->oobretlen = ops->ooblen - readlen;

	if (ret < 0)
		return ret;

	if (mtd->ecc_stats.failed - stats.failed)
		return -EBADMSG;

	return max_bitflips;
}

/**
 * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
 * @mtd: MTD device structure
 * @from: offset to read from
 * @ops: oob operation description structure
 *
 * NAND read data and/or out-of-band data.
 */
static int nand_read_oob(struct mtd_info *mtd, loff_t from,
			 struct mtd_oob_ops *ops)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int ret;

	ops->retlen = 0;

	/* Reject unsupported OOB modes before touching the device */
	if (ops->mode != MTD_OPS_PLACE_OOB &&
	    ops->mode != MTD_OPS_AUTO_OOB &&
	    ops->mode != MTD_OPS_RAW)
		return -ENOTSUPP;

	ret = nand_get_device(chip);
	if (ret)
		return ret;

	if (!ops->datbuf)
		ret = nand_do_read_oob(chip, from, ops);
	else
		ret = nand_do_read_ops(chip, from, ops);

	nand_release_device(chip);
	return ret;
}

/**
 * nand_write_page_raw_notsupp - dummy raw page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * Returns -ENOTSUPP unconditionally.
 */
int nand_write_page_raw_notsupp(struct nand_chip *chip, const u8 *buf,
				int oob_required, int page)
{
	return -ENOTSUPP;
}

/**
 * nand_write_page_raw - [INTERN] raw page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * Not for syndrome calculating ECC controllers, which use a special oob layout.
 */
int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
			int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
	if (ret)
		return ret;

	if (oob_required) {
		ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
					 false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
EXPORT_SYMBOL(nand_write_page_raw);

/**
 * nand_monolithic_write_page_raw - Monolithic page write in raw mode
 * @chip: NAND chip info structure
 * @buf: data buffer to write
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * This is a raw page write, ie. without any error detection/correction.
 * Monolithic means we are requesting all the relevant data (main plus
 * eventually OOB) to be sent over the bus and effectively programmed
 * into the NAND chip arrays in a single operation. This is an
 * alternative to nand_write_page_raw(), which first sends the main
 * data, then eventually send the OOB data by latching more data
 * cycles on the NAND bus, and finally sends the program command to
 * synchronize the NAND chip cache.
 */
int nand_monolithic_write_page_raw(struct nand_chip *chip, const u8 *buf,
				   int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int size = mtd->writesize;
	u8 *write_buf = (u8 *)buf;

	if (oob_required) {
		size += mtd->oobsize;

		/*
		 * Data and OOB must be contiguous for a single program
		 * operation: stage them in the internal buffer unless the
		 * caller already handed us that buffer.
		 */
		if (buf != chip->data_buf) {
			write_buf = nand_get_data_buf(chip);
			memcpy(write_buf, buf, mtd->writesize);
		}
	}

	return nand_prog_page_op(chip, page, 0, write_buf, size);
}
EXPORT_SYMBOL(nand_monolithic_write_page_raw);

/**
 * nand_write_page_raw_syndrome - [INTERN] raw page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * We need a special oob layout and handling even when ECC isn't checked.
 */
static int nand_write_page_raw_syndrome(struct nand_chip *chip,
					const uint8_t *buf, int oob_required,
					int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	uint8_t *oob = chip->oob_poi;
	int steps, size, ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/* Interleave data and OOB exactly as the syndrome layout expects */
	for (steps = chip->ecc.steps; steps > 0; steps--) {
		ret = nand_write_data_op(chip, buf, eccsize, false);
		if (ret)
			return ret;

		buf += eccsize;

		if (chip->ecc.prepad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		ret = nand_write_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Write whatever OOB bytes remain after the last chunk */
	size = mtd->oobsize - (oob - chip->oob_poi);
	if (size) {
		ret = nand_write_data_op(chip, oob, size, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
/**
 * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 */
static int nand_write_page_swecc(struct nand_chip *chip, const uint8_t *buf,
				 int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	const uint8_t *p = buf;

	/* Software ECC calculation */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
		chip->ecc.calculate(chip, p, &ecc_calc[i]);

	/* Place the computed ECC bytes into the OOB per the layout */
	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	return chip->ecc.write_page_raw(chip, buf, 1, page);
}

/**
 * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 */
static int nand_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
				 int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	const uint8_t *p = buf;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/* Write each ECC step and collect the controller-computed ECC */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		ret = nand_write_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		chip->ecc.calculate(chip, p, &ecc_calc[i]);
	}

	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
	if (ret)
		return ret;

	return nand_prog_page_end_op(chip);
}


/**
 * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
 * @chip: nand chip info structure
 * @offset: column address of subpage within the page
 * @data_len: data length
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 */
static int nand_write_subpage_hwecc(struct nand_chip *chip, uint32_t offset,
				    uint32_t data_len, const uint8_t *buf,
				    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	uint8_t *oob_buf = chip->oob_poi;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	int ecc_steps = chip->ecc.steps;
	uint32_t start_step = offset / ecc_size;
	uint32_t end_step = (offset + data_len - 1) / ecc_size;
	int oob_bytes = mtd->oobsize / ecc_steps;
	int step, ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (step = 0; step < ecc_steps; step++) {
		/* configure controller for WRITE access */
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		/* write data (untouched subpages already masked by 0xFF) */
		ret = nand_write_data_op(chip, buf, ecc_size, false);
		if (ret)
			return ret;

		/* mask ECC of un-touched subpages by padding 0xFF */
		if ((step < start_step) || (step > end_step))
			memset(ecc_calc, 0xff, ecc_bytes);
		else
			chip->ecc.calculate(chip, buf, ecc_calc);

		/* mask OOB of un-touched subpages by padding 0xFF */
		/* if oob_required, preserve OOB metadata of written subpage */
		if (!oob_required || (step < start_step) || (step > end_step))
			memset(oob_buf, 0xff, oob_bytes);

		buf += ecc_size;
		ecc_calc += ecc_bytes;
		oob_buf += oob_bytes;
	}

	/* copy calculated ECC for whole page to chip->buffer->oob */
	/* this include masked-value(0xFF) for unwritten subpages */
	ecc_calc = chip->ecc.calc_buf;
	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	/* write OOB buffer to NAND device */
	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
	if (ret)
		return ret;

	return nand_prog_page_end_op(chip);
}


/**
 * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * The hw generator calculates the error syndrome automatically. Therefore we
 * need a special oob layout and handling.
 */
static int nand_write_page_syndrome(struct nand_chip *chip, const uint8_t *buf,
				    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	const uint8_t *p = buf;
	uint8_t *oob = chip->oob_poi;
	int ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/*
	 * Per step: data, optional prepad, controller-computed ECC,
	 * optional postpad — mirroring the syndrome read layout.
	 */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		ret = nand_write_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		if (chip->ecc.prepad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		chip->ecc.calculate(chip, p, oob);

		ret = nand_write_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Calculate remaining oob bytes */
	i = mtd->oobsize - (oob - chip->oob_poi);
	if (i) {
		ret = nand_write_data_op(chip, oob, i, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}

/**
 * nand_write_page - write one page
 * @chip: NAND chip descriptor
 * @offset: address offset within the page
 * @data_len: length of actual data to be written
 * @buf: the data to write
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 * @raw: use _raw version of write_page
 */
static int nand_write_page(struct nand_chip *chip, uint32_t offset,
			   int data_len, const uint8_t *buf, int oob_required,
			   int page, int raw)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int status, subpage;

	/* Use the subpage path only when supported and actually needed */
	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
	    chip->ecc.write_subpage)
		subpage = offset || (data_len < mtd->writesize);
	else
		subpage = 0;

	if (unlikely(raw))
		status = chip->ecc.write_page_raw(chip, buf, oob_required,
						  page);
	else if (subpage)
		status = chip->ecc.write_subpage(chip, offset, data_len, buf,
						 oob_required, page);
	else
		status = chip->ecc.write_page(chip, buf, oob_required, page);

	if (status < 0)
		return status;

	return 0;
}

/* True when @x is not aligned to the chip's subpage size */
#define NOTALIGNED(x)	((x & (chip->subpagesize - 1)) != 0)

/**
 * nand_do_write_ops - [INTERN] NAND write with ECC
 * @chip: NAND chip object
 * @to: offset to write to
 * @ops: oob operations description structure
 *
 * NAND write with ECC.
 */
static int nand_do_write_ops(struct nand_chip *chip, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chipnr, realpage, page, column;
	uint32_t writelen = ops->len;

	uint32_t oobwritelen = ops->ooblen;
	uint32_t oobmaxlen = mtd_oobavail(mtd, ops);

	uint8_t *oob = ops->oobbuf;
	uint8_t *buf = ops->datbuf;
	int ret;
	int oob_required = oob ? 1 : 0;

	ops->retlen = 0;
	if (!writelen)
		return 0;

	/* Reject writes, which are not page aligned */
	if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
		pr_notice("%s: attempt to write non page aligned data\n",
			  __func__);
		return -EINVAL;
	}

	/* Check if the region is secured */
	if (nand_region_is_secured(chip, to, writelen))
		return -EIO;

	column = to & (mtd->writesize - 1);

	chipnr = (int)(to >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		ret = -EIO;
		goto err_out;
	}

	realpage = (int)(to >> chip->page_shift);
	page = realpage & chip->pagemask;

	/* Invalidate the page cache, when we write to the cached page */
	if (to <= ((loff_t)chip->pagecache.page << chip->page_shift) &&
	    ((loff_t)chip->pagecache.page << chip->page_shift) < (to + ops->len))
		chip->pagecache.page = -1;

	/* Don't allow multipage oob writes with offset */
	if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
		ret = -EINVAL;
		goto err_out;
	}

	while (1) {
		int bytes = mtd->writesize;
		uint8_t *wbuf = buf;
		int use_bounce_buf;
		int part_pagewr = (column || writelen < mtd->writesize);

		if (part_pagewr)
			use_bounce_buf = 1;
		else if (chip->options & NAND_USES_DMA)
			use_bounce_buf = !virt_addr_valid(buf) ||
					 !IS_ALIGNED((unsigned long)buf,
						     chip->buf_align);
		else
			use_bounce_buf = 0;

		/*
		 * Copy the data from the initial buffer when doing partial page
		 * writes or when a bounce buffer is required.
		 */
		if (use_bounce_buf) {
			pr_debug("%s: using write bounce buffer for buf@%p\n",
				 __func__, buf);
			if (part_pagewr)
				bytes = min_t(int, bytes - column, writelen);
			wbuf = nand_get_data_buf(chip);
			/* Pad the untouched part of the page with 0xff */
			memset(wbuf, 0xff, mtd->writesize);
			memcpy(&wbuf[column], buf, bytes);
		}

		if (unlikely(oob)) {
			size_t len = min(oobwritelen, oobmaxlen);
			oob = nand_fill_oob(chip, oob, len, ops);
			oobwritelen -= len;
		} else {
			/* We still need to erase leftover OOB data */
			memset(chip->oob_poi, 0xff, mtd->oobsize);
		}

		ret = nand_write_page(chip, column, bytes, wbuf,
				      oob_required, page,
				      (ops->mode == MTD_OPS_RAW));
		if (ret)
			break;

		writelen -= bytes;
		if (!writelen)
			break;

		column = 0;
		buf += bytes;
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}

	ops->retlen = ops->len - writelen;
	if (unlikely(oob))
		ops->oobretlen = ops->ooblen;

err_out:
	nand_deselect_target(chip);
	return ret;
}

/**
 * panic_nand_write - [MTD Interface] NAND write with ECC
 * @mtd: MTD device structure
 * @to: offset to write to
 * @len: number of bytes to write
 * @retlen: pointer to variable to store the number of written bytes
 * @buf: the data to write
 *
 * NAND write with ECC. Used when performing writes in interrupt context, this
 * may for example be called by mtdoops when writing an oops while in panic.
4313 */ 4314 static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len, 4315 size_t *retlen, const uint8_t *buf) 4316 { 4317 struct nand_chip *chip = mtd_to_nand(mtd); 4318 int chipnr = (int)(to >> chip->chip_shift); 4319 struct mtd_oob_ops ops; 4320 int ret; 4321 4322 nand_select_target(chip, chipnr); 4323 4324 /* Wait for the device to get ready */ 4325 panic_nand_wait(chip, 400); 4326 4327 memset(&ops, 0, sizeof(ops)); 4328 ops.len = len; 4329 ops.datbuf = (uint8_t *)buf; 4330 ops.mode = MTD_OPS_PLACE_OOB; 4331 4332 ret = nand_do_write_ops(chip, to, &ops); 4333 4334 *retlen = ops.retlen; 4335 return ret; 4336 } 4337 4338 /** 4339 * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band 4340 * @mtd: MTD device structure 4341 * @to: offset to write to 4342 * @ops: oob operation description structure 4343 */ 4344 static int nand_write_oob(struct mtd_info *mtd, loff_t to, 4345 struct mtd_oob_ops *ops) 4346 { 4347 struct nand_chip *chip = mtd_to_nand(mtd); 4348 int ret; 4349 4350 ops->retlen = 0; 4351 4352 ret = nand_get_device(chip); 4353 if (ret) 4354 return ret; 4355 4356 switch (ops->mode) { 4357 case MTD_OPS_PLACE_OOB: 4358 case MTD_OPS_AUTO_OOB: 4359 case MTD_OPS_RAW: 4360 break; 4361 4362 default: 4363 goto out; 4364 } 4365 4366 if (!ops->datbuf) 4367 ret = nand_do_write_oob(chip, to, ops); 4368 else 4369 ret = nand_do_write_ops(chip, to, ops); 4370 4371 out: 4372 nand_release_device(chip); 4373 return ret; 4374 } 4375 4376 /** 4377 * nand_erase - [MTD Interface] erase block(s) 4378 * @mtd: MTD device structure 4379 * @instr: erase instruction 4380 * 4381 * Erase one ore more blocks. 
 */
static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	/* MTD wrapper: forward to the low-level helper, BBT area forbidden */
	return nand_erase_nand(mtd_to_nand(mtd), instr, 0);
}

/**
 * nand_erase_nand - [INTERN] erase block(s)
 * @chip: NAND chip object
 * @instr: erase instruction
 * @allowbbt: allow erasing the bbt area
 *
 * Erase one or more blocks.
 */
int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
		    int allowbbt)
{
	int page, pages_per_block, ret, chipnr;
	loff_t len;

	pr_debug("%s: start = 0x%012llx, len = %llu\n",
		 __func__, (unsigned long long)instr->addr,
		 (unsigned long long)instr->len);

	if (check_offs_len(chip, instr->addr, instr->len))
		return -EINVAL;

	/* Check if the region is secured */
	if (nand_region_is_secured(chip, instr->addr, instr->len))
		return -EIO;

	/* Grab the lock and see if the device is available */
	ret = nand_get_device(chip);
	if (ret)
		return ret;

	/* Shift to get first page */
	page = (int)(instr->addr >> chip->page_shift);
	chipnr = (int)(instr->addr >> chip->chip_shift);

	/* Calculate pages in each block */
	pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);

	/* Select the NAND device */
	nand_select_target(chip, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		pr_debug("%s: device is write protected!\n",
			 __func__);
		ret = -EIO;
		goto erase_exit;
	}

	/* Loop through the pages */
	len = instr->len;

	while (len) {
		/* Check if we have a bad block, we do not erase bad blocks! */
		if (nand_block_checkbad(chip, ((loff_t) page) <<
					chip->page_shift, allowbbt)) {
			pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
				__func__, page);
			ret = -EIO;
			goto erase_exit;
		}

		/*
		 * Invalidate the page cache, if we erase the block which
		 * contains the current cached page.
		 */
		if (page <= chip->pagecache.page && chip->pagecache.page <
		    (page + pages_per_block))
			chip->pagecache.page = -1;

		/* Convert the page number to a per-target block number */
		ret = nand_erase_op(chip, (page & chip->pagemask) >>
				    (chip->phys_erase_shift - chip->page_shift));
		if (ret) {
			pr_debug("%s: failed erase, page 0x%08x\n",
				 __func__, page);
			instr->fail_addr =
				((loff_t)page << chip->page_shift);
			goto erase_exit;
		}

		/* Increment page address and decrement length */
		len -= (1ULL << chip->phys_erase_shift);
		page += pages_per_block;

		/* Check, if we cross a chip boundary */
		if (len && !(page & chip->pagemask)) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}

	ret = 0;
erase_exit:

	/* Deselect and wake up anyone waiting on the device */
	nand_deselect_target(chip);
	nand_release_device(chip);

	/* Return more or less happy */
	return ret;
}

/**
 * nand_sync - [MTD Interface] sync
 * @mtd: MTD device structure
 *
 * Sync is actually a wait for chip ready function.
 */
static void nand_sync(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	pr_debug("%s: called\n", __func__);

	/* Grab the lock and see if the device is available */
	WARN_ON(nand_get_device(chip));
	/* Release it and go back */
	nand_release_device(chip);
}

/**
 * nand_block_isbad - [MTD Interface] Check if block at offset is bad
 * @mtd: MTD device structure
 * @offs: offset relative to mtd start
 */
static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int chipnr = (int)(offs >> chip->chip_shift);
	int ret;

	/* Select the NAND device */
	ret = nand_get_device(chip);
	if (ret)
		return ret;

	nand_select_target(chip, chipnr);

	/* allowbbt = 0: BBT blocks are reported as bad here */
	ret = nand_block_checkbad(chip, offs, 0);

	nand_deselect_target(chip);
	nand_release_device(chip);

	return ret;
}

/**
 * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
 * @mtd: MTD device structure
 * @ofs: offset relative to mtd start
 */
static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	int ret;

	ret = nand_block_isbad(mtd, ofs);
	if (ret) {
		/* If it was bad already, return success and do nothing */
		if (ret > 0)
			return 0;
		/* Negative value: propagate the error from the check */
		return ret;
	}

	return nand_block_markbad_lowlevel(mtd_to_nand(mtd), ofs);
}

/**
 * nand_suspend - [MTD Interface] Suspend the NAND flash
 * @mtd: MTD device structure
 *
 * Returns 0 for success or negative error code otherwise.
4559 */ 4560 static int nand_suspend(struct mtd_info *mtd) 4561 { 4562 struct nand_chip *chip = mtd_to_nand(mtd); 4563 int ret = 0; 4564 4565 mutex_lock(&chip->lock); 4566 if (chip->ops.suspend) 4567 ret = chip->ops.suspend(chip); 4568 if (!ret) 4569 chip->suspended = 1; 4570 mutex_unlock(&chip->lock); 4571 4572 return ret; 4573 } 4574 4575 /** 4576 * nand_resume - [MTD Interface] Resume the NAND flash 4577 * @mtd: MTD device structure 4578 */ 4579 static void nand_resume(struct mtd_info *mtd) 4580 { 4581 struct nand_chip *chip = mtd_to_nand(mtd); 4582 4583 mutex_lock(&chip->lock); 4584 if (chip->suspended) { 4585 if (chip->ops.resume) 4586 chip->ops.resume(chip); 4587 chip->suspended = 0; 4588 } else { 4589 pr_err("%s called for a chip which is not in suspended state\n", 4590 __func__); 4591 } 4592 mutex_unlock(&chip->lock); 4593 } 4594 4595 /** 4596 * nand_shutdown - [MTD Interface] Finish the current NAND operation and 4597 * prevent further operations 4598 * @mtd: MTD device structure 4599 */ 4600 static void nand_shutdown(struct mtd_info *mtd) 4601 { 4602 nand_suspend(mtd); 4603 } 4604 4605 /** 4606 * nand_lock - [MTD Interface] Lock the NAND flash 4607 * @mtd: MTD device structure 4608 * @ofs: offset byte address 4609 * @len: number of bytes to lock (must be a multiple of block/page size) 4610 */ 4611 static int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 4612 { 4613 struct nand_chip *chip = mtd_to_nand(mtd); 4614 4615 if (!chip->ops.lock_area) 4616 return -ENOTSUPP; 4617 4618 return chip->ops.lock_area(chip, ofs, len); 4619 } 4620 4621 /** 4622 * nand_unlock - [MTD Interface] Unlock the NAND flash 4623 * @mtd: MTD device structure 4624 * @ofs: offset byte address 4625 * @len: number of bytes to unlock (must be a multiple of block/page size) 4626 */ 4627 static int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 4628 { 4629 struct nand_chip *chip = mtd_to_nand(mtd); 4630 4631 if (!chip->ops.unlock_area) 4632 return -ENOTSUPP; 4633 
4634 return chip->ops.unlock_area(chip, ofs, len); 4635 } 4636 4637 /* Set default functions */ 4638 static void nand_set_defaults(struct nand_chip *chip) 4639 { 4640 /* If no controller is provided, use the dummy, legacy one. */ 4641 if (!chip->controller) { 4642 chip->controller = &chip->legacy.dummy_controller; 4643 nand_controller_init(chip->controller); 4644 } 4645 4646 nand_legacy_set_defaults(chip); 4647 4648 if (!chip->buf_align) 4649 chip->buf_align = 1; 4650 } 4651 4652 /* Sanitize ONFI strings so we can safely print them */ 4653 void sanitize_string(uint8_t *s, size_t len) 4654 { 4655 ssize_t i; 4656 4657 /* Null terminate */ 4658 s[len - 1] = 0; 4659 4660 /* Remove non printable chars */ 4661 for (i = 0; i < len - 1; i++) { 4662 if (s[i] < ' ' || s[i] > 127) 4663 s[i] = '?'; 4664 } 4665 4666 /* Remove trailing spaces */ 4667 strim(s); 4668 } 4669 4670 /* 4671 * nand_id_has_period - Check if an ID string has a given wraparound period 4672 * @id_data: the ID string 4673 * @arrlen: the length of the @id_data array 4674 * @period: the period of repitition 4675 * 4676 * Check if an ID string is repeated within a given sequence of bytes at 4677 * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a 4678 * period of 3). This is a helper function for nand_id_len(). Returns non-zero 4679 * if the repetition has a period of @period; otherwise, returns zero. 4680 */ 4681 static int nand_id_has_period(u8 *id_data, int arrlen, int period) 4682 { 4683 int i, j; 4684 for (i = 0; i < period; i++) 4685 for (j = i + period; j < arrlen; j += period) 4686 if (id_data[i] != id_data[j]) 4687 return 0; 4688 return 1; 4689 } 4690 4691 /* 4692 * nand_id_len - Get the length of an ID string returned by CMD_READID 4693 * @id_data: the ID string 4694 * @arrlen: the length of the @id_data array 4695 4696 * Returns the length of the ID string, according to known wraparound/trailing 4697 * zero patterns. If no pattern exists, returns the length of the array. 
4698 */ 4699 static int nand_id_len(u8 *id_data, int arrlen) 4700 { 4701 int last_nonzero, period; 4702 4703 /* Find last non-zero byte */ 4704 for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--) 4705 if (id_data[last_nonzero]) 4706 break; 4707 4708 /* All zeros */ 4709 if (last_nonzero < 0) 4710 return 0; 4711 4712 /* Calculate wraparound period */ 4713 for (period = 1; period < arrlen; period++) 4714 if (nand_id_has_period(id_data, arrlen, period)) 4715 break; 4716 4717 /* There's a repeated pattern */ 4718 if (period < arrlen) 4719 return period; 4720 4721 /* There are trailing zeros */ 4722 if (last_nonzero < arrlen - 1) 4723 return last_nonzero + 1; 4724 4725 /* No pattern detected */ 4726 return arrlen; 4727 } 4728 4729 /* Extract the bits of per cell from the 3rd byte of the extended ID */ 4730 static int nand_get_bits_per_cell(u8 cellinfo) 4731 { 4732 int bits; 4733 4734 bits = cellinfo & NAND_CI_CELLTYPE_MSK; 4735 bits >>= NAND_CI_CELLTYPE_SHIFT; 4736 return bits + 1; 4737 } 4738 4739 /* 4740 * Many new NAND share similar device ID codes, which represent the size of the 4741 * chip. The rest of the parameters must be decoded according to generic or 4742 * manufacturer-specific "extended ID" decoding patterns. 
 */
void nand_decode_ext_id(struct nand_chip *chip)
{
	struct nand_memory_organization *memorg;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int extid;
	u8 *id_data = chip->id.data;

	memorg = nanddev_get_memorg(&chip->base);

	/* The 3rd id byte holds MLC / multichip data */
	memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
	/* The 4th id byte is the important one */
	extid = id_data[3];

	/* Calc pagesize: 2 low bits encode 1K << n */
	memorg->pagesize = 1024 << (extid & 0x03);
	mtd->writesize = memorg->pagesize;
	extid >>= 2;
	/* Calc oobsize: 8 or 16 bytes per 512-byte chunk of the page */
	memorg->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
	mtd->oobsize = memorg->oobsize;
	extid >>= 2;
	/* Calc blocksize. Blocksize is multiples of 64KiB */
	memorg->pages_per_eraseblock = ((64 * 1024) << (extid & 0x03)) /
				       memorg->pagesize;
	mtd->erasesize = (64 * 1024) << (extid & 0x03);
	extid >>= 2;
	/* Get buswidth information */
	if (extid & 0x1)
		chip->options |= NAND_BUSWIDTH_16;
}
EXPORT_SYMBOL_GPL(nand_decode_ext_id);

/*
 * Old devices have chip data hardcoded in the device ID table. nand_decode_id
 * decodes a matching ID table entry and assigns the MTD size parameters for
 * the chip.
 */
static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;

	memorg = nanddev_get_memorg(&chip->base);

	memorg->pages_per_eraseblock = type->erasesize / type->pagesize;
	mtd->erasesize = type->erasesize;
	memorg->pagesize = type->pagesize;
	mtd->writesize = memorg->pagesize;
	/* Legacy parts carry 16 bytes of OOB per 512 bytes of page */
	memorg->oobsize = memorg->pagesize / 32;
	mtd->oobsize = memorg->oobsize;

	/* All legacy ID NAND are small-page, SLC */
	memorg->bits_per_cell = 1;
}

/*
 * Set the bad block marker/indicator (BBM/BBI) patterns according to some
 * heuristic patterns using various detected parameters (e.g., manufacturer,
 * page size, cell-type information).
 */
static void nand_decode_bbm_options(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* Set the bad block position */
	if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
		chip->badblockpos = NAND_BBM_POS_LARGE;
	else
		chip->badblockpos = NAND_BBM_POS_SMALL;
}

/* A non-zero id_len marks an entry of the full-id (exact match) table */
static inline bool is_full_id_nand(struct nand_flash_dev *type)
{
	return type->id_len;
}

/*
 * Match the chip against a full-id table entry and, on success, fill in the
 * memory organization and ECC requirements from the table. Returns true on
 * match (or false if the model string allocation fails).
 */
static bool find_full_id_nand(struct nand_chip *chip,
			      struct nand_flash_dev *type)
{
	struct nand_device *base = &chip->base;
	struct nand_ecc_props requirements;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	u8 *id_data = chip->id.data;

	memorg = nanddev_get_memorg(&chip->base);

	if (!strncmp(type->id, id_data, type->id_len)) {
		memorg->pagesize = type->pagesize;
		mtd->writesize = memorg->pagesize;
		memorg->pages_per_eraseblock = type->erasesize /
					       type->pagesize;
		mtd->erasesize = type->erasesize;
		memorg->oobsize = type->oobsize;
		mtd->oobsize = memorg->oobsize;

		memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
		memorg->eraseblocks_per_lun =
			DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
					   memorg->pagesize *
					   memorg->pages_per_eraseblock);
		chip->options |= type->options;
		/*
		 * NOTE(review): only strength/step_size of 'requirements' are
		 * initialized before being handed over — confirm the setter
		 * ignores the remaining fields.
		 */
		requirements.strength = NAND_ECC_STRENGTH(type);
		requirements.step_size = NAND_ECC_STEP(type);
		nanddev_set_ecc_requirements(base, &requirements);

		chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
		if (!chip->parameters.model)
			return false;

		return true;
	}
	return false;
}

/*
 * Manufacturer detection. Only used when the NAND is not ONFI or JEDEC
 * compliant and does not have a full-id or legacy-id entry in the nand_ids
 * table.
 */
static void nand_manufacturer_detect(struct nand_chip *chip)
{
	/*
	 * Try manufacturer detection if available and use
	 * nand_decode_ext_id() otherwise.
	 */
	if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
	    chip->manufacturer.desc->ops->detect) {
		struct nand_memory_organization *memorg;

		memorg = nanddev_get_memorg(&chip->base);

		/* The 3rd id byte holds MLC / multichip data */
		memorg->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
		chip->manufacturer.desc->ops->detect(chip);
	} else {
		nand_decode_ext_id(chip);
	}
}

/*
 * Manufacturer initialization. This function is called for all NANDs including
 * ONFI and JEDEC compliant ones.
 * Manufacturer drivers should put all their specific initialization code in
 * their ->init() hook.
 */
static int nand_manufacturer_init(struct nand_chip *chip)
{
	/* No hook provided is not an error */
	if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
	    !chip->manufacturer.desc->ops->init)
		return 0;

	return chip->manufacturer.desc->ops->init(chip);
}

/*
 * Manufacturer cleanup.
 * This function is called for all NANDs including
 * ONFI and JEDEC compliant ones.
 * Manufacturer drivers should put all their specific cleanup code in their
 * ->cleanup() hook.
 */
static void nand_manufacturer_cleanup(struct nand_chip *chip)
{
	/* Release manufacturer private data */
	if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
	    chip->manufacturer.desc->ops->cleanup)
		chip->manufacturer.desc->ops->cleanup(chip);
}

/* Printable manufacturer name, never NULL */
static const char *
nand_manufacturer_name(const struct nand_manufacturer_desc *manufacturer_desc)
{
	return manufacturer_desc ? manufacturer_desc->name : "Unknown";
}

/*
 * Get the flash and manufacturer id and lookup if the type is supported.
 */
static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
{
	const struct nand_manufacturer_desc *manufacturer_desc;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	int busw, ret;
	u8 *id_data = chip->id.data;
	u8 maf_id, dev_id;
	u64 targetsize;

	/*
	 * Let's start by initializing memorg fields that might be left
	 * unassigned by the ID-based detection logic.
	 */
	memorg = nanddev_get_memorg(&chip->base);
	memorg->planes_per_lun = 1;
	memorg->luns_per_target = 1;

	/*
	 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
	 * after power-up.
	 */
	ret = nand_reset(chip, 0);
	if (ret)
		return ret;

	/* Select the device */
	nand_select_target(chip, 0);

	/* Send the command for reading device ID */
	ret = nand_readid_op(chip, 0, id_data, 2);
	if (ret)
		return ret;

	/* Read manufacturer and device IDs */
	maf_id = id_data[0];
	dev_id = id_data[1];

	/*
	 * Try again to make sure, as some systems the bus-hold or other
	 * interface concerns can cause random data which looks like a
	 * possibly credible NAND flash to appear. If the two results do
	 * not match, ignore the device completely.
	 */

	/* Read entire ID string */
	ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data));
	if (ret)
		return ret;

	if (id_data[0] != maf_id || id_data[1] != dev_id) {
		pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
			maf_id, dev_id, id_data[0], id_data[1]);
		return -ENODEV;
	}

	chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data));

	/* Try to identify manufacturer */
	manufacturer_desc = nand_get_manufacturer_desc(maf_id);
	chip->manufacturer.desc = manufacturer_desc;

	if (!type)
		type = nand_flash_ids;

	/*
	 * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic
	 * override it.
	 * This is required to make sure initial NAND bus width set by the
	 * NAND controller driver is coherent with the real NAND bus width
	 * (extracted by auto-detection code).
	 */
	busw = chip->options & NAND_BUSWIDTH_16;

	/*
	 * The flag is only set (never cleared), reset it to its default value
	 * before starting auto-detection.
	 */
	chip->options &= ~NAND_BUSWIDTH_16;

	for (; type->name != NULL; type++) {
		if (is_full_id_nand(type)) {
			if (find_full_id_nand(chip, type))
				goto ident_done;
		} else if (dev_id == type->dev_id) {
			/* Legacy-id match: decoded below */
			break;
		}
	}

	if (!type->name || !type->pagesize) {
		/* Check if the chip is ONFI compliant */
		ret = nand_onfi_detect(chip);
		if (ret < 0)
			return ret;
		else if (ret)
			goto ident_done;

		/* Check if the chip is JEDEC compliant */
		ret = nand_jedec_detect(chip);
		if (ret < 0)
			return ret;
		else if (ret)
			goto ident_done;
	}

	if (!type->name)
		return -ENODEV;

	chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
	if (!chip->parameters.model)
		return -ENOMEM;

	if (!type->pagesize)
		nand_manufacturer_detect(chip);
	else
		nand_decode_id(chip, type);

	/* Get chip options */
	chip->options |= type->options;

	memorg->eraseblocks_per_lun =
		DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
				   memorg->pagesize *
				   memorg->pages_per_eraseblock);

ident_done:
	if (!mtd->name)
		mtd->name = chip->parameters.model;

	if (chip->options & NAND_BUSWIDTH_AUTO) {
		WARN_ON(busw & NAND_BUSWIDTH_16);
		nand_set_defaults(chip);
	} else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
		/*
		 * Check, if buswidth is correct. Hardware drivers should set
		 * chip correct!
		 */
		pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
			maf_id, dev_id);
		pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
			mtd->name);
		pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
			(chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
		ret = -EINVAL;

		goto free_detect_allocation;
	}

	nand_decode_bbm_options(chip);

	/* Calculate the address shift from the page size */
	chip->page_shift = ffs(mtd->writesize) - 1;
	/* Convert chipsize to number of pages per chip -1 */
	targetsize = nanddev_target_size(&chip->base);
	chip->pagemask = (targetsize >> chip->page_shift) - 1;

	chip->bbt_erase_shift = chip->phys_erase_shift =
		ffs(mtd->erasesize) - 1;
	/* ffs() only sees 32 bits; handle targets of 4GiB and beyond */
	if (targetsize & 0xffffffff)
		chip->chip_shift = ffs((unsigned)targetsize) - 1;
	else {
		chip->chip_shift = ffs((unsigned)(targetsize >> 32));
		chip->chip_shift += 32 - 1;
	}

	if (chip->chip_shift - chip->page_shift > 16)
		chip->options |= NAND_ROW_ADDR_3;

	chip->badblockbits = 8;

	nand_legacy_adjust_cmdfunc(chip);

	pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
		maf_id, dev_id);
	pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
		chip->parameters.model);
	pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
		(int)(targetsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
		mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
	return 0;

free_detect_allocation:
	kfree(chip->parameters.model);

	return ret;
}

/* Map the legacy "nand-ecc-mode" DT property to an ECC engine type */
static enum nand_ecc_engine_type
of_get_rawnand_ecc_engine_type_legacy(struct device_node *np)
{
	enum nand_ecc_legacy_mode {
		NAND_ECC_INVALID,
		NAND_ECC_NONE,
		NAND_ECC_SOFT,
		NAND_ECC_SOFT_BCH,
		NAND_ECC_HW,
		NAND_ECC_HW_SYNDROME,
		NAND_ECC_ON_DIE,
	};
	const char * const nand_ecc_legacy_modes[] = {
		[NAND_ECC_NONE]		= "none",
		[NAND_ECC_SOFT]		= "soft",
		[NAND_ECC_SOFT_BCH]	= "soft_bch",
		[NAND_ECC_HW]		= "hw",
		[NAND_ECC_HW_SYNDROME]	= "hw_syndrome",
		[NAND_ECC_ON_DIE]	= "on-die",
	};
	enum nand_ecc_legacy_mode eng_type;
	const char *pm;
	int err;

	err = of_property_read_string(np, "nand-ecc-mode", &pm);
	if (err)
		return NAND_ECC_ENGINE_TYPE_INVALID;

	for (eng_type = NAND_ECC_NONE;
	     eng_type < ARRAY_SIZE(nand_ecc_legacy_modes); eng_type++) {
		if (!strcasecmp(pm, nand_ecc_legacy_modes[eng_type])) {
			switch (eng_type) {
			case NAND_ECC_NONE:
				return NAND_ECC_ENGINE_TYPE_NONE;
			case NAND_ECC_SOFT:
			case NAND_ECC_SOFT_BCH:
				return NAND_ECC_ENGINE_TYPE_SOFT;
			case NAND_ECC_HW:
			case NAND_ECC_HW_SYNDROME:
				return NAND_ECC_ENGINE_TYPE_ON_HOST;
			case NAND_ECC_ON_DIE:
				return NAND_ECC_ENGINE_TYPE_ON_DIE;
			default:
				break;
			}
		}
	}

	return NAND_ECC_ENGINE_TYPE_INVALID;
}

/* Map the legacy "nand-ecc-mode" DT property to an ECC placement */
static enum nand_ecc_placement
of_get_rawnand_ecc_placement_legacy(struct device_node *np)
{
	const char *pm;
	int err;

	err = of_property_read_string(np, "nand-ecc-mode", &pm);
	if (!err) {
		if (!strcasecmp(pm, "hw_syndrome"))
			return NAND_ECC_PLACEMENT_INTERLEAVED;
	}

	return NAND_ECC_PLACEMENT_UNKNOWN;
}

/* Map the legacy "nand-ecc-mode" DT property to an ECC algorithm */
static enum nand_ecc_algo
of_get_rawnand_ecc_algo_legacy(struct device_node *np)
{
	const char *pm;
	int err;

	err = of_property_read_string(np, "nand-ecc-mode", &pm);
	if (!err) {
		if (!strcasecmp(pm, "soft"))
			return NAND_ECC_ALGO_HAMMING;
		else if (!strcasecmp(pm, "soft_bch"))
			return NAND_ECC_ALGO_BCH;
	}

	return NAND_ECC_ALGO_UNKNOWN;
}

/* Fill user_conf fields still unset from the legacy DT bindings */
static void of_get_nand_ecc_legacy_user_config(struct nand_chip *chip)
{
	struct device_node *dn = nand_get_flash_node(chip);
	struct nand_ecc_props *user_conf = &chip->base.ecc.user_conf;

	if (user_conf->engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
		user_conf->engine_type = of_get_rawnand_ecc_engine_type_legacy(dn);

	if (user_conf->algo == NAND_ECC_ALGO_UNKNOWN)
		user_conf->algo = of_get_rawnand_ecc_algo_legacy(dn);

	if (user_conf->placement == NAND_ECC_PLACEMENT_UNKNOWN)
		user_conf->placement = of_get_rawnand_ecc_placement_legacy(dn);
}

/* Read "nand-bus-width"; defaults to 8, -EIO on an unsupported value */
static int of_get_nand_bus_width(struct device_node *np)
{
	u32 val;

	if (of_property_read_u32(np, "nand-bus-width", &val))
		return 8;

	switch (val) {
	case 8:
	case 16:
		return val;
	default:
		return -EIO;
	}
}

static bool of_get_nand_on_flash_bbt(struct device_node *np)
{
	return of_property_read_bool(np, "nand-on-flash-bbt");
}

/* Parse the optional "secure-regions" DT property into chip->secure_regions */
static int of_get_nand_secure_regions(struct nand_chip *chip)
{
	struct device_node *dn = nand_get_flash_node(chip);
	struct property *prop;
	int nr_elem, i, j;

	/* Only proceed if the "secure-regions" property is present in DT */
	prop = of_find_property(dn, "secure-regions", NULL);
	if (!prop)
		return 0;

	nr_elem = of_property_count_elems_of_size(dn, "secure-regions", sizeof(u64));
	if (nr_elem <= 0)
		return nr_elem;

	/* Property is a list of (offset, size) u64 pairs */
	chip->nr_secure_regions = nr_elem / 2;
	chip->secure_regions = kcalloc(chip->nr_secure_regions, sizeof(*chip->secure_regions),
				       GFP_KERNEL);
	if (!chip->secure_regions)
		return -ENOMEM;

	for (i = 0, j = 0; i < chip->nr_secure_regions; i++, j += 2) {
		of_property_read_u64_index(dn, "secure-regions", j,
					   &chip->secure_regions[i].offset);
		of_property_read_u64_index(dn, "secure-regions", j + 1,
					   &chip->secure_regions[i].size);
	}

	return 0;
}

/**
 * rawnand_dt_parse_gpio_cs - Parse the gpio-cs property of a controller
 * @dev: Device that will be parsed. Also used for managed allocations.
 * @cs_array: Array of GPIO desc pointers allocated on success
 * @ncs_array: Number of entries in @cs_array updated on success.
 *
 * Return: 0 on success, an error otherwise.
 */
int rawnand_dt_parse_gpio_cs(struct device *dev, struct gpio_desc ***cs_array,
			     unsigned int *ncs_array)
{
	struct device_node *np = dev->of_node;
	struct gpio_desc **descs;
	int ndescs, i;

	ndescs = of_gpio_named_count(np, "cs-gpios");
	if (ndescs < 0) {
		/* Missing property is fine: no GPIO chip-selects to drive */
		dev_dbg(dev, "No valid cs-gpios property\n");
		return 0;
	}

	descs = devm_kcalloc(dev, ndescs, sizeof(*descs), GFP_KERNEL);
	if (!descs)
		return -ENOMEM;

	for (i = 0; i < ndescs; i++) {
		descs[i] = gpiod_get_index_optional(dev, "cs", i,
						    GPIOD_OUT_HIGH);
		if (IS_ERR(descs[i]))
			return PTR_ERR(descs[i]);
	}

	*ncs_array = ndescs;
	*cs_array = descs;

	return 0;
}
EXPORT_SYMBOL(rawnand_dt_parse_gpio_cs);

/* Apply generic DT properties (bus width, BBT, ECC config) to the chip */
static int rawnand_dt_init(struct nand_chip *chip)
{
	struct nand_device *nand = mtd_to_nanddev(nand_to_mtd(chip));
	struct device_node *dn = nand_get_flash_node(chip);

	if (!dn)
		return 0;

	if (of_get_nand_bus_width(dn) == 16)
		chip->options |= NAND_BUSWIDTH_16;

	if (of_property_read_bool(dn, "nand-is-boot-medium"))
		chip->options |= NAND_IS_BOOT_MEDIUM;

	if (of_get_nand_on_flash_bbt(dn))
		chip->bbt_options |= NAND_BBT_USE_FLASH;

	of_get_nand_ecc_user_config(nand);
	of_get_nand_ecc_legacy_user_config(chip);

	/*
	 * If neither the user nor the NAND controller have requested a specific
	 * ECC engine type, we will default to NAND_ECC_ENGINE_TYPE_ON_HOST.
	 */
	nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;

	/*
	 * Use the user requested engine type, unless there is none, in this
	 * case default to the NAND controller choice, otherwise fallback to
	 * the raw NAND default one.
	 */
	if (nand->ecc.user_conf.engine_type != NAND_ECC_ENGINE_TYPE_INVALID)
		chip->ecc.engine_type = nand->ecc.user_conf.engine_type;
	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
		chip->ecc.engine_type = nand->ecc.defaults.engine_type;

	chip->ecc.placement = nand->ecc.user_conf.placement;
	chip->ecc.algo = nand->ecc.user_conf.algo;
	chip->ecc.strength = nand->ecc.user_conf.strength;
	chip->ecc.size = nand->ecc.user_conf.step_size;

	return 0;
}

/**
 * nand_scan_ident - Scan for the NAND device
 * @chip: NAND chip object
 * @maxchips: number of chips to scan for
 * @table: alternative NAND ID table
 *
 * This is the first phase of the normal nand_scan() function. It reads the
 * flash ID and sets up MTD fields accordingly.
 *
 * This helper used to be called directly from controller drivers that needed
 * to tweak some ECC-related parameters before nand_scan_tail(). This separation
 * prevented dynamic allocations during this phase which was inconvenient and
 * has been banned for the benefit of the ->init_ecc()/cleanup_ecc() hooks.
 */
static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
			   struct nand_flash_dev *table)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	int nand_maf_id, nand_dev_id;
	unsigned int i;
	int ret;

	memorg = nanddev_get_memorg(&chip->base);

	/* Assume all dies are deselected when we enter nand_scan_ident(). */
	chip->cur_cs = -1;

	mutex_init(&chip->lock);

	/* Enforce the right timings for reset/detection */
	chip->current_interface_config = nand_get_reset_interface_config();

	ret = rawnand_dt_init(chip);
	if (ret)
		return ret;

	if (!mtd->name && mtd->dev.parent)
		mtd->name = dev_name(mtd->dev.parent);

	/* Set the default functions */
	nand_set_defaults(chip);

	ret = nand_legacy_check_hooks(chip);
	if (ret)
		return ret;

	memorg->ntargets = maxchips;

	/* Read the flash type */
	ret = nand_detect(chip, table);
	if (ret) {
		if (!(chip->options & NAND_SCAN_SILENT_NODEV))
			pr_warn("No NAND device found\n");
		nand_deselect_target(chip);
		return ret;
	}

	nand_maf_id = chip->id.data[0];
	nand_dev_id = chip->id.data[1];

	nand_deselect_target(chip);

	/* Check for a chip array */
	for (i = 1; i < maxchips; i++) {
		u8 id[2];

		/* See comment in nand_get_flash_type for reset */
		ret = nand_reset(chip, i);
		if (ret)
			break;

		nand_select_target(chip, i);
		/* Send the command for reading device ID */
		ret = nand_readid_op(chip, 0, id, sizeof(id));
		if (ret)
			break;
		/* Read manufacturer and device IDs */
		if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
			nand_deselect_target(chip);
			break;
		}
		nand_deselect_target(chip);
	}
	if (i > 1)
		pr_info("%d chips detected\n", i);

	/* Store the number of chips and calc total size for mtd */
	memorg->ntargets = i;
	mtd->size = i * nanddev_target_size(&chip->base);

	return 0;
}

/* Free the strings allocated during identification */
static void nand_scan_ident_cleanup(struct nand_chip *chip)
{
	kfree(chip->parameters.model);
	kfree(chip->parameters.onfi);
}

/**
 * rawnand_sw_hamming_init - Initialize the software Hamming ECC engine
 * @chip: NAND chip object
 *
 * Propagates the resolved step size/strength/layout back into chip->ecc.
 * Returns 0 for success or negative error code otherwise.
 */
int rawnand_sw_hamming_init(struct nand_chip *chip)
{
	struct nand_ecc_sw_hamming_conf *engine_conf;
	struct nand_device *base = &chip->base;
	int ret;

	base->ecc.user_conf.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
	base->ecc.user_conf.algo = NAND_ECC_ALGO_HAMMING;
	base->ecc.user_conf.strength = chip->ecc.strength;
	base->ecc.user_conf.step_size = chip->ecc.size;

	ret = nand_ecc_sw_hamming_init_ctx(base);
	if (ret)
		return ret;

	engine_conf = base->ecc.ctx.priv;

	/* Honor the Smart Media byte ordering if the driver requested it */
	if (chip->ecc.options & NAND_ECC_SOFT_HAMMING_SM_ORDER)
		engine_conf->sm_order = true;

	chip->ecc.size = base->ecc.ctx.conf.step_size;
	chip->ecc.strength = base->ecc.ctx.conf.strength;
	chip->ecc.total = base->ecc.ctx.total;
	chip->ecc.steps = nanddev_get_ecc_nsteps(base);
	chip->ecc.bytes = base->ecc.ctx.total / nanddev_get_ecc_nsteps(base);

	return 0;
}
EXPORT_SYMBOL(rawnand_sw_hamming_init);

/* Compute the Hamming ECC bytes for one buffer */
int rawnand_sw_hamming_calculate(struct nand_chip *chip,
				 const unsigned char *buf,
				 unsigned char *code)
{
	struct nand_device *base = &chip->base;

	return nand_ecc_sw_hamming_calculate(base, buf, code);
}
EXPORT_SYMBOL(rawnand_sw_hamming_calculate);

/* Correct @buf in place given the read and freshly calculated ECC bytes */
int rawnand_sw_hamming_correct(struct nand_chip *chip,
			       unsigned char *buf,
			       unsigned char *read_ecc,
			       unsigned char *calc_ecc)
{
	struct nand_device *base = &chip->base;

	return nand_ecc_sw_hamming_correct(base, buf, read_ecc, calc_ecc);
}
EXPORT_SYMBOL(rawnand_sw_hamming_correct);

/* Release the software Hamming ECC engine context */
void rawnand_sw_hamming_cleanup(struct nand_chip *chip)
{
	struct nand_device *base = &chip->base;

	nand_ecc_sw_hamming_cleanup_ctx(base);
}
EXPORT_SYMBOL(rawnand_sw_hamming_cleanup);

/**
 * rawnand_sw_bch_init - Initialize the software BCH ECC engine
 * @chip: NAND chip object
 *
 * Propagates the resolved step size/strength/layout back into chip->ecc.
 * Returns 0 for success or negative error code otherwise.
 */
int rawnand_sw_bch_init(struct nand_chip *chip)
{
	struct nand_device *base = &chip->base;
	const struct nand_ecc_props *ecc_conf = nanddev_get_ecc_conf(base);
	int ret;

	base->ecc.user_conf.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
	base->ecc.user_conf.algo = NAND_ECC_ALGO_BCH;
	base->ecc.user_conf.step_size = chip->ecc.size;
	base->ecc.user_conf.strength = chip->ecc.strength;

	ret = nand_ecc_sw_bch_init_ctx(base);
	if (ret)
		return ret;

	chip->ecc.size = ecc_conf->step_size;
	chip->ecc.strength = ecc_conf->strength;
	chip->ecc.total = base->ecc.ctx.total;
	chip->ecc.steps = nanddev_get_ecc_nsteps(base);
	chip->ecc.bytes = base->ecc.ctx.total / nanddev_get_ecc_nsteps(base);

	return 0;
}
EXPORT_SYMBOL(rawnand_sw_bch_init);

/* Compute the BCH ECC bytes for one buffer */
static int rawnand_sw_bch_calculate(struct nand_chip *chip,
				    const unsigned char *buf,
				    unsigned char *code)
{
	struct nand_device *base = &chip->base;

	return nand_ecc_sw_bch_calculate(base, buf, code);
}

/* Correct @buf in place given the read and freshly calculated ECC bytes */
int rawnand_sw_bch_correct(struct nand_chip *chip, unsigned char *buf,
			   unsigned char *read_ecc, unsigned char *calc_ecc)
{
	struct nand_device *base = &chip->base;

	return nand_ecc_sw_bch_correct(base, buf, read_ecc, calc_ecc);
}
EXPORT_SYMBOL(rawnand_sw_bch_correct);

/* Release the software BCH ECC engine context */
void rawnand_sw_bch_cleanup(struct nand_chip *chip)
{
	struct nand_device *base = &chip->base;

	nand_ecc_sw_bch_cleanup_ctx(base);
}
EXPORT_SYMBOL(rawnand_sw_bch_cleanup);

static int nand_set_ecc_on_host_ops(struct nand_chip *chip)
{
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	switch (ecc->placement) {
	case NAND_ECC_PLACEMENT_UNKNOWN:
	case NAND_ECC_PLACEMENT_OOB:
		/* Use standard hwecc read page function? */
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_hwecc;
		if (!ecc->write_page)
			ecc->write_page = nand_write_page_hwecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_std;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_std;
		if (!ecc->read_subpage)
			ecc->read_subpage = nand_read_subpage;
		if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
			ecc->write_subpage = nand_write_subpage_hwecc;
		fallthrough;

	case NAND_ECC_PLACEMENT_INTERLEAVED:
		if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
		    (!ecc->read_page ||
		     ecc->read_page == nand_read_page_hwecc ||
		     !ecc->write_page ||
		     ecc->write_page == nand_write_page_hwecc)) {
			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
			return -EINVAL;
		}
		/* Use standard syndrome read/write page function?
*/ 5587 if (!ecc->read_page) 5588 ecc->read_page = nand_read_page_syndrome; 5589 if (!ecc->write_page) 5590 ecc->write_page = nand_write_page_syndrome; 5591 if (!ecc->read_page_raw) 5592 ecc->read_page_raw = nand_read_page_raw_syndrome; 5593 if (!ecc->write_page_raw) 5594 ecc->write_page_raw = nand_write_page_raw_syndrome; 5595 if (!ecc->read_oob) 5596 ecc->read_oob = nand_read_oob_syndrome; 5597 if (!ecc->write_oob) 5598 ecc->write_oob = nand_write_oob_syndrome; 5599 break; 5600 5601 default: 5602 pr_warn("Invalid NAND_ECC_PLACEMENT %d\n", 5603 ecc->placement); 5604 return -EINVAL; 5605 } 5606 5607 return 0; 5608 } 5609 5610 static int nand_set_ecc_soft_ops(struct nand_chip *chip) 5611 { 5612 struct mtd_info *mtd = nand_to_mtd(chip); 5613 struct nand_device *nanddev = mtd_to_nanddev(mtd); 5614 struct nand_ecc_ctrl *ecc = &chip->ecc; 5615 int ret; 5616 5617 if (WARN_ON(ecc->engine_type != NAND_ECC_ENGINE_TYPE_SOFT)) 5618 return -EINVAL; 5619 5620 switch (ecc->algo) { 5621 case NAND_ECC_ALGO_HAMMING: 5622 ecc->calculate = rawnand_sw_hamming_calculate; 5623 ecc->correct = rawnand_sw_hamming_correct; 5624 ecc->read_page = nand_read_page_swecc; 5625 ecc->read_subpage = nand_read_subpage; 5626 ecc->write_page = nand_write_page_swecc; 5627 if (!ecc->read_page_raw) 5628 ecc->read_page_raw = nand_read_page_raw; 5629 if (!ecc->write_page_raw) 5630 ecc->write_page_raw = nand_write_page_raw; 5631 ecc->read_oob = nand_read_oob_std; 5632 ecc->write_oob = nand_write_oob_std; 5633 if (!ecc->size) 5634 ecc->size = 256; 5635 ecc->bytes = 3; 5636 ecc->strength = 1; 5637 5638 if (IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC)) 5639 ecc->options |= NAND_ECC_SOFT_HAMMING_SM_ORDER; 5640 5641 ret = rawnand_sw_hamming_init(chip); 5642 if (ret) { 5643 WARN(1, "Hamming ECC initialization failed!\n"); 5644 return ret; 5645 } 5646 5647 return 0; 5648 case NAND_ECC_ALGO_BCH: 5649 if (!IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)) { 5650 WARN(1, "CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n"); 5651 
return -EINVAL; 5652 } 5653 ecc->calculate = rawnand_sw_bch_calculate; 5654 ecc->correct = rawnand_sw_bch_correct; 5655 ecc->read_page = nand_read_page_swecc; 5656 ecc->read_subpage = nand_read_subpage; 5657 ecc->write_page = nand_write_page_swecc; 5658 if (!ecc->read_page_raw) 5659 ecc->read_page_raw = nand_read_page_raw; 5660 if (!ecc->write_page_raw) 5661 ecc->write_page_raw = nand_write_page_raw; 5662 ecc->read_oob = nand_read_oob_std; 5663 ecc->write_oob = nand_write_oob_std; 5664 5665 /* 5666 * We can only maximize ECC config when the default layout is 5667 * used, otherwise we don't know how many bytes can really be 5668 * used. 5669 */ 5670 if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH && 5671 mtd->ooblayout != nand_get_large_page_ooblayout()) 5672 nanddev->ecc.user_conf.flags &= ~NAND_ECC_MAXIMIZE_STRENGTH; 5673 5674 ret = rawnand_sw_bch_init(chip); 5675 if (ret) { 5676 WARN(1, "BCH ECC initialization failed!\n"); 5677 return ret; 5678 } 5679 5680 return 0; 5681 default: 5682 WARN(1, "Unsupported ECC algorithm!\n"); 5683 return -EINVAL; 5684 } 5685 } 5686 5687 /** 5688 * nand_check_ecc_caps - check the sanity of preset ECC settings 5689 * @chip: nand chip info structure 5690 * @caps: ECC caps info structure 5691 * @oobavail: OOB size that the ECC engine can use 5692 * 5693 * When ECC step size and strength are already set, check if they are supported 5694 * by the controller and the calculated ECC bytes fit within the chip's OOB. 5695 * On success, the calculated ECC bytes is set. 
5696 */ 5697 static int 5698 nand_check_ecc_caps(struct nand_chip *chip, 5699 const struct nand_ecc_caps *caps, int oobavail) 5700 { 5701 struct mtd_info *mtd = nand_to_mtd(chip); 5702 const struct nand_ecc_step_info *stepinfo; 5703 int preset_step = chip->ecc.size; 5704 int preset_strength = chip->ecc.strength; 5705 int ecc_bytes, nsteps = mtd->writesize / preset_step; 5706 int i, j; 5707 5708 for (i = 0; i < caps->nstepinfos; i++) { 5709 stepinfo = &caps->stepinfos[i]; 5710 5711 if (stepinfo->stepsize != preset_step) 5712 continue; 5713 5714 for (j = 0; j < stepinfo->nstrengths; j++) { 5715 if (stepinfo->strengths[j] != preset_strength) 5716 continue; 5717 5718 ecc_bytes = caps->calc_ecc_bytes(preset_step, 5719 preset_strength); 5720 if (WARN_ON_ONCE(ecc_bytes < 0)) 5721 return ecc_bytes; 5722 5723 if (ecc_bytes * nsteps > oobavail) { 5724 pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB", 5725 preset_step, preset_strength); 5726 return -ENOSPC; 5727 } 5728 5729 chip->ecc.bytes = ecc_bytes; 5730 5731 return 0; 5732 } 5733 } 5734 5735 pr_err("ECC (step, strength) = (%d, %d) not supported on this controller", 5736 preset_step, preset_strength); 5737 5738 return -ENOTSUPP; 5739 } 5740 5741 /** 5742 * nand_match_ecc_req - meet the chip's requirement with least ECC bytes 5743 * @chip: nand chip info structure 5744 * @caps: ECC engine caps info structure 5745 * @oobavail: OOB size that the ECC engine can use 5746 * 5747 * If a chip's ECC requirement is provided, try to meet it with the least 5748 * number of ECC bytes (i.e. with the largest number of OOB-free bytes). 5749 * On success, the chosen ECC settings are set. 
5750 */ 5751 static int 5752 nand_match_ecc_req(struct nand_chip *chip, 5753 const struct nand_ecc_caps *caps, int oobavail) 5754 { 5755 const struct nand_ecc_props *requirements = 5756 nanddev_get_ecc_requirements(&chip->base); 5757 struct mtd_info *mtd = nand_to_mtd(chip); 5758 const struct nand_ecc_step_info *stepinfo; 5759 int req_step = requirements->step_size; 5760 int req_strength = requirements->strength; 5761 int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total; 5762 int best_step, best_strength, best_ecc_bytes; 5763 int best_ecc_bytes_total = INT_MAX; 5764 int i, j; 5765 5766 /* No information provided by the NAND chip */ 5767 if (!req_step || !req_strength) 5768 return -ENOTSUPP; 5769 5770 /* number of correctable bits the chip requires in a page */ 5771 req_corr = mtd->writesize / req_step * req_strength; 5772 5773 for (i = 0; i < caps->nstepinfos; i++) { 5774 stepinfo = &caps->stepinfos[i]; 5775 step_size = stepinfo->stepsize; 5776 5777 for (j = 0; j < stepinfo->nstrengths; j++) { 5778 strength = stepinfo->strengths[j]; 5779 5780 /* 5781 * If both step size and strength are smaller than the 5782 * chip's requirement, it is not easy to compare the 5783 * resulted reliability. 5784 */ 5785 if (step_size < req_step && strength < req_strength) 5786 continue; 5787 5788 if (mtd->writesize % step_size) 5789 continue; 5790 5791 nsteps = mtd->writesize / step_size; 5792 5793 ecc_bytes = caps->calc_ecc_bytes(step_size, strength); 5794 if (WARN_ON_ONCE(ecc_bytes < 0)) 5795 continue; 5796 ecc_bytes_total = ecc_bytes * nsteps; 5797 5798 if (ecc_bytes_total > oobavail || 5799 strength * nsteps < req_corr) 5800 continue; 5801 5802 /* 5803 * We assume the best is to meet the chip's requrement 5804 * with the least number of ECC bytes. 
5805 */ 5806 if (ecc_bytes_total < best_ecc_bytes_total) { 5807 best_ecc_bytes_total = ecc_bytes_total; 5808 best_step = step_size; 5809 best_strength = strength; 5810 best_ecc_bytes = ecc_bytes; 5811 } 5812 } 5813 } 5814 5815 if (best_ecc_bytes_total == INT_MAX) 5816 return -ENOTSUPP; 5817 5818 chip->ecc.size = best_step; 5819 chip->ecc.strength = best_strength; 5820 chip->ecc.bytes = best_ecc_bytes; 5821 5822 return 0; 5823 } 5824 5825 /** 5826 * nand_maximize_ecc - choose the max ECC strength available 5827 * @chip: nand chip info structure 5828 * @caps: ECC engine caps info structure 5829 * @oobavail: OOB size that the ECC engine can use 5830 * 5831 * Choose the max ECC strength that is supported on the controller, and can fit 5832 * within the chip's OOB. On success, the chosen ECC settings are set. 5833 */ 5834 static int 5835 nand_maximize_ecc(struct nand_chip *chip, 5836 const struct nand_ecc_caps *caps, int oobavail) 5837 { 5838 struct mtd_info *mtd = nand_to_mtd(chip); 5839 const struct nand_ecc_step_info *stepinfo; 5840 int step_size, strength, nsteps, ecc_bytes, corr; 5841 int best_corr = 0; 5842 int best_step = 0; 5843 int best_strength, best_ecc_bytes; 5844 int i, j; 5845 5846 for (i = 0; i < caps->nstepinfos; i++) { 5847 stepinfo = &caps->stepinfos[i]; 5848 step_size = stepinfo->stepsize; 5849 5850 /* If chip->ecc.size is already set, respect it */ 5851 if (chip->ecc.size && step_size != chip->ecc.size) 5852 continue; 5853 5854 for (j = 0; j < stepinfo->nstrengths; j++) { 5855 strength = stepinfo->strengths[j]; 5856 5857 if (mtd->writesize % step_size) 5858 continue; 5859 5860 nsteps = mtd->writesize / step_size; 5861 5862 ecc_bytes = caps->calc_ecc_bytes(step_size, strength); 5863 if (WARN_ON_ONCE(ecc_bytes < 0)) 5864 continue; 5865 5866 if (ecc_bytes * nsteps > oobavail) 5867 continue; 5868 5869 corr = strength * nsteps; 5870 5871 /* 5872 * If the number of correctable bits is the same, 5873 * bigger step_size has more reliability. 
5874 */ 5875 if (corr > best_corr || 5876 (corr == best_corr && step_size > best_step)) { 5877 best_corr = corr; 5878 best_step = step_size; 5879 best_strength = strength; 5880 best_ecc_bytes = ecc_bytes; 5881 } 5882 } 5883 } 5884 5885 if (!best_corr) 5886 return -ENOTSUPP; 5887 5888 chip->ecc.size = best_step; 5889 chip->ecc.strength = best_strength; 5890 chip->ecc.bytes = best_ecc_bytes; 5891 5892 return 0; 5893 } 5894 5895 /** 5896 * nand_ecc_choose_conf - Set the ECC strength and ECC step size 5897 * @chip: nand chip info structure 5898 * @caps: ECC engine caps info structure 5899 * @oobavail: OOB size that the ECC engine can use 5900 * 5901 * Choose the ECC configuration according to following logic. 5902 * 5903 * 1. If both ECC step size and ECC strength are already set (usually by DT) 5904 * then check if it is supported by this controller. 5905 * 2. If the user provided the nand-ecc-maximize property, then select maximum 5906 * ECC strength. 5907 * 3. Otherwise, try to match the ECC step size and ECC strength closest 5908 * to the chip's requirement. If available OOB size can't fit the chip 5909 * requirement then fallback to the maximum ECC step size and ECC strength. 5910 * 5911 * On success, the chosen ECC settings are set. 
5912 */ 5913 int nand_ecc_choose_conf(struct nand_chip *chip, 5914 const struct nand_ecc_caps *caps, int oobavail) 5915 { 5916 struct mtd_info *mtd = nand_to_mtd(chip); 5917 struct nand_device *nanddev = mtd_to_nanddev(mtd); 5918 5919 if (WARN_ON(oobavail < 0 || oobavail > mtd->oobsize)) 5920 return -EINVAL; 5921 5922 if (chip->ecc.size && chip->ecc.strength) 5923 return nand_check_ecc_caps(chip, caps, oobavail); 5924 5925 if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH) 5926 return nand_maximize_ecc(chip, caps, oobavail); 5927 5928 if (!nand_match_ecc_req(chip, caps, oobavail)) 5929 return 0; 5930 5931 return nand_maximize_ecc(chip, caps, oobavail); 5932 } 5933 EXPORT_SYMBOL_GPL(nand_ecc_choose_conf); 5934 5935 static int rawnand_erase(struct nand_device *nand, const struct nand_pos *pos) 5936 { 5937 struct nand_chip *chip = container_of(nand, struct nand_chip, 5938 base); 5939 unsigned int eb = nanddev_pos_to_row(nand, pos); 5940 int ret; 5941 5942 eb >>= nand->rowconv.eraseblock_addr_shift; 5943 5944 nand_select_target(chip, pos->target); 5945 ret = nand_erase_op(chip, eb); 5946 nand_deselect_target(chip); 5947 5948 return ret; 5949 } 5950 5951 static int rawnand_markbad(struct nand_device *nand, 5952 const struct nand_pos *pos) 5953 { 5954 struct nand_chip *chip = container_of(nand, struct nand_chip, 5955 base); 5956 5957 return nand_markbad_bbm(chip, nanddev_pos_to_offs(nand, pos)); 5958 } 5959 5960 static bool rawnand_isbad(struct nand_device *nand, const struct nand_pos *pos) 5961 { 5962 struct nand_chip *chip = container_of(nand, struct nand_chip, 5963 base); 5964 int ret; 5965 5966 nand_select_target(chip, pos->target); 5967 ret = nand_isbad_bbm(chip, nanddev_pos_to_offs(nand, pos)); 5968 nand_deselect_target(chip); 5969 5970 return ret; 5971 } 5972 5973 static const struct nand_ops rawnand_ops = { 5974 .erase = rawnand_erase, 5975 .markbad = rawnand_markbad, 5976 .isbad = rawnand_isbad, 5977 }; 5978 5979 /** 5980 * nand_scan_tail - Scan for 
the NAND device 5981 * @chip: NAND chip object 5982 * 5983 * This is the second phase of the normal nand_scan() function. It fills out 5984 * all the uninitialized function pointers with the defaults and scans for a 5985 * bad block table if appropriate. 5986 */ 5987 static int nand_scan_tail(struct nand_chip *chip) 5988 { 5989 struct mtd_info *mtd = nand_to_mtd(chip); 5990 struct nand_ecc_ctrl *ecc = &chip->ecc; 5991 int ret, i; 5992 5993 /* New bad blocks should be marked in OOB, flash-based BBT, or both */ 5994 if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) && 5995 !(chip->bbt_options & NAND_BBT_USE_FLASH))) { 5996 return -EINVAL; 5997 } 5998 5999 chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL); 6000 if (!chip->data_buf) 6001 return -ENOMEM; 6002 6003 /* 6004 * FIXME: some NAND manufacturer drivers expect the first die to be 6005 * selected when manufacturer->init() is called. They should be fixed 6006 * to explictly select the relevant die when interacting with the NAND 6007 * chip. 6008 */ 6009 nand_select_target(chip, 0); 6010 ret = nand_manufacturer_init(chip); 6011 nand_deselect_target(chip); 6012 if (ret) 6013 goto err_free_buf; 6014 6015 /* Set the internal oob buffer location, just after the page data */ 6016 chip->oob_poi = chip->data_buf + mtd->writesize; 6017 6018 /* 6019 * If no default placement scheme is given, select an appropriate one. 6020 */ 6021 if (!mtd->ooblayout && 6022 !(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT && 6023 ecc->algo == NAND_ECC_ALGO_BCH) && 6024 !(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT && 6025 ecc->algo == NAND_ECC_ALGO_HAMMING)) { 6026 switch (mtd->oobsize) { 6027 case 8: 6028 case 16: 6029 mtd_set_ooblayout(mtd, nand_get_small_page_ooblayout()); 6030 break; 6031 case 64: 6032 case 128: 6033 mtd_set_ooblayout(mtd, 6034 nand_get_large_page_hamming_ooblayout()); 6035 break; 6036 default: 6037 /* 6038 * Expose the whole OOB area to users if ECC_NONE 6039 * is passed. 
We could do that for all kind of 6040 * ->oobsize, but we must keep the old large/small 6041 * page with ECC layout when ->oobsize <= 128 for 6042 * compatibility reasons. 6043 */ 6044 if (ecc->engine_type == NAND_ECC_ENGINE_TYPE_NONE) { 6045 mtd_set_ooblayout(mtd, 6046 nand_get_large_page_ooblayout()); 6047 break; 6048 } 6049 6050 WARN(1, "No oob scheme defined for oobsize %d\n", 6051 mtd->oobsize); 6052 ret = -EINVAL; 6053 goto err_nand_manuf_cleanup; 6054 } 6055 } 6056 6057 /* 6058 * Check ECC mode, default to software if 3byte/512byte hardware ECC is 6059 * selected and we have 256 byte pagesize fallback to software ECC 6060 */ 6061 6062 switch (ecc->engine_type) { 6063 case NAND_ECC_ENGINE_TYPE_ON_HOST: 6064 ret = nand_set_ecc_on_host_ops(chip); 6065 if (ret) 6066 goto err_nand_manuf_cleanup; 6067 6068 if (mtd->writesize >= ecc->size) { 6069 if (!ecc->strength) { 6070 WARN(1, "Driver must set ecc.strength when using hardware ECC\n"); 6071 ret = -EINVAL; 6072 goto err_nand_manuf_cleanup; 6073 } 6074 break; 6075 } 6076 pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n", 6077 ecc->size, mtd->writesize); 6078 ecc->engine_type = NAND_ECC_ENGINE_TYPE_SOFT; 6079 ecc->algo = NAND_ECC_ALGO_HAMMING; 6080 fallthrough; 6081 6082 case NAND_ECC_ENGINE_TYPE_SOFT: 6083 ret = nand_set_ecc_soft_ops(chip); 6084 if (ret) 6085 goto err_nand_manuf_cleanup; 6086 break; 6087 6088 case NAND_ECC_ENGINE_TYPE_ON_DIE: 6089 if (!ecc->read_page || !ecc->write_page) { 6090 WARN(1, "No ECC functions supplied; on-die ECC not possible\n"); 6091 ret = -EINVAL; 6092 goto err_nand_manuf_cleanup; 6093 } 6094 if (!ecc->read_oob) 6095 ecc->read_oob = nand_read_oob_std; 6096 if (!ecc->write_oob) 6097 ecc->write_oob = nand_write_oob_std; 6098 break; 6099 6100 case NAND_ECC_ENGINE_TYPE_NONE: 6101 pr_warn("NAND_ECC_ENGINE_TYPE_NONE selected by board driver. 
This is not recommended!\n"); 6102 ecc->read_page = nand_read_page_raw; 6103 ecc->write_page = nand_write_page_raw; 6104 ecc->read_oob = nand_read_oob_std; 6105 ecc->read_page_raw = nand_read_page_raw; 6106 ecc->write_page_raw = nand_write_page_raw; 6107 ecc->write_oob = nand_write_oob_std; 6108 ecc->size = mtd->writesize; 6109 ecc->bytes = 0; 6110 ecc->strength = 0; 6111 break; 6112 6113 default: 6114 WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->engine_type); 6115 ret = -EINVAL; 6116 goto err_nand_manuf_cleanup; 6117 } 6118 6119 if (ecc->correct || ecc->calculate) { 6120 ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL); 6121 ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL); 6122 if (!ecc->calc_buf || !ecc->code_buf) { 6123 ret = -ENOMEM; 6124 goto err_nand_manuf_cleanup; 6125 } 6126 } 6127 6128 /* For many systems, the standard OOB write also works for raw */ 6129 if (!ecc->read_oob_raw) 6130 ecc->read_oob_raw = ecc->read_oob; 6131 if (!ecc->write_oob_raw) 6132 ecc->write_oob_raw = ecc->write_oob; 6133 6134 /* propagate ecc info to mtd_info */ 6135 mtd->ecc_strength = ecc->strength; 6136 mtd->ecc_step_size = ecc->size; 6137 6138 /* 6139 * Set the number of read / write steps for one page depending on ECC 6140 * mode. 6141 */ 6142 if (!ecc->steps) 6143 ecc->steps = mtd->writesize / ecc->size; 6144 if (ecc->steps * ecc->size != mtd->writesize) { 6145 WARN(1, "Invalid ECC parameters\n"); 6146 ret = -EINVAL; 6147 goto err_nand_manuf_cleanup; 6148 } 6149 6150 if (!ecc->total) { 6151 ecc->total = ecc->steps * ecc->bytes; 6152 chip->base.ecc.ctx.total = ecc->total; 6153 } 6154 6155 if (ecc->total > mtd->oobsize) { 6156 WARN(1, "Total number of ECC bytes exceeded oobsize\n"); 6157 ret = -EINVAL; 6158 goto err_nand_manuf_cleanup; 6159 } 6160 6161 /* 6162 * The number of bytes available for a client to place data into 6163 * the out of band area. 
6164 */ 6165 ret = mtd_ooblayout_count_freebytes(mtd); 6166 if (ret < 0) 6167 ret = 0; 6168 6169 mtd->oobavail = ret; 6170 6171 /* ECC sanity check: warn if it's too weak */ 6172 if (!nand_ecc_is_strong_enough(&chip->base)) 6173 pr_warn("WARNING: %s: the ECC used on your system (%db/%dB) is too weak compared to the one required by the NAND chip (%db/%dB)\n", 6174 mtd->name, chip->ecc.strength, chip->ecc.size, 6175 nanddev_get_ecc_requirements(&chip->base)->strength, 6176 nanddev_get_ecc_requirements(&chip->base)->step_size); 6177 6178 /* Allow subpage writes up to ecc.steps. Not possible for MLC flash */ 6179 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) { 6180 switch (ecc->steps) { 6181 case 2: 6182 mtd->subpage_sft = 1; 6183 break; 6184 case 4: 6185 case 8: 6186 case 16: 6187 mtd->subpage_sft = 2; 6188 break; 6189 } 6190 } 6191 chip->subpagesize = mtd->writesize >> mtd->subpage_sft; 6192 6193 /* Invalidate the pagebuffer reference */ 6194 chip->pagecache.page = -1; 6195 6196 /* Large page NAND with SOFT_ECC should support subpage reads */ 6197 switch (ecc->engine_type) { 6198 case NAND_ECC_ENGINE_TYPE_SOFT: 6199 if (chip->page_shift > 9) 6200 chip->options |= NAND_SUBPAGE_READ; 6201 break; 6202 6203 default: 6204 break; 6205 } 6206 6207 ret = nanddev_init(&chip->base, &rawnand_ops, mtd->owner); 6208 if (ret) 6209 goto err_nand_manuf_cleanup; 6210 6211 /* Adjust the MTD_CAP_ flags when NAND_ROM is set. 
*/ 6212 if (chip->options & NAND_ROM) 6213 mtd->flags = MTD_CAP_ROM; 6214 6215 /* Fill in remaining MTD driver data */ 6216 mtd->_erase = nand_erase; 6217 mtd->_point = NULL; 6218 mtd->_unpoint = NULL; 6219 mtd->_panic_write = panic_nand_write; 6220 mtd->_read_oob = nand_read_oob; 6221 mtd->_write_oob = nand_write_oob; 6222 mtd->_sync = nand_sync; 6223 mtd->_lock = nand_lock; 6224 mtd->_unlock = nand_unlock; 6225 mtd->_suspend = nand_suspend; 6226 mtd->_resume = nand_resume; 6227 mtd->_reboot = nand_shutdown; 6228 mtd->_block_isreserved = nand_block_isreserved; 6229 mtd->_block_isbad = nand_block_isbad; 6230 mtd->_block_markbad = nand_block_markbad; 6231 mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks; 6232 6233 /* 6234 * Initialize bitflip_threshold to its default prior scan_bbt() call. 6235 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be 6236 * properly set. 6237 */ 6238 if (!mtd->bitflip_threshold) 6239 mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4); 6240 6241 /* Find the fastest data interface for this chip */ 6242 ret = nand_choose_interface_config(chip); 6243 if (ret) 6244 goto err_nanddev_cleanup; 6245 6246 /* Enter fastest possible mode on all dies. */ 6247 for (i = 0; i < nanddev_ntargets(&chip->base); i++) { 6248 ret = nand_setup_interface(chip, i); 6249 if (ret) 6250 goto err_free_interface_config; 6251 } 6252 6253 /* 6254 * Look for secure regions in the NAND chip. These regions are supposed 6255 * to be protected by a secure element like Trustzone. So the read/write 6256 * accesses to these regions will be blocked in the runtime by this 6257 * driver. 
6258 */ 6259 ret = of_get_nand_secure_regions(chip); 6260 if (ret) 6261 goto err_free_interface_config; 6262 6263 /* Check, if we should skip the bad block table scan */ 6264 if (chip->options & NAND_SKIP_BBTSCAN) 6265 return 0; 6266 6267 /* Build bad block table */ 6268 ret = nand_create_bbt(chip); 6269 if (ret) 6270 goto err_free_secure_regions; 6271 6272 return 0; 6273 6274 err_free_secure_regions: 6275 kfree(chip->secure_regions); 6276 6277 err_free_interface_config: 6278 kfree(chip->best_interface_config); 6279 6280 err_nanddev_cleanup: 6281 nanddev_cleanup(&chip->base); 6282 6283 err_nand_manuf_cleanup: 6284 nand_manufacturer_cleanup(chip); 6285 6286 err_free_buf: 6287 kfree(chip->data_buf); 6288 kfree(ecc->code_buf); 6289 kfree(ecc->calc_buf); 6290 6291 return ret; 6292 } 6293 6294 static int nand_attach(struct nand_chip *chip) 6295 { 6296 if (chip->controller->ops && chip->controller->ops->attach_chip) 6297 return chip->controller->ops->attach_chip(chip); 6298 6299 return 0; 6300 } 6301 6302 static void nand_detach(struct nand_chip *chip) 6303 { 6304 if (chip->controller->ops && chip->controller->ops->detach_chip) 6305 chip->controller->ops->detach_chip(chip); 6306 } 6307 6308 /** 6309 * nand_scan_with_ids - [NAND Interface] Scan for the NAND device 6310 * @chip: NAND chip object 6311 * @maxchips: number of chips to scan for. 6312 * @ids: optional flash IDs table 6313 * 6314 * This fills out all the uninitialized function pointers with the defaults. 6315 * The flash ID is read and the mtd/chip structures are filled with the 6316 * appropriate values. 
6317 */ 6318 int nand_scan_with_ids(struct nand_chip *chip, unsigned int maxchips, 6319 struct nand_flash_dev *ids) 6320 { 6321 int ret; 6322 6323 if (!maxchips) 6324 return -EINVAL; 6325 6326 ret = nand_scan_ident(chip, maxchips, ids); 6327 if (ret) 6328 return ret; 6329 6330 ret = nand_attach(chip); 6331 if (ret) 6332 goto cleanup_ident; 6333 6334 ret = nand_scan_tail(chip); 6335 if (ret) 6336 goto detach_chip; 6337 6338 return 0; 6339 6340 detach_chip: 6341 nand_detach(chip); 6342 cleanup_ident: 6343 nand_scan_ident_cleanup(chip); 6344 6345 return ret; 6346 } 6347 EXPORT_SYMBOL(nand_scan_with_ids); 6348 6349 /** 6350 * nand_cleanup - [NAND Interface] Free resources held by the NAND device 6351 * @chip: NAND chip object 6352 */ 6353 void nand_cleanup(struct nand_chip *chip) 6354 { 6355 if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT) { 6356 if (chip->ecc.algo == NAND_ECC_ALGO_HAMMING) 6357 rawnand_sw_hamming_cleanup(chip); 6358 else if (chip->ecc.algo == NAND_ECC_ALGO_BCH) 6359 rawnand_sw_bch_cleanup(chip); 6360 } 6361 6362 nanddev_cleanup(&chip->base); 6363 6364 /* Free secure regions data */ 6365 kfree(chip->secure_regions); 6366 6367 /* Free bad block table memory */ 6368 kfree(chip->bbt); 6369 kfree(chip->data_buf); 6370 kfree(chip->ecc.code_buf); 6371 kfree(chip->ecc.calc_buf); 6372 6373 /* Free bad block descriptor memory */ 6374 if (chip->badblock_pattern && chip->badblock_pattern->options 6375 & NAND_BBT_DYNAMICSTRUCT) 6376 kfree(chip->badblock_pattern); 6377 6378 /* Free the data interface */ 6379 kfree(chip->best_interface_config); 6380 6381 /* Free manufacturer priv data. */ 6382 nand_manufacturer_cleanup(chip); 6383 6384 /* Free controller specific allocations after chip identification */ 6385 nand_detach(chip); 6386 6387 /* Free identification phase allocations */ 6388 nand_scan_ident_cleanup(chip); 6389 } 6390 6391 EXPORT_SYMBOL_GPL(nand_cleanup); 6392 6393 MODULE_LICENSE("GPL"); 6394 MODULE_AUTHOR("Steven J. 
Hill <sjhill@realitydiluted.com>"); 6395 MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>"); 6396 MODULE_DESCRIPTION("Generic NAND flash driver code"); 6397