1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Overview: 4 * This is the generic MTD driver for NAND flash devices. It should be 5 * capable of working with almost all NAND chips currently available. 6 * 7 * Additional technical information is available on 8 * http://www.linux-mtd.infradead.org/doc/nand.html 9 * 10 * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com) 11 * 2002-2006 Thomas Gleixner (tglx@kernel.org) 12 * 13 * Credits: 14 * David Woodhouse for adding multichip support 15 * 16 * Aleph One Ltd. and Toby Churchill Ltd. for supporting the 17 * rework for 2K page size chips 18 * 19 * TODO: 20 * Enable cached programming for 2k page size chips 21 * Check, if mtd->ecctype should be set to MTD_ECC_HW 22 * if we have HW ECC support. 23 * BBT table is not serialized, has to be fixed 24 */ 25 26 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 27 28 #include <linux/module.h> 29 #include <linux/delay.h> 30 #include <linux/errno.h> 31 #include <linux/err.h> 32 #include <linux/sched.h> 33 #include <linux/slab.h> 34 #include <linux/mm.h> 35 #include <linux/types.h> 36 #include <linux/mtd/mtd.h> 37 #include <linux/mtd/nand.h> 38 #include <linux/mtd/nand-ecc-sw-hamming.h> 39 #include <linux/mtd/nand-ecc-sw-bch.h> 40 #include <linux/interrupt.h> 41 #include <linux/bitops.h> 42 #include <linux/io.h> 43 #include <linux/mtd/partitions.h> 44 #include <linux/of.h> 45 #include <linux/gpio/consumer.h> 46 #include <linux/cleanup.h> 47 48 #include "internals.h" 49 50 static int nand_pairing_dist3_get_info(struct mtd_info *mtd, int page, 51 struct mtd_pairing_info *info) 52 { 53 int lastpage = (mtd->erasesize / mtd->writesize) - 1; 54 int dist = 3; 55 56 if (page == lastpage) 57 dist = 2; 58 59 if (!page || (page & 1)) { 60 info->group = 0; 61 info->pair = (page + 1) / 2; 62 } else { 63 info->group = 1; 64 info->pair = (page + 1 - dist) / 2; 65 } 66 67 return 0; 68 } 69 70 static int nand_pairing_dist3_get_wunit(struct mtd_info *mtd, 71 const struct mtd_pairing_info 
*info) 72 { 73 int lastpair = ((mtd->erasesize / mtd->writesize) - 1) / 2; 74 int page = info->pair * 2; 75 int dist = 3; 76 77 if (!info->group && !info->pair) 78 return 0; 79 80 if (info->pair == lastpair && info->group) 81 dist = 2; 82 83 if (!info->group) 84 page--; 85 else if (info->pair) 86 page += dist - 1; 87 88 if (page >= mtd->erasesize / mtd->writesize) 89 return -EINVAL; 90 91 return page; 92 } 93 94 const struct mtd_pairing_scheme dist3_pairing_scheme = { 95 .ngroups = 2, 96 .get_info = nand_pairing_dist3_get_info, 97 .get_wunit = nand_pairing_dist3_get_wunit, 98 }; 99 100 static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len) 101 { 102 int ret = 0; 103 104 /* Start address must align on block boundary */ 105 if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) { 106 pr_debug("%s: unaligned address\n", __func__); 107 ret = -EINVAL; 108 } 109 110 /* Length must align on block boundary */ 111 if (len & ((1ULL << chip->phys_erase_shift) - 1)) { 112 pr_debug("%s: length not block aligned\n", __func__); 113 ret = -EINVAL; 114 } 115 116 return ret; 117 } 118 119 /** 120 * nand_extract_bits - Copy unaligned bits from one buffer to another one 121 * @dst: destination buffer 122 * @dst_off: bit offset at which the writing starts 123 * @src: source buffer 124 * @src_off: bit offset at which the reading starts 125 * @nbits: number of bits to copy from @src to @dst 126 * 127 * Copy bits from one memory region to another (overlap authorized). 
128 */ 129 void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src, 130 unsigned int src_off, unsigned int nbits) 131 { 132 unsigned int tmp, n; 133 134 dst += dst_off / 8; 135 dst_off %= 8; 136 src += src_off / 8; 137 src_off %= 8; 138 139 while (nbits) { 140 n = min3(8 - dst_off, 8 - src_off, nbits); 141 142 tmp = (*src >> src_off) & GENMASK(n - 1, 0); 143 *dst &= ~GENMASK(n - 1 + dst_off, dst_off); 144 *dst |= tmp << dst_off; 145 146 dst_off += n; 147 if (dst_off >= 8) { 148 dst++; 149 dst_off -= 8; 150 } 151 152 src_off += n; 153 if (src_off >= 8) { 154 src++; 155 src_off -= 8; 156 } 157 158 nbits -= n; 159 } 160 } 161 EXPORT_SYMBOL_GPL(nand_extract_bits); 162 163 /** 164 * nand_select_target() - Select a NAND target (A.K.A. die) 165 * @chip: NAND chip object 166 * @cs: the CS line to select. Note that this CS id is always from the chip 167 * PoV, not the controller one 168 * 169 * Select a NAND target so that further operations executed on @chip go to the 170 * selected NAND target. 171 */ 172 void nand_select_target(struct nand_chip *chip, unsigned int cs) 173 { 174 /* 175 * cs should always lie between 0 and nanddev_ntargets(), when that's 176 * not the case it's a bug and the caller should be fixed. 177 */ 178 if (WARN_ON(cs > nanddev_ntargets(&chip->base))) 179 return; 180 181 chip->cur_cs = cs; 182 183 if (chip->legacy.select_chip) 184 chip->legacy.select_chip(chip, cs); 185 } 186 EXPORT_SYMBOL_GPL(nand_select_target); 187 188 /** 189 * nand_deselect_target() - Deselect the currently selected target 190 * @chip: NAND chip object 191 * 192 * Deselect the currently selected NAND target. The result of operations 193 * executed on @chip after the target has been deselected is undefined. 
194 */ 195 void nand_deselect_target(struct nand_chip *chip) 196 { 197 if (chip->legacy.select_chip) 198 chip->legacy.select_chip(chip, -1); 199 200 chip->cur_cs = -1; 201 } 202 EXPORT_SYMBOL_GPL(nand_deselect_target); 203 204 /** 205 * nand_release_device - [GENERIC] release chip 206 * @chip: NAND chip object 207 * 208 * Release chip lock and wake up anyone waiting on the device. 209 */ 210 static void nand_release_device(struct nand_chip *chip) 211 { 212 /* Release the controller and the chip */ 213 mutex_unlock(&chip->controller->lock); 214 mutex_unlock(&chip->lock); 215 } 216 217 /** 218 * nand_bbm_get_next_page - Get the next page for bad block markers 219 * @chip: NAND chip object 220 * @page: First page to start checking for bad block marker usage 221 * 222 * Returns an integer that corresponds to the page offset within a block, for 223 * a page that is used to store bad block markers. If no more pages are 224 * available, -EINVAL is returned. 225 */ 226 int nand_bbm_get_next_page(struct nand_chip *chip, int page) 227 { 228 struct mtd_info *mtd = nand_to_mtd(chip); 229 int last_page = ((mtd->erasesize - mtd->writesize) >> 230 chip->page_shift) & chip->pagemask; 231 unsigned int bbm_flags = NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE 232 | NAND_BBM_LASTPAGE; 233 234 if (page == 0 && !(chip->options & bbm_flags)) 235 return 0; 236 if (page == 0 && chip->options & NAND_BBM_FIRSTPAGE) 237 return 0; 238 if (page <= 1 && chip->options & NAND_BBM_SECONDPAGE) 239 return 1; 240 if (page <= last_page && chip->options & NAND_BBM_LASTPAGE) 241 return last_page; 242 243 return -EINVAL; 244 } 245 246 /** 247 * nand_block_bad - [DEFAULT] Read bad block marker from the chip 248 * @chip: NAND chip object 249 * @ofs: offset from device start 250 * 251 * Check, if the block is bad. 
252 */ 253 static int nand_block_bad(struct nand_chip *chip, loff_t ofs) 254 { 255 int first_page, page_offset; 256 int res; 257 u8 bad; 258 259 first_page = (int)(ofs >> chip->page_shift) & chip->pagemask; 260 page_offset = nand_bbm_get_next_page(chip, 0); 261 262 while (page_offset >= 0) { 263 res = chip->ecc.read_oob(chip, first_page + page_offset); 264 if (res < 0) 265 return res; 266 267 bad = chip->oob_poi[chip->badblockpos]; 268 269 if (likely(chip->badblockbits == 8)) 270 res = bad != 0xFF; 271 else 272 res = hweight8(bad) < chip->badblockbits; 273 if (res) 274 return res; 275 276 page_offset = nand_bbm_get_next_page(chip, page_offset + 1); 277 } 278 279 return 0; 280 } 281 282 /** 283 * nand_region_is_secured() - Check if the region is secured 284 * @chip: NAND chip object 285 * @offset: Offset of the region to check 286 * @size: Size of the region to check 287 * 288 * Checks if the region is secured by comparing the offset and size with the 289 * list of secure regions obtained from DT. Returns true if the region is 290 * secured else false. 
291 */ 292 static bool nand_region_is_secured(struct nand_chip *chip, loff_t offset, u64 size) 293 { 294 int i; 295 296 /* Skip touching the secure regions if present */ 297 for (i = 0; i < chip->nr_secure_regions; i++) { 298 const struct nand_secure_region *region = &chip->secure_regions[i]; 299 300 if (offset + size <= region->offset || 301 offset >= region->offset + region->size) 302 continue; 303 304 pr_debug("%s: Region 0x%llx - 0x%llx is secured!", 305 __func__, offset, offset + size); 306 307 return true; 308 } 309 310 return false; 311 } 312 313 static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs) 314 { 315 struct mtd_info *mtd = nand_to_mtd(chip); 316 317 if (chip->options & NAND_NO_BBM_QUIRK) 318 return 0; 319 320 /* Check if the region is secured */ 321 if (nand_region_is_secured(chip, ofs, mtd->erasesize)) 322 return -EIO; 323 324 if (mtd_check_expert_analysis_mode()) 325 return 0; 326 327 if (chip->legacy.block_bad) 328 return chip->legacy.block_bad(chip, ofs); 329 330 return nand_block_bad(chip, ofs); 331 } 332 333 /** 334 * nand_get_device - [GENERIC] Get chip for selected access 335 * @chip: NAND chip structure 336 * 337 * Lock the device and its controller for exclusive access 338 */ 339 static void nand_get_device(struct nand_chip *chip) 340 { 341 /* Wait until the device is resumed. */ 342 while (1) { 343 mutex_lock(&chip->lock); 344 if (!chip->suspended) { 345 mutex_lock(&chip->controller->lock); 346 return; 347 } 348 mutex_unlock(&chip->lock); 349 350 wait_event(chip->resume_wq, !chip->suspended); 351 } 352 } 353 354 /** 355 * nand_check_wp - [GENERIC] check if the chip is write protected 356 * @chip: NAND chip object 357 * 358 * Check, if the device is write protected. The function expects, that the 359 * device is already selected. 
360 */ 361 static int nand_check_wp(struct nand_chip *chip) 362 { 363 u8 status; 364 int ret; 365 366 /* Broken xD cards report WP despite being writable */ 367 if (chip->options & NAND_BROKEN_XD) 368 return 0; 369 370 /* controller responsible for NAND write protect */ 371 if (chip->controller->controller_wp) 372 return 0; 373 374 /* Check the WP bit */ 375 ret = nand_status_op(chip, &status); 376 if (ret) 377 return ret; 378 379 return status & NAND_STATUS_WP ? 0 : 1; 380 } 381 382 /** 383 * nand_fill_oob - [INTERN] Transfer client buffer to oob 384 * @chip: NAND chip object 385 * @oob: oob data buffer 386 * @len: oob data write length 387 * @ops: oob ops structure 388 */ 389 static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len, 390 struct mtd_oob_ops *ops) 391 { 392 struct mtd_info *mtd = nand_to_mtd(chip); 393 int ret; 394 395 /* 396 * Initialise to all 0xFF, to avoid the possibility of left over OOB 397 * data from a previous OOB read. 398 */ 399 memset(chip->oob_poi, 0xff, mtd->oobsize); 400 401 switch (ops->mode) { 402 403 case MTD_OPS_PLACE_OOB: 404 case MTD_OPS_RAW: 405 memcpy(chip->oob_poi + ops->ooboffs, oob, len); 406 return oob + len; 407 408 case MTD_OPS_AUTO_OOB: 409 ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi, 410 ops->ooboffs, len); 411 BUG_ON(ret); 412 return oob + len; 413 414 default: 415 BUG(); 416 } 417 return NULL; 418 } 419 420 /** 421 * nand_do_write_oob - [MTD Interface] NAND write out-of-band 422 * @chip: NAND chip object 423 * @to: offset to write to 424 * @ops: oob operation description structure 425 * 426 * NAND write out-of-band. 
427 */ 428 static int nand_do_write_oob(struct nand_chip *chip, loff_t to, 429 struct mtd_oob_ops *ops) 430 { 431 struct mtd_info *mtd = nand_to_mtd(chip); 432 int chipnr, page, status, len, ret; 433 434 pr_debug("%s: to = 0x%08x, len = %i\n", 435 __func__, (unsigned int)to, (int)ops->ooblen); 436 437 len = mtd_oobavail(mtd, ops); 438 439 /* Do not allow write past end of page */ 440 if ((ops->ooboffs + ops->ooblen) > len) { 441 pr_debug("%s: attempt to write past end of page\n", 442 __func__); 443 return -EINVAL; 444 } 445 446 /* Check if the region is secured */ 447 if (nand_region_is_secured(chip, to, ops->ooblen)) 448 return -EIO; 449 450 chipnr = (int)(to >> chip->chip_shift); 451 452 /* 453 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one 454 * of my DiskOnChip 2000 test units) will clear the whole data page too 455 * if we don't do this. I have no clue why, but I seem to have 'fixed' 456 * it in the doc2000 driver in August 1999. dwmw2. 457 */ 458 ret = nand_reset(chip, chipnr); 459 if (ret) 460 return ret; 461 462 nand_select_target(chip, chipnr); 463 464 /* Shift to get page */ 465 page = (int)(to >> chip->page_shift); 466 467 /* Check, if it is write protected */ 468 if (nand_check_wp(chip)) { 469 nand_deselect_target(chip); 470 return -EROFS; 471 } 472 473 /* Invalidate the page cache, if we write to the cached page */ 474 if (page == chip->pagecache.page) 475 chip->pagecache.page = -1; 476 477 nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops); 478 479 if (ops->mode == MTD_OPS_RAW) 480 status = chip->ecc.write_oob_raw(chip, page & chip->pagemask); 481 else 482 status = chip->ecc.write_oob(chip, page & chip->pagemask); 483 484 nand_deselect_target(chip); 485 486 if (status) 487 return status; 488 489 ops->oobretlen = ops->ooblen; 490 491 return 0; 492 } 493 494 /** 495 * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker 496 * @chip: NAND chip object 497 * @ofs: offset from device start 498 * 499 * This is 
the default implementation, which can be overridden by a hardware 500 * specific driver. It provides the details for writing a bad block marker to a 501 * block. 502 */ 503 static int nand_default_block_markbad(struct nand_chip *chip, loff_t ofs) 504 { 505 struct mtd_info *mtd = nand_to_mtd(chip); 506 struct mtd_oob_ops ops; 507 uint8_t buf[2] = { 0, 0 }; 508 int ret = 0, res, page_offset; 509 510 memset(&ops, 0, sizeof(ops)); 511 ops.oobbuf = buf; 512 ops.ooboffs = chip->badblockpos; 513 if (chip->options & NAND_BUSWIDTH_16) { 514 ops.ooboffs &= ~0x01; 515 ops.len = ops.ooblen = 2; 516 } else { 517 ops.len = ops.ooblen = 1; 518 } 519 ops.mode = MTD_OPS_PLACE_OOB; 520 521 page_offset = nand_bbm_get_next_page(chip, 0); 522 523 while (page_offset >= 0) { 524 res = nand_do_write_oob(chip, 525 ofs + (page_offset * mtd->writesize), 526 &ops); 527 528 if (!ret) 529 ret = res; 530 531 page_offset = nand_bbm_get_next_page(chip, page_offset + 1); 532 } 533 534 return ret; 535 } 536 537 /** 538 * nand_markbad_bbm - mark a block by updating the BBM 539 * @chip: NAND chip object 540 * @ofs: offset of the block to mark bad 541 */ 542 int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs) 543 { 544 if (chip->legacy.block_markbad) 545 return chip->legacy.block_markbad(chip, ofs); 546 547 return nand_default_block_markbad(chip, ofs); 548 } 549 550 /** 551 * nand_block_markbad_lowlevel - mark a block bad 552 * @chip: NAND chip object 553 * @ofs: offset from device start 554 * 555 * This function performs the generic NAND bad block marking steps (i.e., bad 556 * block table(s) and/or marker(s)). We only allow the hardware driver to 557 * specify how to write bad block markers to OOB (chip->legacy.block_markbad). 
558 * 559 * We try operations in the following order: 560 * 561 * (1) erase the affected block, to allow OOB marker to be written cleanly 562 * (2) write bad block marker to OOB area of affected block (unless flag 563 * NAND_BBT_NO_OOB_BBM is present) 564 * (3) update the BBT 565 * 566 * Note that we retain the first error encountered in (2) or (3), finish the 567 * procedures, and dump the error in the end. 568 */ 569 static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs) 570 { 571 struct mtd_info *mtd = nand_to_mtd(chip); 572 int res, ret = 0; 573 574 if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) { 575 struct erase_info einfo; 576 577 /* Attempt erase before marking OOB */ 578 memset(&einfo, 0, sizeof(einfo)); 579 einfo.addr = ofs; 580 einfo.len = 1ULL << chip->phys_erase_shift; 581 nand_erase_nand(chip, &einfo, 0); 582 583 /* Write bad block marker to OOB */ 584 nand_get_device(chip); 585 586 ret = nand_markbad_bbm(chip, ofs); 587 nand_release_device(chip); 588 } 589 590 /* Mark block bad in BBT */ 591 if (chip->bbt) { 592 res = nand_markbad_bbt(chip, ofs); 593 if (!ret) 594 ret = res; 595 } 596 597 if (!ret) 598 mtd->ecc_stats.badblocks++; 599 600 return ret; 601 } 602 603 /** 604 * nand_block_isreserved - [GENERIC] Check if a block is marked reserved. 605 * @mtd: MTD device structure 606 * @ofs: offset from device start 607 * 608 * Check if the block is marked as reserved. 609 */ 610 static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs) 611 { 612 struct nand_chip *chip = mtd_to_nand(mtd); 613 614 if (!chip->bbt) 615 return 0; 616 /* Return info from the table */ 617 return nand_isreserved_bbt(chip, ofs); 618 } 619 620 /** 621 * nand_block_checkbad - [GENERIC] Check if a block is marked bad 622 * @chip: NAND chip object 623 * @ofs: offset from device start 624 * @allowbbt: 1, if its allowed to access the bbt area 625 * 626 * Check, if the block is bad. 
Either by reading the bad block table or 627 * calling of the scan function. 628 */ 629 static int nand_block_checkbad(struct nand_chip *chip, loff_t ofs, int allowbbt) 630 { 631 /* Return info from the table */ 632 if (chip->bbt) 633 return nand_isbad_bbt(chip, ofs, allowbbt); 634 635 return nand_isbad_bbm(chip, ofs); 636 } 637 638 /** 639 * nand_soft_waitrdy - Poll STATUS reg until RDY bit is set to 1 640 * @chip: NAND chip structure 641 * @timeout_ms: Timeout in ms 642 * 643 * Poll the STATUS register using ->exec_op() until the RDY bit becomes 1. 644 * If that does not happen whitin the specified timeout, -ETIMEDOUT is 645 * returned. 646 * 647 * This helper is intended to be used when the controller does not have access 648 * to the NAND R/B pin. 649 * 650 * Be aware that calling this helper from an ->exec_op() implementation means 651 * ->exec_op() must be re-entrant. 652 * 653 * Return 0 if the NAND chip is ready, a negative error otherwise. 654 */ 655 int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms) 656 { 657 const struct nand_interface_config *conf; 658 u8 status = 0; 659 int ret; 660 661 if (!nand_has_exec_op(chip)) 662 return -ENOTSUPP; 663 664 /* Wait tWB before polling the STATUS reg. */ 665 conf = nand_get_interface_config(chip); 666 ndelay(NAND_COMMON_TIMING_NS(conf, tWB_max)); 667 668 ret = nand_status_op(chip, NULL); 669 if (ret) 670 return ret; 671 672 /* 673 * +1 below is necessary because if we are now in the last fraction 674 * of jiffy and msecs_to_jiffies is 1 then we will wait only that 675 * small jiffy fraction - possibly leading to false timeout 676 */ 677 timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1; 678 do { 679 ret = nand_read_data_op(chip, &status, sizeof(status), true, 680 false); 681 if (ret) 682 break; 683 684 if (status & NAND_STATUS_READY) 685 break; 686 687 /* 688 * Typical lowest execution time for a tR on most NANDs is 10us, 689 * use this as polling delay before doing something smarter (ie. 
690 * deriving a delay from the timeout value, timeout_ms/ratio). 691 */ 692 udelay(10); 693 } while (time_before(jiffies, timeout_ms)); 694 695 /* 696 * We have to exit READ_STATUS mode in order to read real data on the 697 * bus in case the WAITRDY instruction is preceding a DATA_IN 698 * instruction. 699 */ 700 nand_exit_status_op(chip); 701 702 if (ret) 703 return ret; 704 705 return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT; 706 }; 707 EXPORT_SYMBOL_GPL(nand_soft_waitrdy); 708 709 /** 710 * nand_gpio_waitrdy - Poll R/B GPIO pin until ready 711 * @chip: NAND chip structure 712 * @gpiod: GPIO descriptor of R/B pin 713 * @timeout_ms: Timeout in ms 714 * 715 * Poll the R/B GPIO pin until it becomes ready. If that does not happen 716 * whitin the specified timeout, -ETIMEDOUT is returned. 717 * 718 * This helper is intended to be used when the controller has access to the 719 * NAND R/B pin over GPIO. 720 * 721 * Return 0 if the R/B pin indicates chip is ready, a negative error otherwise. 722 */ 723 int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod, 724 unsigned long timeout_ms) 725 { 726 727 /* 728 * Wait until R/B pin indicates chip is ready or timeout occurs. 729 * +1 below is necessary because if we are now in the last fraction 730 * of jiffy and msecs_to_jiffies is 1 then we will wait only that 731 * small jiffy fraction - possibly leading to false timeout. 732 */ 733 timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1; 734 do { 735 if (gpiod_get_value_cansleep(gpiod)) 736 return 0; 737 738 cond_resched(); 739 } while (time_before(jiffies, timeout_ms)); 740 741 return gpiod_get_value_cansleep(gpiod) ? 0 : -ETIMEDOUT; 742 }; 743 EXPORT_SYMBOL_GPL(nand_gpio_waitrdy); 744 745 /** 746 * panic_nand_wait - [GENERIC] wait until the command is done 747 * @chip: NAND chip structure 748 * @timeo: timeout 749 * 750 * Wait for command done. This is a helper function for nand_wait used when 751 * we are in interrupt context. 
May happen when in panic and trying to write 752 * an oops through mtdoops. 753 */ 754 void panic_nand_wait(struct nand_chip *chip, unsigned long timeo) 755 { 756 int i; 757 for (i = 0; i < timeo; i++) { 758 if (chip->legacy.dev_ready) { 759 if (chip->legacy.dev_ready(chip)) 760 break; 761 } else { 762 int ret; 763 u8 status; 764 765 ret = nand_read_data_op(chip, &status, sizeof(status), 766 true, false); 767 if (ret) 768 return; 769 770 if (status & NAND_STATUS_READY) 771 break; 772 } 773 mdelay(1); 774 } 775 } 776 777 static bool nand_supports_get_features(struct nand_chip *chip, int addr) 778 { 779 return (chip->parameters.supports_set_get_features && 780 test_bit(addr, chip->parameters.get_feature_list)); 781 } 782 783 static bool nand_supports_set_features(struct nand_chip *chip, int addr) 784 { 785 return (chip->parameters.supports_set_get_features && 786 test_bit(addr, chip->parameters.set_feature_list)); 787 } 788 789 /** 790 * nand_reset_interface - Reset data interface and timings 791 * @chip: The NAND chip 792 * @chipnr: Internal die id 793 * 794 * Reset the Data interface and timings to ONFI mode 0. 795 * 796 * Returns 0 for success or negative error code otherwise. 797 */ 798 static int nand_reset_interface(struct nand_chip *chip, int chipnr) 799 { 800 const struct nand_controller_ops *ops = chip->controller->ops; 801 int ret; 802 803 if (!nand_controller_can_setup_interface(chip)) 804 return 0; 805 806 /* 807 * The ONFI specification says: 808 * " 809 * To transition from NV-DDR or NV-DDR2 to the SDR data 810 * interface, the host shall use the Reset (FFh) command 811 * using SDR timing mode 0. A device in any timing mode is 812 * required to recognize Reset (FFh) command issued in SDR 813 * timing mode 0. 814 * " 815 * 816 * Configure the data interface in SDR mode and set the 817 * timings to timing mode 0. 
818 */ 819 820 chip->current_interface_config = nand_get_reset_interface_config(); 821 ret = ops->setup_interface(chip, chipnr, 822 chip->current_interface_config); 823 if (ret) 824 pr_err("Failed to configure data interface to SDR timing mode 0\n"); 825 826 return ret; 827 } 828 829 /** 830 * nand_setup_interface - Setup the best data interface and timings 831 * @chip: The NAND chip 832 * @chipnr: Internal die id 833 * 834 * Configure what has been reported to be the best data interface and NAND 835 * timings supported by the chip and the driver. 836 * 837 * Returns 0 for success or negative error code otherwise. 838 */ 839 static int nand_setup_interface(struct nand_chip *chip, int chipnr) 840 { 841 const struct nand_controller_ops *ops = chip->controller->ops; 842 u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { }, request; 843 int ret; 844 845 if (!nand_controller_can_setup_interface(chip)) 846 return 0; 847 848 /* 849 * A nand_reset_interface() put both the NAND chip and the NAND 850 * controller in timings mode 0. If the default mode for this chip is 851 * also 0, no need to proceed to the change again. Plus, at probe time, 852 * nand_setup_interface() uses ->set/get_features() which would 853 * fail anyway as the parameter page is not available yet. 
854 */ 855 if (!chip->best_interface_config) 856 return 0; 857 858 request = chip->best_interface_config->timings.mode; 859 if (nand_interface_is_sdr(chip->best_interface_config)) 860 request |= ONFI_DATA_INTERFACE_SDR; 861 else 862 request |= ONFI_DATA_INTERFACE_NVDDR; 863 tmode_param[0] = request; 864 865 /* Change the mode on the chip side (if supported by the NAND chip) */ 866 if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) { 867 nand_select_target(chip, chipnr); 868 ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE, 869 tmode_param); 870 nand_deselect_target(chip); 871 if (ret) 872 return ret; 873 } 874 875 /* Change the mode on the controller side */ 876 ret = ops->setup_interface(chip, chipnr, chip->best_interface_config); 877 if (ret) 878 return ret; 879 880 /* Check the mode has been accepted by the chip, if supported */ 881 if (!nand_supports_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) 882 goto update_interface_config; 883 884 memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN); 885 nand_select_target(chip, chipnr); 886 ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE, 887 tmode_param); 888 nand_deselect_target(chip); 889 if (ret) 890 goto err_reset_chip; 891 892 if (request != tmode_param[0]) { 893 pr_warn("%s timing mode %d not acknowledged by the NAND chip\n", 894 nand_interface_is_nvddr(chip->best_interface_config) ? "NV-DDR" : "SDR", 895 chip->best_interface_config->timings.mode); 896 pr_debug("NAND chip would work in %s timing mode %d\n", 897 tmode_param[0] & ONFI_DATA_INTERFACE_NVDDR ? "NV-DDR" : "SDR", 898 (unsigned int)ONFI_TIMING_MODE_PARAM(tmode_param[0])); 899 goto err_reset_chip; 900 } 901 902 update_interface_config: 903 chip->current_interface_config = chip->best_interface_config; 904 905 return 0; 906 907 err_reset_chip: 908 /* 909 * Fallback to mode 0 if the chip explicitly did not ack the chosen 910 * timing mode. 
911 */ 912 nand_reset_interface(chip, chipnr); 913 nand_select_target(chip, chipnr); 914 nand_reset_op(chip); 915 nand_deselect_target(chip); 916 917 return ret; 918 } 919 920 /** 921 * nand_choose_best_sdr_timings - Pick up the best SDR timings that both the 922 * NAND controller and the NAND chip support 923 * @chip: the NAND chip 924 * @iface: the interface configuration (can eventually be updated) 925 * @spec_timings: specific timings, when not fitting the ONFI specification 926 * 927 * If specific timings are provided, use them. Otherwise, retrieve supported 928 * timing modes from ONFI information. 929 */ 930 int nand_choose_best_sdr_timings(struct nand_chip *chip, 931 struct nand_interface_config *iface, 932 struct nand_sdr_timings *spec_timings) 933 { 934 const struct nand_controller_ops *ops = chip->controller->ops; 935 int best_mode = 0, mode, ret = -EOPNOTSUPP; 936 937 iface->type = NAND_SDR_IFACE; 938 939 if (spec_timings) { 940 iface->timings.sdr = *spec_timings; 941 iface->timings.mode = onfi_find_closest_sdr_mode(spec_timings); 942 943 /* Verify the controller supports the requested interface */ 944 ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY, 945 iface); 946 if (!ret) { 947 chip->best_interface_config = iface; 948 return ret; 949 } 950 951 /* Fallback to slower modes */ 952 best_mode = iface->timings.mode; 953 } else if (chip->parameters.onfi) { 954 best_mode = fls(chip->parameters.onfi->sdr_timing_modes) - 1; 955 } 956 957 for (mode = best_mode; mode >= 0; mode--) { 958 onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, mode); 959 960 ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY, 961 iface); 962 if (!ret) { 963 chip->best_interface_config = iface; 964 break; 965 } 966 } 967 968 return ret; 969 } 970 971 /** 972 * nand_choose_best_nvddr_timings - Pick up the best NVDDR timings that both the 973 * NAND controller and the NAND chip support 974 * @chip: the NAND chip 975 * @iface: the interface configuration (can 
eventually be updated) 976 * @spec_timings: specific timings, when not fitting the ONFI specification 977 * 978 * If specific timings are provided, use them. Otherwise, retrieve supported 979 * timing modes from ONFI information. 980 */ 981 int nand_choose_best_nvddr_timings(struct nand_chip *chip, 982 struct nand_interface_config *iface, 983 struct nand_nvddr_timings *spec_timings) 984 { 985 const struct nand_controller_ops *ops = chip->controller->ops; 986 int best_mode = 0, mode, ret = -EOPNOTSUPP; 987 988 iface->type = NAND_NVDDR_IFACE; 989 990 if (spec_timings) { 991 iface->timings.nvddr = *spec_timings; 992 iface->timings.mode = onfi_find_closest_nvddr_mode(spec_timings); 993 994 /* Verify the controller supports the requested interface */ 995 ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY, 996 iface); 997 if (!ret) { 998 chip->best_interface_config = iface; 999 return ret; 1000 } 1001 1002 /* Fallback to slower modes */ 1003 best_mode = iface->timings.mode; 1004 } else if (chip->parameters.onfi) { 1005 best_mode = fls(chip->parameters.onfi->nvddr_timing_modes) - 1; 1006 } 1007 1008 for (mode = best_mode; mode >= 0; mode--) { 1009 onfi_fill_interface_config(chip, iface, NAND_NVDDR_IFACE, mode); 1010 1011 ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY, 1012 iface); 1013 if (!ret) { 1014 chip->best_interface_config = iface; 1015 break; 1016 } 1017 } 1018 1019 return ret; 1020 } 1021 1022 /** 1023 * nand_choose_best_timings - Pick up the best NVDDR or SDR timings that both 1024 * NAND controller and the NAND chip support 1025 * @chip: the NAND chip 1026 * @iface: the interface configuration (can eventually be updated) 1027 * 1028 * If specific timings are provided, use them. Otherwise, retrieve supported 1029 * timing modes from ONFI information. 
1030 */ 1031 static int nand_choose_best_timings(struct nand_chip *chip, 1032 struct nand_interface_config *iface) 1033 { 1034 int ret; 1035 1036 /* Try the fastest timings: NV-DDR */ 1037 ret = nand_choose_best_nvddr_timings(chip, iface, NULL); 1038 if (!ret) 1039 return 0; 1040 1041 /* Fallback to SDR timings otherwise */ 1042 return nand_choose_best_sdr_timings(chip, iface, NULL); 1043 } 1044 1045 /** 1046 * nand_choose_interface_config - find the best data interface and timings 1047 * @chip: The NAND chip 1048 * 1049 * Find the best data interface and NAND timings supported by the chip 1050 * and the driver. Eventually let the NAND manufacturer driver propose his own 1051 * set of timings. 1052 * 1053 * After this function nand_chip->interface_config is initialized with the best 1054 * timing mode available. 1055 * 1056 * Returns 0 for success or negative error code otherwise. 1057 */ 1058 static int nand_choose_interface_config(struct nand_chip *chip) 1059 { 1060 struct nand_interface_config *iface; 1061 int ret; 1062 1063 if (!nand_controller_can_setup_interface(chip)) 1064 return 0; 1065 1066 iface = kzalloc_obj(*iface); 1067 if (!iface) 1068 return -ENOMEM; 1069 1070 if (chip->ops.choose_interface_config) 1071 ret = chip->ops.choose_interface_config(chip, iface); 1072 else 1073 ret = nand_choose_best_timings(chip, iface); 1074 1075 if (ret) 1076 kfree(iface); 1077 1078 return ret; 1079 } 1080 1081 /** 1082 * nand_fill_column_cycles - fill the column cycles of an address 1083 * @chip: The NAND chip 1084 * @addrs: Array of address cycles to fill 1085 * @offset_in_page: The offset in the page 1086 * 1087 * Fills the first or the first two bytes of the @addrs field depending 1088 * on the NAND bus width and the page size. 1089 * 1090 * Returns the number of cycles needed to encode the column, or a negative 1091 * error code in case one of the arguments is invalid. 
 */
static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
				   unsigned int offset_in_page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	bool ident_stage = !mtd->writesize;

	/* Bypass all checks during NAND identification */
	if (likely(!ident_stage)) {
		/* Make sure the offset is less than the actual page size. */
		if (offset_in_page > mtd->writesize + mtd->oobsize)
			return -EINVAL;

		/*
		 * On small page NANDs, there's a dedicated command to access the OOB
		 * area, and the column address is relative to the start of the OOB
		 * area, not the start of the page. Adjust the address accordingly.
		 */
		if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
			offset_in_page -= mtd->writesize;

		/*
		 * The offset in page is expressed in bytes, if the NAND bus is 16-bit
		 * wide, then it must be divided by 2.
		 */
		if (chip->options & NAND_BUSWIDTH_16) {
			if (WARN_ON(offset_in_page % 2))
				return -EINVAL;

			offset_in_page /= 2;
		}
	}

	addrs[0] = offset_in_page;

	/*
	 * Small page NANDs use 1 cycle for the columns, while large page NANDs
	 * need 2
	 */
	if (!ident_stage && mtd->writesize <= 512)
		return 1;

	addrs[1] = offset_in_page >> 8;

	return 2;
}

/* READ PAGE for small page chips (<= 512B): single READ0/READ1/READOOB cycle. */
static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
				     unsigned int offset_in_page, void *buf,
				     unsigned int len)
{
	const struct nand_interface_config *conf =
		nand_get_interface_config(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 addrs[4];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(3, addrs, NAND_COMMON_TIMING_NS(conf, tWB_max)),
		NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
				 NAND_COMMON_TIMING_NS(conf, tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int ret;

	/* Drop the DATA_IN instruction if len is set to 0. */
	if (!len)
		op.ninstrs--;

	/*
	 * Small page chips select the page region through the opcode:
	 * READ0 for the first 256 bytes, READ1 for the second half,
	 * READOOB for the spare area.
	 */
	if (offset_in_page >= mtd->writesize)
		instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
	else if (offset_in_page >= 256 &&
		 !(chip->options & NAND_BUSWIDTH_16))
		instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;

	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	addrs[1] = page;
	addrs[2] = page >> 8;

	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[3] = page >> 16;
		instrs[1].ctx.addr.naddrs++;
	}

	return nand_exec_op(chip, &op);
}

/* READ PAGE for large page chips: READ0 + addresses + READSTART sequence. */
static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
				     unsigned int offset_in_page, void *buf,
				     unsigned int len)
{
	const struct nand_interface_config *conf =
		nand_get_interface_config(chip);
	u8 addrs[5];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(4, addrs, 0),
		NAND_OP_CMD(NAND_CMD_READSTART, NAND_COMMON_TIMING_NS(conf, tWB_max)),
		NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
				 NAND_COMMON_TIMING_NS(conf, tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int ret;

	/* Drop the DATA_IN instruction if len is set to 0. */
	if (!len)
		op.ninstrs--;

	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	addrs[2] = page;
	addrs[3] = page >> 8;

	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[4] = page >> 16;
		instrs[1].ctx.addr.naddrs++;
	}

	return nand_exec_op(chip, &op);
}

/* Absolute index of the last page belonging to @lun. */
static unsigned int rawnand_last_page_of_lun(unsigned int pages_per_lun, unsigned int lun)
{
	/* lun is expected to be very small */
	return (lun * pages_per_lun) + pages_per_lun - 1;
}

/*
 * Clamp the currently configured continuous read window so it never
 * crosses a LUN boundary, updating cont_read.pause_page (and possibly
 * first_page/ongoing) accordingly.
 */
static void rawnand_cap_cont_reads(struct nand_chip *chip)
{
	struct nand_memory_organization *memorg;
	unsigned int ppl, first_lun, last_lun;

	memorg = nanddev_get_memorg(&chip->base);
	ppl = memorg->pages_per_eraseblock * memorg->eraseblocks_per_lun;
	first_lun = chip->cont_read.first_page / ppl;
	last_lun = chip->cont_read.last_page / ppl;

	/* Prevent sequential cache reads across LUN boundaries */
	if (first_lun != last_lun)
		chip->cont_read.pause_page = rawnand_last_page_of_lun(ppl, first_lun);
	else
		chip->cont_read.pause_page = chip->cont_read.last_page;

	/* Window starts exactly on a pause page: move it to the next LUN. */
	if (chip->cont_read.first_page == chip->cont_read.pause_page) {
		chip->cont_read.first_page++;
		chip->cont_read.pause_page = min(chip->cont_read.last_page,
						 rawnand_last_page_of_lun(ppl, first_lun + 1));
	}

	/* Nothing left to read sequentially. */
	if (chip->cont_read.first_page >= chip->cont_read.last_page)
		chip->cont_read.ongoing = false;
}

/*
 * Sequential cache read: the first page of the window issues
 * READ0+READSTART+READCACHESEQ, following pages only need a
 * READCACHESEQ (or READCACHEEND on the pause page). With
 * @check_only, only probe controller support via nand_check_op().
 */
static int nand_lp_exec_cont_read_page_op(struct nand_chip *chip, unsigned int page,
					  unsigned int offset_in_page, void *buf,
					  unsigned int len, bool check_only)
{
	const struct nand_interface_config *conf =
		nand_get_interface_config(chip);
	u8 addrs[5];
	struct nand_op_instr start_instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(4, addrs, 0),
		NAND_OP_CMD(NAND_CMD_READSTART, NAND_COMMON_TIMING_NS(conf, tWB_max)),
		NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max), 0),
		NAND_OP_CMD(NAND_CMD_READCACHESEQ, NAND_COMMON_TIMING_NS(conf, tWB_max)),
		NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
				 NAND_COMMON_TIMING_NS(conf, tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_op_instr cont_instrs[] = {
		NAND_OP_CMD(page == chip->cont_read.pause_page ?
			    NAND_CMD_READCACHEEND : NAND_CMD_READCACHESEQ,
			    NAND_COMMON_TIMING_NS(conf, tWB_max)),
		NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
				 NAND_COMMON_TIMING_NS(conf, tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation start_op = NAND_OPERATION(chip->cur_cs, start_instrs);
	struct nand_operation cont_op = NAND_OPERATION(chip->cur_cs, cont_instrs);
	int ret;

	/* Drop the DATA_IN instructions if len is set to 0. */
	if (!len) {
		start_op.ninstrs--;
		cont_op.ninstrs--;
	}

	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	addrs[2] = page;
	addrs[3] = page >> 8;

	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[4] = page >> 16;
		start_instrs[1].ctx.addr.naddrs++;
	}

	/* Check if cache reads are supported */
	if (check_only) {
		if (nand_check_op(chip, &start_op) || nand_check_op(chip, &cont_op))
			return -EOPNOTSUPP;

		return 0;
	}

	if (page == chip->cont_read.first_page)
		ret = nand_exec_op(chip, &start_op);
	else
		ret = nand_exec_op(chip, &cont_op);
	if (ret)
		return ret;

	if (!chip->cont_read.ongoing)
		return 0;

	/* Advance/terminate the sequential read window. */
	if (page == chip->cont_read.last_page) {
		chip->cont_read.ongoing = false;
	} else if (page == chip->cont_read.pause_page) {
		chip->cont_read.first_page++;
		rawnand_cap_cont_reads(chip);
	}

	return 0;
}

static bool rawnand_cont_read_ongoing(struct nand_chip *chip, unsigned int
				      page)
{
	return chip->cont_read.ongoing && page >= chip->cont_read.first_page;
}

/**
 * nand_read_page_op - Do a READ PAGE operation
 * @chip: The NAND chip
 * @page: page to read
 * @offset_in_page: offset within the page
 * @buf: buffer used to store the data
 * @len: length of the buffer
 *
 * This function issues a READ PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_page_op(struct nand_chip *chip, unsigned int page,
		      unsigned int offset_in_page, void *buf, unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		if (mtd->writesize > 512) {
			/* Prefer sequential cache reads when one is in flight. */
			if (rawnand_cont_read_ongoing(chip, page))
				return nand_lp_exec_cont_read_page_op(chip, page,
								      offset_in_page,
								      buf, len, false);
			else
				return nand_lp_exec_read_page_op(chip, page,
								 offset_in_page, buf,
								 len);
		}

		return nand_sp_exec_read_page_op(chip, page, offset_in_page,
						 buf, len);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_READ0, offset_in_page, page);
	if (len)
		chip->legacy.read_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_read_page_op);

/**
 * nand_read_param_page_op - Do a READ PARAMETER PAGE operation
 * @chip: The NAND chip
 * @page: parameter page to read
 * @buf: buffer used to store the data
 * @len: length of the buffer
 *
 * This function issues a READ PARAMETER PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
			    unsigned int len)
{
	unsigned int i;
	u8 *p = buf;

	if (len && !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_PARAM, 0),
			NAND_OP_ADDR(1, &page,
				     NAND_COMMON_TIMING_NS(conf, tWB_max)),
			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
					 NAND_COMMON_TIMING_NS(conf, tRR_min)),
			NAND_OP_8BIT_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_PARAM, page, -1);
	for (i = 0; i < len; i++)
		p[i] = chip->legacy.read_byte(chip);

	return 0;
}

/**
 * nand_change_read_column_op - Do a CHANGE READ COLUMN operation
 * @chip: The NAND chip
 * @offset_in_page: offset within the page
 * @buf: buffer used to store the data
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 *
 * This function issues a CHANGE READ COLUMN operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_change_read_column_op(struct nand_chip *chip,
			       unsigned int offset_in_page, void *buf,
			       unsigned int len, bool force_8bit)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	bool ident_stage = !mtd->writesize;

	if (len && !buf)
		return -EINVAL;

	/* mtd geometry is unknown during identification: skip the checks. */
	if (!ident_stage) {
		if (offset_in_page + len > mtd->writesize + mtd->oobsize)
			return -EINVAL;

		/* Small page NANDs do not support column change. */
		if (mtd->writesize <= 512)
			return -ENOTSUPP;
	}

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		u8 addrs[2] = {};
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
			NAND_OP_ADDR(2, addrs, 0),
			NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
				    NAND_COMMON_TIMING_NS(conf, tCCS_min)),
			NAND_OP_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
		if (ret < 0)
			return ret;

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		instrs[3].ctx.data.force_8bit = force_8bit;

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_RNDOUT, offset_in_page, -1);
	if (len)
		chip->legacy.read_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_change_read_column_op);

/**
 * nand_read_oob_op - Do a READ OOB operation
 * @chip: The NAND chip
 * @page: page to read
 * @offset_in_oob: offset within the OOB area
 * @buf: buffer used to store the data
 * @len: length of the buffer
 *
 * This function issues a READ OOB operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
		     unsigned int offset_in_oob, void *buf, unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_oob + len > mtd->oobsize)
		return -EINVAL;

	/* OOB immediately follows the in-band data in the page layout. */
	if (nand_has_exec_op(chip))
		return nand_read_page_op(chip, page,
					 mtd->writesize + offset_in_oob,
					 buf, len);

	chip->legacy.cmdfunc(chip, NAND_CMD_READOOB, offset_in_oob, page);
	if (len)
		chip->legacy.read_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_read_oob_op);

static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
				  unsigned int offset_in_page, const void *buf,
				  unsigned int len, bool prog)
{
	const struct nand_interface_config *conf =
		nand_get_interface_config(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 addrs[5] = {};
	struct nand_op_instr instrs[] = {
		/*
		 * The first instruction will be dropped if we're dealing
		 * with a large page NAND and adjusted if we're dealing
		 * with a small page NAND and the page offset is > 255.
		 */
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_CMD(NAND_CMD_SEQIN, 0),
		NAND_OP_ADDR(0, addrs, NAND_COMMON_TIMING_NS(conf, tADL_min)),
		NAND_OP_DATA_OUT(len, buf, 0),
		NAND_OP_CMD(NAND_CMD_PAGEPROG,
			    NAND_COMMON_TIMING_NS(conf, tWB_max)),
		NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tPROG_max), 0),
	};
	struct nand_operation op = NAND_DESTRUCTIVE_OPERATION(chip->cur_cs,
							      instrs);
	int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);

	if (naddrs < 0)
		return naddrs;

	addrs[naddrs++] = page;
	addrs[naddrs++] = page >> 8;
	if (chip->options & NAND_ROW_ADDR_3)
		addrs[naddrs++] = page >> 16;

	instrs[2].ctx.addr.naddrs = naddrs;

	/* Drop the last two instructions if we're not programming the page. */
	if (!prog) {
		op.ninstrs -= 2;
		/* Also drop the DATA_OUT instruction if empty. */
		if (!len)
			op.ninstrs--;
	}

	if (mtd->writesize <= 512) {
		/*
		 * Small pages need some more tweaking: we have to adjust the
		 * first instruction depending on the page offset we're trying
		 * to access.
		 */
		if (offset_in_page >= mtd->writesize)
			instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
		else if (offset_in_page >= 256 &&
			 !(chip->options & NAND_BUSWIDTH_16))
			instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
	} else {
		/*
		 * Drop the first command if we're dealing with a large page
		 * NAND.
		 */
		op.instrs++;
		op.ninstrs--;
	}

	return nand_exec_op(chip, &op);
}

/**
 * nand_prog_page_begin_op - starts a PROG PAGE operation
 * @chip: The NAND chip
 * @page: page to write
 * @offset_in_page: offset within the page
 * @buf: buffer containing the data to write to the page
 * @len: length of the buffer
 *
 * This function issues the first half of a PROG PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
			    unsigned int offset_in_page, const void *buf,
			    unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/* prog=false: address/data cycles only, no PAGEPROG is issued yet. */
	if (nand_has_exec_op(chip))
		return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
					      len, false);

	chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page, page);

	if (buf)
		chip->legacy.write_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);

/**
 * nand_prog_page_end_op - ends a PROG PAGE operation
 * @chip: The NAND chip
 *
 * This function issues the second half of a PROG PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_prog_page_end_op(struct nand_chip *chip)
{
	int ret;
	u8 status;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_PAGEPROG,
				    NAND_COMMON_TIMING_NS(conf, tWB_max)),
			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tPROG_max),
					 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		ret = nand_exec_op(chip, &op);
		if (ret)
			return ret;

		ret = nand_status_op(chip, &status);
		if (ret)
			return ret;
	} else {
		chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
		ret = chip->legacy.waitfunc(chip);
		if (ret < 0)
			return ret;

		status = ret;
	}

	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_end_op);

/**
 * nand_prog_page_op - Do a full PROG PAGE operation
 * @chip: The NAND chip
 * @page: page to write
 * @offset_in_page: offset within the page
 * @buf: buffer containing the data to write to the page
 * @len: length of the buffer
 *
 * This function issues a full PROG PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
		      unsigned int offset_in_page, const void *buf,
		      unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 status;
	int ret;

	if (!len || !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		/* prog=true: the full sequence including PAGEPROG + wait. */
		ret = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
					     len, true);
		if (ret)
			return ret;

		ret = nand_status_op(chip, &status);
		if (ret)
			return ret;
	} else {
		chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page,
				     page);
		chip->legacy.write_buf(chip, buf, len);
		chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
		ret = chip->legacy.waitfunc(chip);
		if (ret < 0)
			return ret;

		status = ret;
	}

	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_op);

/**
 * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation
 * @chip: The NAND chip
 * @offset_in_page: offset within the page
 * @buf: buffer containing the data to send to the NAND
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 *
 * This function issues a CHANGE WRITE COLUMN operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_change_write_column_op(struct nand_chip *chip,
				unsigned int offset_in_page,
				const void *buf, unsigned int len,
				bool force_8bit)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/* Small page NANDs do not support column change. */
	if (mtd->writesize <= 512)
		return -ENOTSUPP;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		u8 addrs[2];
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RNDIN, 0),
			NAND_OP_ADDR(2, addrs, NAND_COMMON_TIMING_NS(conf, tCCS_min)),
			NAND_OP_DATA_OUT(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
		if (ret < 0)
			return ret;

		instrs[2].ctx.data.force_8bit = force_8bit;

		/* Drop the DATA_OUT instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_RNDIN, offset_in_page, -1);
	if (len)
		chip->legacy.write_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_change_write_column_op);

/**
 * nand_readid_op - Do a READID operation
 * @chip: The NAND chip
 * @addr: address cycle to pass after the READID command
 * @buf: buffer used to store the ID
 * @len: length of the buffer
 *
 * This function sends a READID command and reads back the ID returned by the
 * NAND.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
		   unsigned int len)
{
	unsigned int i;
	u8 *id = buf, *ddrbuf = NULL;

	if (len && !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_READID, 0),
			NAND_OP_ADDR(1, &addr,
				     NAND_COMMON_TIMING_NS(conf, tADL_min)),
			NAND_OP_8BIT_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		/* READ_ID data bytes are received twice in NV-DDR mode */
		if (len && nand_interface_is_nvddr(conf)) {
			ddrbuf = kcalloc(2, len, GFP_KERNEL);
			if (!ddrbuf)
				return -ENOMEM;

			instrs[2].ctx.data.len *= 2;
			instrs[2].ctx.data.buf.in = ddrbuf;
		}

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		ret = nand_exec_op(chip, &op);
		if (!ret && len && nand_interface_is_nvddr(conf)) {
			/* Keep only one byte of each duplicated pair. */
			for (i = 0; i < len; i++)
				id[i] = ddrbuf[i * 2];
		}

		kfree(ddrbuf);

		return ret;
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_READID, addr, -1);

	for (i = 0; i < len; i++)
		id[i] = chip->legacy.read_byte(chip);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_readid_op);

/**
 * nand_status_op - Do a STATUS operation
 * @chip: The NAND chip
 * @status: out variable to store the NAND status
 *
 * This function sends a STATUS command and reads back the status returned by
 * the NAND.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_status_op(struct nand_chip *chip, u8 *status)
{
	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		u8 ddrstatus[2];
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_STATUS,
				    NAND_COMMON_TIMING_NS(conf, tADL_min)),
			NAND_OP_8BIT_DATA_IN(1, status, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		/* The status data byte will be received twice in NV-DDR mode */
		if (status && nand_interface_is_nvddr(conf)) {
			instrs[1].ctx.data.len *= 2;
			instrs[1].ctx.data.buf.in = ddrstatus;
		}

		/* Drop the DATA_IN instruction if the caller ignores status. */
		if (!status)
			op.ninstrs--;

		ret = nand_exec_op(chip, &op);
		if (!ret && status && nand_interface_is_nvddr(conf))
			*status = ddrstatus[0];

		return ret;
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_STATUS, -1, -1);
	if (status)
		*status = chip->legacy.read_byte(chip);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_status_op);

/**
 * nand_exit_status_op - Exit a STATUS operation
 * @chip: The NAND chip
 *
 * This function sends a READ0 command to cancel the effect of the STATUS
 * command to avoid reading only the status until a new read command is sent.
 *
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_exit_status_op(struct nand_chip *chip)
{
	if (nand_has_exec_op(chip)) {
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_READ0, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_READ0, -1, -1);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_exit_status_op);

/**
 * nand_erase_op - Do an erase operation
 * @chip: The NAND chip
 * @eraseblock: block to erase
 *
 * This function sends an ERASE command and waits for the NAND to be ready
 * before returning.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
{
	/* Erase is row-addressed: convert the block index to a page index. */
	unsigned int page = eraseblock <<
			    (chip->phys_erase_shift - chip->page_shift);
	int ret;
	u8 status;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		u8 addrs[3] = { page, page >> 8, page >> 16 };
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_ERASE1, 0),
			NAND_OP_ADDR(2, addrs, 0),
			NAND_OP_CMD(NAND_CMD_ERASE2,
				    NAND_COMMON_TIMING_NS(conf, tWB_max)),
			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tBERS_max),
					 0),
		};
		struct nand_operation op = NAND_DESTRUCTIVE_OPERATION(chip->cur_cs,
								      instrs);

		if (chip->options & NAND_ROW_ADDR_3)
			instrs[1].ctx.addr.naddrs++;

		ret = nand_exec_op(chip, &op);
		if (ret)
			return ret;

		ret = nand_status_op(chip, &status);
		if (ret)
			return ret;
	} else {
		chip->legacy.cmdfunc(chip, NAND_CMD_ERASE1, -1, page);
		chip->legacy.cmdfunc(chip, NAND_CMD_ERASE2, -1, -1);

		ret = chip->legacy.waitfunc(chip);
		if (ret < 0)
			return ret;

		status = ret;
	}

	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_erase_op);

/**
 * nand_set_features_op - Do a SET FEATURES operation
 * @chip: The NAND chip
 * @feature: feature id
 * @data: 4 bytes of data
 *
 * This function sends a SET FEATURES command and waits for the NAND to be
 * ready before returning.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int nand_set_features_op(struct nand_chip *chip, u8 feature,
				const void *data)
{
	const u8 *params = data;
	int i, ret;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
			NAND_OP_ADDR(1, &feature, NAND_COMMON_TIMING_NS(conf,
									tADL_min)),
			NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
					      NAND_COMMON_TIMING_NS(conf,
								    tWB_max)),
			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tFEAT_max),
					 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_SET_FEATURES, feature, -1);
	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
		chip->legacy.write_byte(chip, params[i]);

	ret = chip->legacy.waitfunc(chip);
	if (ret < 0)
		return ret;

	if (ret & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}

/**
 * nand_get_features_op - Do a GET FEATURES operation
 * @chip: The NAND chip
 * @feature: feature id
 * @data: 4 bytes of data
 *
 * This function sends a GET FEATURES command and waits for the NAND to be
 * ready before returning.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int nand_get_features_op(struct nand_chip *chip, u8 feature,
				void *data)
{
	u8 *params = data, ddrbuf[ONFI_SUBFEATURE_PARAM_LEN * 2];
	int i;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
			NAND_OP_ADDR(1, &feature,
				     NAND_COMMON_TIMING_NS(conf, tWB_max)),
			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tFEAT_max),
					 NAND_COMMON_TIMING_NS(conf, tRR_min)),
			NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
					     data, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		/* GET_FEATURE data bytes are received twice in NV-DDR mode */
		if (nand_interface_is_nvddr(conf)) {
			instrs[3].ctx.data.len *= 2;
			instrs[3].ctx.data.buf.in = ddrbuf;
		}

		ret = nand_exec_op(chip, &op);
		if (nand_interface_is_nvddr(conf)) {
			/* Keep only one byte of each duplicated pair. */
			for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; i++)
				params[i] = ddrbuf[i * 2];
		}

		return ret;
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_GET_FEATURES, feature, -1);
	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
		params[i] = chip->legacy.read_byte(chip);

	return 0;
}

/* Wait for the chip to become ready again, with an optional initial delay. */
static int nand_wait_rdy_op(struct nand_chip *chip, unsigned int timeout_ms,
			    unsigned int delay_ns)
{
	if (nand_has_exec_op(chip)) {
		struct nand_op_instr instrs[] = {
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(timeout_ms),
					 PSEC_TO_NSEC(delay_ns)),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	/* Apply delay or wait for ready/busy pin */
	if (!chip->legacy.dev_ready)
		udelay(chip->legacy.chip_delay);
	else
		nand_wait_ready(chip);

	return 0;
}

2134 /** 2135 * nand_reset_op - Do a reset operation 2136 * @chip: The NAND chip 2137 * 2138 * This function sends a RESET command and waits for the NAND to be ready 2139 * before returning. 2140 * This function does not select/unselect the CS line. 2141 * 2142 * Returns 0 on success, a negative error code otherwise. 2143 */ 2144 int nand_reset_op(struct nand_chip *chip) 2145 { 2146 if (nand_has_exec_op(chip)) { 2147 const struct nand_interface_config *conf = 2148 nand_get_interface_config(chip); 2149 struct nand_op_instr instrs[] = { 2150 NAND_OP_CMD(NAND_CMD_RESET, 2151 NAND_COMMON_TIMING_NS(conf, tWB_max)), 2152 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tRST_max), 2153 0), 2154 }; 2155 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); 2156 2157 return nand_exec_op(chip, &op); 2158 } 2159 2160 chip->legacy.cmdfunc(chip, NAND_CMD_RESET, -1, -1); 2161 2162 return 0; 2163 } 2164 EXPORT_SYMBOL_GPL(nand_reset_op); 2165 2166 /** 2167 * nand_read_data_op - Read data from the NAND 2168 * @chip: The NAND chip 2169 * @buf: buffer used to store the data 2170 * @len: length of the buffer 2171 * @force_8bit: force 8-bit bus access 2172 * @check_only: do not actually run the command, only checks if the 2173 * controller driver supports it 2174 * 2175 * This function does a raw data read on the bus. Usually used after launching 2176 * another NAND operation like nand_read_page_op(). 2177 * This function does not select/unselect the CS line. 2178 * 2179 * Returns 0 on success, a negative error code otherwise. 
2180 */ 2181 int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len, 2182 bool force_8bit, bool check_only) 2183 { 2184 if (!len || (!check_only && !buf)) 2185 return -EINVAL; 2186 2187 if (nand_has_exec_op(chip)) { 2188 const struct nand_interface_config *conf = 2189 nand_get_interface_config(chip); 2190 struct nand_op_instr instrs[] = { 2191 NAND_OP_DATA_IN(len, buf, 0), 2192 }; 2193 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); 2194 u8 *ddrbuf = NULL; 2195 int ret, i; 2196 2197 instrs[0].ctx.data.force_8bit = force_8bit; 2198 2199 /* 2200 * Parameter payloads (ID, status, features, etc) do not go 2201 * through the same pipeline as regular data, hence the 2202 * force_8bit flag must be set and this also indicates that in 2203 * case NV-DDR timings are being used the data will be received 2204 * twice. 2205 */ 2206 if (force_8bit && nand_interface_is_nvddr(conf)) { 2207 ddrbuf = kcalloc(2, len, GFP_KERNEL); 2208 if (!ddrbuf) 2209 return -ENOMEM; 2210 2211 instrs[0].ctx.data.len *= 2; 2212 instrs[0].ctx.data.buf.in = ddrbuf; 2213 } 2214 2215 if (check_only) { 2216 ret = nand_check_op(chip, &op); 2217 kfree(ddrbuf); 2218 return ret; 2219 } 2220 2221 ret = nand_exec_op(chip, &op); 2222 if (!ret && force_8bit && nand_interface_is_nvddr(conf)) { 2223 u8 *dst = buf; 2224 2225 for (i = 0; i < len; i++) 2226 dst[i] = ddrbuf[i * 2]; 2227 } 2228 2229 kfree(ddrbuf); 2230 2231 return ret; 2232 } 2233 2234 if (check_only) 2235 return 0; 2236 2237 if (force_8bit) { 2238 u8 *p = buf; 2239 unsigned int i; 2240 2241 for (i = 0; i < len; i++) 2242 p[i] = chip->legacy.read_byte(chip); 2243 } else { 2244 chip->legacy.read_buf(chip, buf, len); 2245 } 2246 2247 return 0; 2248 } 2249 EXPORT_SYMBOL_GPL(nand_read_data_op); 2250 2251 /** 2252 * nand_write_data_op - Write data from the NAND 2253 * @chip: The NAND chip 2254 * @buf: buffer containing the data to send on the bus 2255 * @len: length of the buffer 2256 * @force_8bit: force 8-bit bus access 
 *
 * This function does a raw data write on the bus. Usually used after launching
 * another NAND operation like nand_write_page_begin_op().
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_write_data_op(struct nand_chip *chip, const void *buf,
		       unsigned int len, bool force_8bit)
{
	if (!len || !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		struct nand_op_instr instrs[] = {
			NAND_OP_DATA_OUT(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		instrs[0].ctx.data.force_8bit = force_8bit;

		return nand_exec_op(chip, &op);
	}

	/* Legacy path: byte-wise or bus-wide accessors. */
	if (force_8bit) {
		const u8 *p = buf;
		unsigned int i;

		for (i = 0; i < len; i++)
			chip->legacy.write_byte(chip, p[i]);
	} else {
		chip->legacy.write_buf(chip, buf, len);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nand_write_data_op);

/**
 * struct nand_op_parser_ctx - Context used by the parser
 * @instrs: array of all the instructions that must be addressed
 * @ninstrs: length of the @instrs array
 * @subop: Sub-operation to be passed to the NAND controller
 *
 * This structure is used by the core to split NAND operations into
 * sub-operations that can be handled by the NAND controller.
 */
struct nand_op_parser_ctx {
	const struct nand_op_instr *instrs;
	unsigned int ninstrs;
	struct nand_subop subop;
};

/**
 * nand_op_parser_must_split_instr - Checks if an instruction must be split
 * @pat: the parser pattern element that matches @instr
 * @instr: pointer to the instruction to check
 * @start_offset: this is an in/out parameter. If @instr has already been
 *		  split, then @start_offset is the offset from which to start
 *		  (either an address cycle or an offset in the data buffer).
 *		  Conversely, if the function returns true (ie. instr must be
 *		  split), this parameter is updated to point to the first
 *		  data/address cycle that has not been taken care of.
 *
 * Some NAND controllers are limited and cannot send X address cycles with a
 * unique operation, or cannot read/write more than Y bytes at the same time.
 * In this case, split the instruction that does not fit in a single
 * controller-operation into two or more chunks.
 *
 * Returns true if the instruction must be split, false otherwise.
 * The @start_offset parameter is also updated to the offset at which the next
 * bundle of instruction must start (if an address or a data instruction).
 */
static bool
nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat,
				const struct nand_op_instr *instr,
				unsigned int *start_offset)
{
	switch (pat->type) {
	case NAND_OP_ADDR_INSTR:
		/* maxcycles == 0 means "no limit": never split. */
		if (!pat->ctx.addr.maxcycles)
			break;

		if (instr->ctx.addr.naddrs - *start_offset >
		    pat->ctx.addr.maxcycles) {
			*start_offset += pat->ctx.addr.maxcycles;
			return true;
		}
		break;

	case NAND_OP_DATA_IN_INSTR:
	case NAND_OP_DATA_OUT_INSTR:
		/* maxlen == 0 means "no limit": never split. */
		if (!pat->ctx.data.maxlen)
			break;

		if (instr->ctx.data.len - *start_offset >
		    pat->ctx.data.maxlen) {
			*start_offset += pat->ctx.data.maxlen;
			return true;
		}
		break;

	default:
		break;
	}

	return false;
}

/**
 * nand_op_parser_match_pat - Checks if a pattern matches the instructions
 *			      remaining in the parser context
 * @pat: the pattern to test
 * @ctx: the parser context structure to match with the pattern @pat
 *
 * Check if @pat matches the set or a sub-set of instructions remaining in @ctx.
 * Returns true if this is the case, false otherwise.
 * When true is returned, @ctx->subop is updated with the set of instructions
 * to be passed to the controller driver.
 */
static bool
nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat,
			 struct nand_op_parser_ctx *ctx)
{
	unsigned int instr_offset = ctx->subop.first_instr_start_off;
	const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs;
	const struct nand_op_instr *instr = ctx->subop.instrs;
	unsigned int i, ninstrs;

	for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) {
		/*
		 * The pattern instruction does not match the operation
		 * instruction. If the instruction is marked optional in the
		 * pattern definition, we skip the pattern element and continue
		 * to the next one. If the element is mandatory, there's no
		 * match and we can return false directly.
		 */
		if (instr->type != pat->elems[i].type) {
			if (!pat->elems[i].optional)
				return false;

			continue;
		}

		/*
		 * Now check the pattern element constraints. If the pattern is
		 * not able to handle the whole instruction in a single step,
		 * we have to split it.
		 * The instr_offset value comes back updated to point to the
		 * position where we have to split the instruction (the
		 * start of the next subop chunk).
		 */
		if (nand_op_parser_must_split_instr(&pat->elems[i], instr,
						    &instr_offset)) {
			ninstrs++;
			i++;
			break;
		}

		instr++;
		ninstrs++;
		instr_offset = 0;
	}

	/*
	 * This can happen if all instructions of a pattern are optional.
	 * Still, if there's not at least one instruction handled by this
	 * pattern, this is not a match, and we should try the next one (if
	 * any).
	 */
	if (!ninstrs)
		return false;

	/*
	 * We had a match on the pattern head, but the pattern may be longer
	 * than the instructions we're asked to execute. We need to make sure
	 * there's no mandatory elements in the pattern tail.
	 */
	for (; i < pat->nelems; i++) {
		if (!pat->elems[i].optional)
			return false;
	}

	/*
	 * We have a match: update the subop structure accordingly and return
	 * true.
	 */
	ctx->subop.ninstrs = ninstrs;
	ctx->subop.last_instr_end_off = instr_offset;

	return true;
}

#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
	const struct nand_op_instr *instr;
	char *prefix = "      ";
	unsigned int i;

	pr_debug("executing subop (CS%d):\n", ctx->subop.cs);

	for (i = 0; i < ctx->ninstrs; i++) {
		instr = &ctx->instrs[i];

		/* Highlight the instructions belonging to the current subop. */
		if (instr == &ctx->subop.instrs[0])
			prefix = "    ->";

		nand_op_trace(prefix, instr);

		if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
			prefix = "      ";
	}
}
#else
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
	/* NOP */
}
#endif

/*
 * Total ordering on candidate sub-operations: the one consuming the most
 * instructions (and, on a tie, the most trailing data/address cycles) wins.
 */
static int nand_op_parser_cmp_ctx(const struct nand_op_parser_ctx *a,
				  const struct nand_op_parser_ctx *b)
{
	if (a->subop.ninstrs < b->subop.ninstrs)
		return -1;
	else if (a->subop.ninstrs > b->subop.ninstrs)
		return 1;

	if (a->subop.last_instr_end_off < b->subop.last_instr_end_off)
		return -1;
	else if (a->subop.last_instr_end_off > b->subop.last_instr_end_off)
		return 1;

	return 0;
}

/**
 * nand_op_parser_exec_op - exec_op parser
 * @chip: the NAND chip
 * @parser: patterns description provided by the controller driver
 * @op: the NAND operation to address
 * @check_only: when true, the function only checks if @op can be handled but
 *		does not execute the operation
 *
 * Helper function designed to ease integration of NAND controller drivers that
 * only support a limited set of instruction sequences. The supported sequences
 * are described in @parser, and the framework takes care of splitting @op into
 * multiple sub-operations (if required) and pass them back to the ->exec()
 * callback of the matching pattern if @check_only is set to false.
 *
 * NAND controller drivers should call this function from their own ->exec_op()
 * implementation.
 *
 * Returns 0 on success, a negative error code otherwise. A failure can be
 * caused by an unsupported operation (none of the supported patterns is able
 * to handle the requested operation), or an error returned by one of the
 * matching pattern->exec() hook.
 */
int nand_op_parser_exec_op(struct nand_chip *chip,
			   const struct nand_op_parser *parser,
			   const struct nand_operation *op, bool check_only)
{
	struct nand_op_parser_ctx ctx = {
		.subop.cs = op->cs,
		.subop.instrs = op->instrs,
		.instrs = op->instrs,
		.ninstrs = op->ninstrs,
	};
	unsigned int i;

	while (ctx.subop.instrs < op->instrs + op->ninstrs) {
		const struct nand_op_parser_pattern *pattern;
		struct nand_op_parser_ctx best_ctx;
		int ret, best_pattern = -1;

		for (i = 0; i < parser->npatterns; i++) {
			struct nand_op_parser_ctx test_ctx = ctx;

			pattern = &parser->patterns[i];
			if (!nand_op_parser_match_pat(pattern, &test_ctx))
				continue;

			/* Keep the pattern that consumes the most. */
			if (best_pattern >= 0 &&
			    nand_op_parser_cmp_ctx(&test_ctx, &best_ctx) <= 0)
				continue;

			best_pattern = i;
			best_ctx = test_ctx;
		}

		if (best_pattern < 0) {
			pr_debug("->exec_op() parser: pattern not found!\n");
			return -ENOTSUPP;
		}

		ctx = best_ctx;
		nand_op_parser_trace(&ctx);

		if (!check_only) {
			pattern = &parser->patterns[best_pattern];
			ret = pattern->exec(chip, &ctx.subop);
			if (ret)
				return ret;
		}

		/*
		 * Update the context structure by pointing to the start of the
		 * next subop. If the last instruction was split, it stays in
		 * the window so its remainder is handled by the next subop.
		 */
		ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs;
		if (ctx.subop.last_instr_end_off)
			ctx.subop.instrs -= 1;

		ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nand_op_parser_exec_op);

static bool nand_instr_is_data(const struct nand_op_instr *instr)
{
	return instr && (instr->type == NAND_OP_DATA_IN_INSTR ||
			 instr->type == NAND_OP_DATA_OUT_INSTR);
}

static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
				      unsigned int instr_idx)
{
	return subop && instr_idx < subop->ninstrs;
}

static unsigned int nand_subop_get_start_off(const struct nand_subop *subop,
					     unsigned int instr_idx)
{
	/* Only the first instruction of a subop can start at an offset. */
	if (instr_idx)
		return 0;

	return subop->first_instr_start_off;
}

/**
 * nand_subop_get_addr_start_off - Get the start offset in an address array
 * @subop: The entire sub-operation
 * @instr_idx: Index of the instruction inside the sub-operation
 *
 * During driver development, one could be tempted to directly use the
 * ->addr.addrs field of address instructions. This is wrong as address
 * instructions might be split.
 *
 * Given an address instruction, returns the offset of the first cycle to issue.
2609 */ 2610 unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop, 2611 unsigned int instr_idx) 2612 { 2613 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) || 2614 subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR)) 2615 return 0; 2616 2617 return nand_subop_get_start_off(subop, instr_idx); 2618 } 2619 EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off); 2620 2621 /** 2622 * nand_subop_get_num_addr_cyc - Get the remaining address cycles to assert 2623 * @subop: The entire sub-operation 2624 * @instr_idx: Index of the instruction inside the sub-operation 2625 * 2626 * During driver development, one could be tempted to directly use the 2627 * ->addr->naddrs field of a data instruction. This is wrong as instructions 2628 * might be split. 2629 * 2630 * Given an address instruction, returns the number of address cycle to issue. 2631 */ 2632 unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop, 2633 unsigned int instr_idx) 2634 { 2635 int start_off, end_off; 2636 2637 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) || 2638 subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR)) 2639 return 0; 2640 2641 start_off = nand_subop_get_addr_start_off(subop, instr_idx); 2642 2643 if (instr_idx == subop->ninstrs - 1 && 2644 subop->last_instr_end_off) 2645 end_off = subop->last_instr_end_off; 2646 else 2647 end_off = subop->instrs[instr_idx].ctx.addr.naddrs; 2648 2649 return end_off - start_off; 2650 } 2651 EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc); 2652 2653 /** 2654 * nand_subop_get_data_start_off - Get the start offset in a data array 2655 * @subop: The entire sub-operation 2656 * @instr_idx: Index of the instruction inside the sub-operation 2657 * 2658 * During driver development, one could be tempted to directly use the 2659 * ->data->buf.{in,out} field of data instructions. This is wrong as data 2660 * instructions might be split. 2661 * 2662 * Given a data instruction, returns the offset to start from. 
2663 */ 2664 unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop, 2665 unsigned int instr_idx) 2666 { 2667 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) || 2668 !nand_instr_is_data(&subop->instrs[instr_idx]))) 2669 return 0; 2670 2671 return nand_subop_get_start_off(subop, instr_idx); 2672 } 2673 EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off); 2674 2675 /** 2676 * nand_subop_get_data_len - Get the number of bytes to retrieve 2677 * @subop: The entire sub-operation 2678 * @instr_idx: Index of the instruction inside the sub-operation 2679 * 2680 * During driver development, one could be tempted to directly use the 2681 * ->data->len field of a data instruction. This is wrong as data instructions 2682 * might be split. 2683 * 2684 * Returns the length of the chunk of data to send/receive. 2685 */ 2686 unsigned int nand_subop_get_data_len(const struct nand_subop *subop, 2687 unsigned int instr_idx) 2688 { 2689 int start_off = 0, end_off; 2690 2691 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) || 2692 !nand_instr_is_data(&subop->instrs[instr_idx]))) 2693 return 0; 2694 2695 start_off = nand_subop_get_data_start_off(subop, instr_idx); 2696 2697 if (instr_idx == subop->ninstrs - 1 && 2698 subop->last_instr_end_off) 2699 end_off = subop->last_instr_end_off; 2700 else 2701 end_off = subop->instrs[instr_idx].ctx.data.len; 2702 2703 return end_off - start_off; 2704 } 2705 EXPORT_SYMBOL_GPL(nand_subop_get_data_len); 2706 2707 /** 2708 * nand_reset - Reset and initialize a NAND device 2709 * @chip: The NAND chip 2710 * @chipnr: Internal die id 2711 * 2712 * Save the timings data structure, then apply SDR timings mode 0 (see 2713 * nand_reset_interface for details), do the reset operation, and apply 2714 * back the previous timings. 2715 * 2716 * Returns 0 on success, a negative error code otherwise. 
 */
int nand_reset(struct nand_chip *chip, int chipnr)
{
	int ret;

	ret = nand_reset_interface(chip, chipnr);
	if (ret)
		return ret;

	/*
	 * The CS line has to be released before we can apply the new NAND
	 * interface settings, hence this weird nand_select_target()
	 * nand_deselect_target() dance.
	 */
	nand_select_target(chip, chipnr);
	ret = nand_reset_op(chip);
	nand_deselect_target(chip);
	if (ret)
		return ret;

	/* Re-apply the interface mode saved by nand_reset_interface(). */
	ret = nand_setup_interface(chip, chipnr);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_reset);

/**
 * nand_get_features - wrapper to perform a GET_FEATURE
 * @chip: NAND chip info structure
 * @addr: feature address
 * @subfeature_param: the subfeature parameters, a four bytes array
 *
 * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
 * operation cannot be handled.
 */
int nand_get_features(struct nand_chip *chip, int addr,
		      u8 *subfeature_param)
{
	if (!nand_supports_get_features(chip, addr))
		return -ENOTSUPP;

	/* Legacy drivers may provide their own GET_FEATURES handler. */
	if (chip->legacy.get_features)
		return chip->legacy.get_features(chip, addr, subfeature_param);

	return nand_get_features_op(chip, addr, subfeature_param);
}

/**
 * nand_set_features - wrapper to perform a SET_FEATURE
 * @chip: NAND chip info structure
 * @addr: feature address
 * @subfeature_param: the subfeature parameters, a four bytes array
 *
 * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
 * operation cannot be handled.
 */
int nand_set_features(struct nand_chip *chip, int addr,
		      u8 *subfeature_param)
{
	if (!nand_supports_set_features(chip, addr))
		return -ENOTSUPP;

	/* Legacy drivers may provide their own SET_FEATURES handler. */
	if (chip->legacy.set_features)
		return chip->legacy.set_features(chip, addr, subfeature_param);

	return nand_set_features_op(chip, addr, subfeature_param);
}

/**
 * nand_read_page_raw_notsupp - dummy read raw page function
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Returns -ENOTSUPP unconditionally.
 */
int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf,
			       int oob_required, int page)
{
	return -ENOTSUPP;
}

/**
 * nand_read_page_raw - [INTERN] read raw page data without ecc
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Not for syndrome calculating ECC controllers, which use a special oob layout.
 */
int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
		       int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
	if (ret)
		return ret;

	/* OOB is fetched with a second bus transfer, only when requested. */
	if (oob_required) {
		ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
					false, false);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(nand_read_page_raw);

/**
 * nand_monolithic_read_page_raw - Monolithic page read in raw mode
 * @chip: NAND chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * This is a raw page read, ie.
 * without any error detection/correction.
 * Monolithic means we are requesting all the relevant data (main plus
 * eventually OOB) to be loaded in the NAND cache and sent over the
 * bus (from the NAND chip to the NAND controller) in a single
 * operation. This is an alternative to nand_read_page_raw(), which
 * first reads the main data, and if the OOB data is requested too,
 * then reads more data on the bus.
 */
int nand_monolithic_read_page_raw(struct nand_chip *chip, u8 *buf,
				  int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int size = mtd->writesize;
	u8 *read_buf = buf;
	int ret;

	if (oob_required) {
		size += mtd->oobsize;

		/*
		 * Bounce through the internal data buffer so main data and
		 * OOB land contiguously. NOTE(review): this relies on
		 * chip->oob_poi pointing into chip->data_buf right after the
		 * main area — confirm against nand_scan_tail().
		 */
		if (buf != chip->data_buf)
			read_buf = nand_get_data_buf(chip);
	}

	ret = nand_read_page_op(chip, page, 0, read_buf, size);
	if (ret)
		return ret;

	/* Copy the main data back to the caller's buffer if we bounced. */
	if (buf != chip->data_buf)
		memcpy(buf, read_buf, mtd->writesize);

	return 0;
}
EXPORT_SYMBOL(nand_monolithic_read_page_raw);

/**
 * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * We need a special oob layout and handling even when OOB isn't used.
 */
static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf,
				       int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	uint8_t *oob = chip->oob_poi;
	int steps, size, ret;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/* Read data/prepad/ecc/postpad interleaved, one ECC step at a time. */
	for (steps = chip->ecc.steps; steps > 0; steps--) {
		ret = nand_read_data_op(chip, buf, eccsize, false, false);
		if (ret)
			return ret;

		buf += eccsize;

		if (chip->ecc.prepad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		ret = nand_read_data_op(chip, oob, eccbytes, false, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Read whatever OOB bytes were not covered by the loop above. */
	size = mtd->oobsize - (oob - chip->oob_poi);
	if (size) {
		ret = nand_read_data_op(chip, oob, size, false, false);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 */
static int nand_read_page_swecc(struct nand_chip *chip, uint8_t *buf,
				int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	unsigned int max_bitflips = 0;

	/*
	 * NOTE(review): the raw read's return value is ignored here; an I/O
	 * error would silently feed stale data to the ECC step — worth
	 * checking.
	 */
	chip->ecc.read_page_raw(chip, buf, 1, page);

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
		chip->ecc.calculate(chip, p, &ecc_calc[i]);

	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	eccsteps = chip->ecc.steps;
	p = buf;

	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}

/**
 * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
 * @chip: nand chip info structure
 * @data_offs: offset of requested data within the page
 * @readlen: data length
 * @bufpoi: buffer to store read data
 * @page: page number to read
 */
static int nand_read_subpage(struct nand_chip *chip, uint32_t data_offs,
			     uint32_t readlen, uint8_t *bufpoi, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int start_step, end_step, num_steps, ret;
	uint8_t *p;
	int data_col_addr, i, gaps = 0;
	int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
	int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
	int index, section = 0;
	unsigned int max_bitflips = 0;
	struct mtd_oob_region oobregion = { };

	/* Column address within the page aligned to ECC size (256bytes) */
	start_step = data_offs / chip->ecc.size;
	end_step = (data_offs + readlen - 1) / chip->ecc.size;
	num_steps = end_step - start_step + 1;
	index = start_step * chip->ecc.bytes;

	/* Data size aligned to ECC ecc.size */
	datafrag_len = num_steps * chip->ecc.size;
	eccfrag_len = num_steps * chip->ecc.bytes;

	data_col_addr = start_step * chip->ecc.size;
	/* If we read not a page aligned data */
	p = bufpoi + data_col_addr;
	ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len);
	if (ret)
		return ret;

	/* Calculate ECC */
	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
		chip->ecc.calculate(chip, p, &chip->ecc.calc_buf[i]);

	/*
	 * The performance is faster if we position offsets according to
	 * ecc.pos. Let's make sure that there are no gaps in ECC positions.
	 */
	ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
	if (ret)
		return ret;

	if (oobregion.length < eccfrag_len)
		gaps = 1;

	if (gaps) {
		/* ECC bytes are scattered: read the whole OOB area instead. */
		ret = nand_change_read_column_op(chip, mtd->writesize,
						 chip->oob_poi, mtd->oobsize,
						 false);
		if (ret)
			return ret;
	} else {
		/*
		 * Send the command to read the particular ECC bytes take care
		 * about buswidth alignment in read_buf.
		 */
		aligned_pos = oobregion.offset & ~(busw - 1);
		aligned_len = eccfrag_len;
		if (oobregion.offset & (busw - 1))
			aligned_len++;
		if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
		    (busw - 1))
			aligned_len++;

		ret = nand_change_read_column_op(chip,
						 mtd->writesize + aligned_pos,
						 &chip->oob_poi[aligned_pos],
						 aligned_len, false);
		if (ret)
			return ret;
	}

	ret = mtd_ooblayout_get_eccbytes(mtd, chip->ecc.code_buf,
					 chip->oob_poi, index, eccfrag_len);
	if (ret)
		return ret;

	p = bufpoi + data_col_addr;
	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
		int stat;

		stat = chip->ecc.correct(chip, p, &chip->ecc.code_buf[i],
					 &chip->ecc.calc_buf[i]);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
							   &chip->ecc.code_buf[i],
							   chip->ecc.bytes,
							   NULL, 0,
							   chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}

/**
 * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Not for syndrome calculating ECC controllers which need a special oob layout.
 */
static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
				int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	unsigned int max_bitflips = 0;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/* Read the page one ECC step at a time, with the HW engine armed. */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false, false);
		if (ret)
			return ret;

		chip->ecc.calculate(chip, p, &ecc_calc[i]);
	}

	ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false,
				false);
	if (ret)
		return ret;

	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	eccsteps = chip->ecc.steps;
	p = buf;

	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, eccsize,
							   &ecc_code[i], eccbytes,
							   NULL, 0,
							   chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}

/**
 * nand_read_page_hwecc_oob_first - Hardware ECC page read with ECC
 *                                  data read from OOB area
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to
 *                 chip->oob_poi
 * @page: page number to read
 *
 * Hardware ECC for large page chips, which requires the ECC data to be
 * extracted from the OOB before the actual data is read.
 */
int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf,
				   int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	unsigned int max_bitflips = 0;

	/* Read the OOB area first */
	ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
	if (ret)
		return ret;

	/* Move read cursor to start of page */
	ret = nand_change_read_column_op(chip, 0, NULL, 0, false);
	if (ret)
		return ret;

	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	/* ECC codes are already in hand: correct each step as it is read. */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false, false);
		if (ret)
			return ret;

		stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, eccsize,
							   &ecc_code[i],
							   eccbytes, NULL, 0,
							   chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}
EXPORT_SYMBOL_GPL(nand_read_page_hwecc_oob_first);

/**
 * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
 * @chip: nand chip info structure
 * @buf:
 *	  buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * The hw generator calculates the error syndrome automatically. Therefore we
 * need a special oob layout and handling.
 */
static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
				   int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret, i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
	uint8_t *p = buf;
	uint8_t *oob = chip->oob_poi;
	unsigned int max_bitflips = 0;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false, false);
		if (ret)
			return ret;

		if (chip->ecc.prepad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		chip->ecc.hwctl(chip, NAND_ECC_READSYN);

		ret = nand_read_data_op(chip, oob, eccbytes, false, false);
		if (ret)
			return ret;

		stat = chip->ecc.correct(chip, p, oob, NULL);

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}

		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
							   oob - eccpadbytes,
							   eccpadbytes,
							   NULL, 0,
							   chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}

	/* Calculate remaining oob bytes */
	i = mtd->oobsize - (oob - chip->oob_poi);
	if (i) {
		ret = nand_read_data_op(chip, oob, i, false, false);
		if (ret)
			return ret;
	}

	return max_bitflips;
}

/**
 * nand_transfer_oob - [INTERN] Transfer oob to client buffer
 * @chip: NAND chip object
 * @oob: oob destination address
 * @ops: oob ops structure
 * @len: size of oob to transfer
 */
static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
				  struct mtd_oob_ops *ops, size_t len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	switch (ops->mode) {

	/* Raw/place modes copy the OOB bytes verbatim from the given offset. */
	case MTD_OPS_PLACE_OOB:
	case MTD_OPS_RAW:
		memcpy(oob, chip->oob_poi + ops->ooboffs, len);
		return oob + len;

	/* Auto mode extracts only the free (non-ECC) OOB bytes. */
	case MTD_OPS_AUTO_OOB:
		ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
						  ops->ooboffs, len);
		BUG_ON(ret);
		return oob + len;

	default:
		BUG();
	}
	return NULL;
}

static void rawnand_enable_cont_reads(struct nand_chip *chip, unsigned int page,
				      u32 readlen, int col)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int first_page, last_page;

	chip->cont_read.ongoing = false;

	if (!chip->controller->supported_op.cont_read)
		return;

	/*
	 * Don't bother making any calculations if the length is too small.
	 * Side effect: avoids possible integer underflows below.
	 */
	if (readlen < (2 * mtd->writesize))
		return;

	/* Derive the page where continuous read should start (the first full page read) */
	first_page = page;
	if (col)
		first_page++;

	/* Derive the page where continuous read should stop (the last full page read) */
	last_page = page + ((col + readlen) / mtd->writesize) - 1;

	/* Configure and enable continuous read when suitable */
	if (first_page < last_page) {
		chip->cont_read.first_page = first_page;
		chip->cont_read.last_page = last_page;
		chip->cont_read.ongoing = true;
		/* May reset the ongoing flag */
		rawnand_cap_cont_reads(chip);
	}
}

static void rawnand_cont_read_skip_first_page(struct nand_chip *chip, unsigned int page)
{
	/* Only applies when @page is the pending continuous-read start page. */
	if (!chip->cont_read.ongoing || page != chip->cont_read.first_page)
		return;

	chip->cont_read.first_page++;
	rawnand_cap_cont_reads(chip);
}

/**
 * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
 * @chip: NAND chip object
 * @retry_mode: the retry mode to use
 *
 * Some vendors supply a special command to shift the Vt threshold, to be used
 * when there are too many bitflips in a page (i.e., ECC error). After setting
 * a new threshold, the host should retry reading the page.
 */
static int nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
{
	pr_debug("setting READ RETRY mode %d\n", retry_mode);

	if (retry_mode >= chip->read_retries)
		return -EINVAL;

	if (!chip->ops.setup_read_retry)
		return -EOPNOTSUPP;

	return chip->ops.setup_read_retry(chip, retry_mode);
}

/*
 * Wait for the chip to become ready again after a READ, but only for parts
 * flagged NAND_NEED_READRDY (small-page chips that need an explicit tR wait).
 */
static void nand_wait_readrdy(struct nand_chip *chip)
{
	const struct nand_interface_config *conf;

	if (!(chip->options & NAND_NEED_READRDY))
		return;

	conf = nand_get_interface_config(chip);
	WARN_ON(nand_wait_rdy_op(chip, NAND_COMMON_TIMING_MS(conf, tR_max), 0));
}

/**
 * nand_do_read_ops - [INTERN] Read data with ECC
 * @chip: NAND chip object
 * @from: offset to read from
 * @ops: oob ops structure
 *
 * Internal function. Called with chip held.
 */
static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
			    struct mtd_oob_ops *ops)
{
	int chipnr, page, realpage, col, bytes, aligned, oob_required;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret = 0;
	uint32_t readlen = ops->len;
	uint32_t oobreadlen = ops->ooblen;
	uint32_t max_oobsize = mtd_oobavail(mtd, ops);

	uint8_t *bufpoi, *oob, *buf;
	int use_bounce_buf;
	unsigned int max_bitflips = 0;
	int retry_mode = 0;
	bool ecc_fail = false;

	/* Check if the region is secured */
	if (nand_region_is_secured(chip, from, readlen))
		return -EIO;

	chipnr = (int)(from >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	realpage = (int)(from >> chip->page_shift);
	page = realpage & chip->pagemask;

	col = (int)(from & (mtd->writesize - 1));

	buf = ops->datbuf;
	oob = ops->oobbuf;
	oob_required = oob ? 1 : 0;

	if (likely(ops->mode != MTD_OPS_RAW))
		rawnand_enable_cont_reads(chip, page, readlen, col);

	while (1) {
		/* Snapshot stats so per-page ECC failures can be detected */
		struct mtd_ecc_stats ecc_stats = mtd->ecc_stats;

		bytes = min(mtd->writesize - col, readlen);
		aligned = (bytes == mtd->writesize);

		if (!aligned)
			use_bounce_buf = 1;
		else if (chip->options & NAND_USES_DMA)
			use_bounce_buf = !virt_addr_valid(buf) ||
					 !IS_ALIGNED((unsigned long)buf,
						     chip->buf_align);
		else
			use_bounce_buf = 0;

		/* Is the current page in the buffer? */
		if (realpage != chip->pagecache.page || oob) {
			bufpoi = use_bounce_buf ? chip->data_buf : buf;

			if (use_bounce_buf && aligned)
				pr_debug("%s: using read bounce buffer for buf@%p\n",
					 __func__, buf);

read_retry:
			/*
			 * Now read the page into the buffer. Absent an error,
			 * the read methods return max bitflips per ecc step.
			 */
			if (unlikely(ops->mode == MTD_OPS_RAW))
				ret = chip->ecc.read_page_raw(chip, bufpoi,
							      oob_required,
							      page);
			else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
				 !oob)
				ret = chip->ecc.read_subpage(chip, col, bytes,
							     bufpoi, page);
			else
				ret = chip->ecc.read_page(chip, bufpoi,
							  oob_required, page);
			if (ret < 0) {
				if (use_bounce_buf)
					/* Invalidate page cache */
					chip->pagecache.page = -1;
				break;
			}

			/*
			 * Copy back the data in the initial buffer when reading
			 * partial pages or when a bounce buffer is required.
			 */
			if (use_bounce_buf) {
				if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
				    !(mtd->ecc_stats.failed - ecc_stats.failed) &&
				    (ops->mode != MTD_OPS_RAW)) {
					/* Whole page read cleanly: cache it */
					chip->pagecache.page = realpage;
					chip->pagecache.bitflips = ret;
				} else {
					/* Invalidate page cache */
					chip->pagecache.page = -1;
				}
				memcpy(buf, bufpoi + col, bytes);
			}

			if (unlikely(oob)) {
				int toread = min(oobreadlen, max_oobsize);

				if (toread) {
					oob = nand_transfer_oob(chip, oob, ops,
								toread);
					oobreadlen -= toread;
				}
			}

			nand_wait_readrdy(chip);

			if (mtd->ecc_stats.failed - ecc_stats.failed) {
				if (retry_mode + 1 < chip->read_retries) {
					/* Shift Vt and re-read this page */
					retry_mode++;
					ret = nand_setup_read_retry(chip,
								    retry_mode);
					if (ret < 0)
						break;

					/* Reset ecc_stats; retry */
					mtd->ecc_stats = ecc_stats;
					goto read_retry;
				} else {
					/* No more retry modes; real failure */
					ecc_fail = true;
				}
			}

			buf += bytes;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		} else {
			/* Page is cached: serve it from chip->data_buf */
			memcpy(buf, chip->data_buf + col, bytes);
			buf += bytes;
			max_bitflips = max_t(unsigned int, max_bitflips,
					     chip->pagecache.bitflips);

			rawnand_cont_read_skip_first_page(chip, page);
		}

		readlen -= bytes;

		/* Reset to retry mode 0 */
		if (retry_mode) {
			ret = nand_setup_read_retry(chip, 0);
			if (ret < 0)
				break;
			retry_mode = 0;
		}

		if (!readlen)
			break;

		/* For subsequent reads align to page boundary */
		col = 0;
		/* Increment page address */
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}
	nand_deselect_target(chip);

	/* A continuous read must have been fully consumed by now */
	if (WARN_ON_ONCE(chip->cont_read.ongoing))
		chip->cont_read.ongoing = false;

	ops->retlen = ops->len - (size_t) readlen;
	if (oob)
		ops->oobretlen = ops->ooblen - oobreadlen;

	if (ret < 0)
		return ret;

	if (ecc_fail)
		return -EBADMSG;

	return max_bitflips;
}

/**
 * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
 * @chip: nand chip info structure
 * @page: page number to read
 */
int nand_read_oob_std(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
EXPORT_SYMBOL(nand_read_oob_std);

/**
 * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
 *			    with syndromes
 * @chip: nand chip info structure
 * @page: page number to read
 *
 * Collects the interleaved OOB chunks (prepad+ECC+postpad after each data
 * chunk) into chip->oob_poi by seeking over the data areas.
 */
static int nand_read_oob_syndrome(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int length = mtd->oobsize;
	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
	int eccsize = chip->ecc.size;
	uint8_t *bufpoi = chip->oob_poi;
	int i, toread, sndrnd = 0, pos, ret;

	ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; i < chip->ecc.steps; i++) {
		/* Not the first chunk: seek past the next data area first */
		if (sndrnd) {
			/* NOTE(review): this inner declaration shadows the outer ret (-Wshadow) */
			int ret;

			pos = eccsize + i * (eccsize + chunk);
			if (mtd->writesize > 512)
				ret = nand_change_read_column_op(chip, pos,
								 NULL, 0,
								 false);
			else
				ret = nand_read_page_op(chip, page, pos, NULL,
							0);

			if (ret)
				return ret;
		} else
			sndrnd = 1;
		toread = min_t(int, length, chunk);

		ret = nand_read_data_op(chip, bufpoi, toread, false, false);
		if (ret)
			return ret;

		bufpoi += toread;
		length -= toread;
	}
	/* Trailing OOB bytes not covered by any ECC step */
	if (length > 0) {
		ret = nand_read_data_op(chip, bufpoi, length, false, false);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
 * @chip: nand chip info structure
 * @page: page number to write
 */
int nand_write_oob_std(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
				 mtd->oobsize);
}
EXPORT_SYMBOL(nand_write_oob_std);

/**
 * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
 *			     with syndrome - only for large page flash
 * @chip: nand chip info structure
 * @page: page number to write
 */
static int nand_write_oob_syndrome(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
	int eccsize = chip->ecc.size, length = mtd->oobsize;
	int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
	const uint8_t *bufpoi = chip->oob_poi;

	/*
	 * data-ecc-data-ecc ... ecc-oob
	 * or
	 * data-pad-ecc-pad-data-pad .... ecc-pad-oob
	 */
	if (!chip->ecc.prepad && !chip->ecc.postpad) {
		/* OOB region is contiguous at the end: write it in one go */
		pos = steps * (eccsize + chunk);
		steps = 0;
	} else
		pos = eccsize;

	ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; i < steps; i++) {
		if (sndcmd) {
			if (mtd->writesize <= 512) {
				/* No random-out on small pages: pad 0xff over the data area */
				uint32_t fill = 0xFFFFFFFF;

				len = eccsize;
				while (len > 0) {
					int num = min_t(int, len, 4);

					ret = nand_write_data_op(chip, &fill,
								 num, false);
					if (ret)
						return ret;

					len -= num;
				}
			} else {
				pos = eccsize + i * (eccsize + chunk);
				ret = nand_change_write_column_op(chip, pos,
								  NULL, 0,
								  false);
				if (ret)
					return ret;
			}
		} else
			sndcmd = 1;
		len = min_t(int, length, chunk);

		ret = nand_write_data_op(chip, bufpoi, len, false);
		if (ret)
			return ret;

		bufpoi += len;
		length -= len;
	}
	/* Trailing OOB bytes not covered by any ECC step */
	if (length > 0) {
		ret = nand_write_data_op(chip, bufpoi, length, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}

/**
 * nand_do_read_oob - [INTERN] NAND read out-of-band
 * @chip: NAND chip object
 * @from: offset to read from
 * @ops: oob operations description structure
 *
 * NAND read out-of-band data from the spare area.
 */
static int nand_do_read_oob(struct nand_chip *chip, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int max_bitflips = 0;
	int page, realpage, chipnr;
	struct mtd_ecc_stats stats;
	int readlen = ops->ooblen;
	int len;
	uint8_t *buf = ops->oobbuf;
	int ret = 0;

	pr_debug("%s: from = 0x%08Lx, len = %i\n",
			__func__, (unsigned long long)from, readlen);

	/* Check if the region is secured */
	if (nand_region_is_secured(chip, from, readlen))
		return -EIO;

	/* Snapshot stats to detect new uncorrectable errors at the end */
	stats = mtd->ecc_stats;

	len = mtd_oobavail(mtd, ops);

	chipnr = (int)(from >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	/* Shift to get page */
	realpage = (int)(from >> chip->page_shift);
	page = realpage & chip->pagemask;

	while (1) {
		if (ops->mode == MTD_OPS_RAW)
			ret = chip->ecc.read_oob_raw(chip, page);
		else
			ret = chip->ecc.read_oob(chip, page);

		if (ret < 0)
			break;

		len = min(len, readlen);
		buf = nand_transfer_oob(chip, buf, ops, len);

		nand_wait_readrdy(chip);

		max_bitflips = max_t(unsigned int, max_bitflips, ret);

		readlen -= len;
		if (!readlen)
			break;

		/* Increment page address */
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}
	nand_deselect_target(chip);

	ops->oobretlen = ops->ooblen - readlen;

	if (ret < 0)
		return ret;

	if (mtd->ecc_stats.failed - stats.failed)
		return -EBADMSG;

	return max_bitflips;
}

/**
 * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
 * @mtd: MTD device structure
 * @from: offset to read from
 * @ops: oob operation description structure
 *
 * NAND read data and/or out-of-band data.
 */
static int nand_read_oob(struct mtd_info *mtd, loff_t from,
			 struct mtd_oob_ops *ops)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mtd_ecc_stats old_stats;
	int ret;

	ops->retlen = 0;

	if (ops->mode != MTD_OPS_PLACE_OOB &&
	    ops->mode != MTD_OPS_AUTO_OOB &&
	    ops->mode != MTD_OPS_RAW)
		return -ENOTSUPP;

	nand_get_device(chip);

	old_stats = mtd->ecc_stats;

	/* datbuf == NULL means an OOB-only request */
	if (!ops->datbuf)
		ret = nand_do_read_oob(chip, from, ops);
	else
		ret = nand_do_read_ops(chip, from, ops);

	/* Report per-request ECC statistics when the caller asked for them */
	if (ops->stats) {
		ops->stats->uncorrectable_errors +=
			mtd->ecc_stats.failed - old_stats.failed;
		ops->stats->corrected_bitflips +=
			mtd->ecc_stats.corrected - old_stats.corrected;
	}

	nand_release_device(chip);
	return ret;
}

/**
 * nand_write_page_raw_notsupp - dummy raw page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * Returns -ENOTSUPP unconditionally.
 */
int nand_write_page_raw_notsupp(struct nand_chip *chip, const u8 *buf,
				int oob_required, int page)
{
	return -ENOTSUPP;
}

/**
 * nand_write_page_raw - [INTERN] raw page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * Not for syndrome calculating ECC controllers, which use a special oob layout.
 */
int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
			int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	/* Send the main data area first */
	ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
	if (ret)
		return ret;

	/* Then latch the OOB bytes, if requested, before programming */
	if (oob_required) {
		ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
					 false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
EXPORT_SYMBOL(nand_write_page_raw);

/**
 * nand_monolithic_write_page_raw - Monolithic page write in raw mode
 * @chip: NAND chip info structure
 * @buf: data buffer to write
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * This is a raw page write, ie. without any error detection/correction.
 * Monolithic means we are requesting all the relevant data (main plus
 * eventually OOB) to be sent over the bus and effectively programmed
 * into the NAND chip arrays in a single operation. This is an
 * alternative to nand_write_page_raw(), which first sends the main
 * data, then eventually send the OOB data by latching more data
 * cycles on the NAND bus, and finally sends the program command to
 * synchronize the NAND chip cache.
 */
int nand_monolithic_write_page_raw(struct nand_chip *chip, const u8 *buf,
				   int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int size = mtd->writesize;
	u8 *write_buf = (u8 *)buf;

	if (oob_required) {
		size += mtd->oobsize;

		/*
		 * Main data and OOB must be contiguous for a single program
		 * op, so stage them in the chip's data buffer (chip->oob_poi
		 * already lives right after it) unless buf is that buffer.
		 */
		if (buf != chip->data_buf) {
			write_buf = nand_get_data_buf(chip);
			memcpy(write_buf, buf, mtd->writesize);
		}
	}

	return nand_prog_page_op(chip, page, 0, write_buf, size);
}
EXPORT_SYMBOL(nand_monolithic_write_page_raw);

/**
 * nand_write_page_raw_syndrome - [INTERN] raw page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * We need a special oob layout and handling even when ECC isn't checked.
 */
static int nand_write_page_raw_syndrome(struct nand_chip *chip,
					const uint8_t *buf, int oob_required,
					int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	uint8_t *oob = chip->oob_poi;
	int steps, size, ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/* Interleave data | prepad | ECC | postpad for every ECC step */
	for (steps = chip->ecc.steps; steps > 0; steps--) {
		ret = nand_write_data_op(chip, buf, eccsize, false);
		if (ret)
			return ret;

		buf += eccsize;

		if (chip->ecc.prepad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		ret = nand_write_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Write out whatever OOB bytes remain after the last step */
	size = mtd->oobsize - (oob - chip->oob_poi);
	if (size) {
		ret = nand_write_data_op(chip, oob, size, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}

/**
 * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 */
static int nand_write_page_swecc(struct nand_chip *chip, const uint8_t *buf,
				 int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	const uint8_t *p = buf;

	/* Software ECC calculation */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
		chip->ecc.calculate(chip, p, &ecc_calc[i]);

	/* Place the ECC bytes in the OOB area, then do a plain raw write */
	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	return chip->ecc.write_page_raw(chip, buf, 1, page);
}

/**
 * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 */
static int nand_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
				 int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	const uint8_t *p = buf;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/* Stream data per ECC step, collecting the HW-computed ECC bytes */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		ret = nand_write_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		chip->ecc.calculate(chip, p, &ecc_calc[i]);
	}

	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
	if (ret)
		return ret;

	return nand_prog_page_end_op(chip);
}

/**
 * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
 * @chip: nand chip info structure
 * @offset: column address of subpage within the page
 * @data_len: data length
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 */
static int nand_write_subpage_hwecc(struct nand_chip *chip, uint32_t offset,
				    uint32_t data_len, const uint8_t *buf,
				    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	uint8_t *oob_buf = chip->oob_poi;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	int ecc_steps = chip->ecc.steps;
	uint32_t start_step = offset / ecc_size;
	uint32_t end_step = (offset + data_len - 1) / ecc_size;
	int oob_bytes = mtd->oobsize / ecc_steps;
	int step, ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (step = 0; step < ecc_steps; step++) {
		/* configure controller for WRITE access */
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		/* write data (untouched subpages already masked by 0xFF) */
		ret = nand_write_data_op(chip, buf, ecc_size, false);
		if (ret)
			return ret;

		/* mask ECC of un-touched subpages by padding 0xFF */
		if ((step < start_step) || (step > end_step))
			memset(ecc_calc, 0xff, ecc_bytes);
		else
			chip->ecc.calculate(chip, buf, ecc_calc);

		/* mask OOB of un-touched subpages by padding 0xFF */
		/* if oob_required, preserve OOB metadata of written subpage */
		if (!oob_required || (step < start_step) || (step > end_step))
			memset(oob_buf, 0xff, oob_bytes);

		buf += ecc_size;
		ecc_calc += ecc_bytes;
		oob_buf += oob_bytes;
	}

	/* copy calculated ECC for whole page to chip->buffer->oob */
	/* this include masked-value(0xFF) for unwritten subpages */
	ecc_calc = chip->ecc.calc_buf;
	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	/* write OOB buffer to NAND device */
	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
	if (ret)
		return ret;

	return nand_prog_page_end_op(chip);
}

/**
 * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * The hw generator calculates the error syndrome automatically. Therefore we
 * need a special oob layout and handling.
4199 */ 4200 static int nand_write_page_syndrome(struct nand_chip *chip, const uint8_t *buf, 4201 int oob_required, int page) 4202 { 4203 struct mtd_info *mtd = nand_to_mtd(chip); 4204 int i, eccsize = chip->ecc.size; 4205 int eccbytes = chip->ecc.bytes; 4206 int eccsteps = chip->ecc.steps; 4207 const uint8_t *p = buf; 4208 uint8_t *oob = chip->oob_poi; 4209 int ret; 4210 4211 ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0); 4212 if (ret) 4213 return ret; 4214 4215 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { 4216 chip->ecc.hwctl(chip, NAND_ECC_WRITE); 4217 4218 ret = nand_write_data_op(chip, p, eccsize, false); 4219 if (ret) 4220 return ret; 4221 4222 if (chip->ecc.prepad) { 4223 ret = nand_write_data_op(chip, oob, chip->ecc.prepad, 4224 false); 4225 if (ret) 4226 return ret; 4227 4228 oob += chip->ecc.prepad; 4229 } 4230 4231 chip->ecc.calculate(chip, p, oob); 4232 4233 ret = nand_write_data_op(chip, oob, eccbytes, false); 4234 if (ret) 4235 return ret; 4236 4237 oob += eccbytes; 4238 4239 if (chip->ecc.postpad) { 4240 ret = nand_write_data_op(chip, oob, chip->ecc.postpad, 4241 false); 4242 if (ret) 4243 return ret; 4244 4245 oob += chip->ecc.postpad; 4246 } 4247 } 4248 4249 /* Calculate remaining oob bytes */ 4250 i = mtd->oobsize - (oob - chip->oob_poi); 4251 if (i) { 4252 ret = nand_write_data_op(chip, oob, i, false); 4253 if (ret) 4254 return ret; 4255 } 4256 4257 return nand_prog_page_end_op(chip); 4258 } 4259 4260 /** 4261 * nand_write_page - write one page 4262 * @chip: NAND chip descriptor 4263 * @offset: address offset within the page 4264 * @data_len: length of actual data to be written 4265 * @buf: the data to write 4266 * @oob_required: must write chip->oob_poi to OOB 4267 * @page: page number to write 4268 * @raw: use _raw version of write_page 4269 */ 4270 static int nand_write_page(struct nand_chip *chip, uint32_t offset, 4271 int data_len, const uint8_t *buf, int oob_required, 4272 int page, int raw) 4273 { 4274 struct 
mtd_info *mtd = nand_to_mtd(chip); 4275 int status, subpage; 4276 4277 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && 4278 chip->ecc.write_subpage) 4279 subpage = offset || (data_len < mtd->writesize); 4280 else 4281 subpage = 0; 4282 4283 if (unlikely(raw)) 4284 status = chip->ecc.write_page_raw(chip, buf, oob_required, 4285 page); 4286 else if (subpage) 4287 status = chip->ecc.write_subpage(chip, offset, data_len, buf, 4288 oob_required, page); 4289 else 4290 status = chip->ecc.write_page(chip, buf, oob_required, page); 4291 4292 if (status < 0) 4293 return status; 4294 4295 return 0; 4296 } 4297 4298 #define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0) 4299 4300 /** 4301 * nand_do_write_ops - [INTERN] NAND write with ECC 4302 * @chip: NAND chip object 4303 * @to: offset to write to 4304 * @ops: oob operations description structure 4305 * 4306 * NAND write with ECC. 4307 */ 4308 static int nand_do_write_ops(struct nand_chip *chip, loff_t to, 4309 struct mtd_oob_ops *ops) 4310 { 4311 struct mtd_info *mtd = nand_to_mtd(chip); 4312 int chipnr, realpage, page, column; 4313 uint32_t writelen = ops->len; 4314 4315 uint32_t oobwritelen = ops->ooblen; 4316 uint32_t oobmaxlen = mtd_oobavail(mtd, ops); 4317 4318 uint8_t *oob = ops->oobbuf; 4319 uint8_t *buf = ops->datbuf; 4320 int ret; 4321 int oob_required = oob ? 
1 : 0; 4322 4323 ops->retlen = 0; 4324 if (!writelen) 4325 return 0; 4326 4327 /* Reject writes, which are not page aligned */ 4328 if (NOTALIGNED(to) || NOTALIGNED(ops->len)) { 4329 pr_notice("%s: attempt to write non page aligned data\n", 4330 __func__); 4331 return -EINVAL; 4332 } 4333 4334 /* Check if the region is secured */ 4335 if (nand_region_is_secured(chip, to, writelen)) 4336 return -EIO; 4337 4338 column = to & (mtd->writesize - 1); 4339 4340 chipnr = (int)(to >> chip->chip_shift); 4341 nand_select_target(chip, chipnr); 4342 4343 /* Check, if it is write protected */ 4344 if (nand_check_wp(chip)) { 4345 ret = -EIO; 4346 goto err_out; 4347 } 4348 4349 realpage = (int)(to >> chip->page_shift); 4350 page = realpage & chip->pagemask; 4351 4352 /* Invalidate the page cache, when we write to the cached page */ 4353 if (to <= ((loff_t)chip->pagecache.page << chip->page_shift) && 4354 ((loff_t)chip->pagecache.page << chip->page_shift) < (to + ops->len)) 4355 chip->pagecache.page = -1; 4356 4357 /* Don't allow multipage oob writes with offset */ 4358 if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) { 4359 ret = -EINVAL; 4360 goto err_out; 4361 } 4362 4363 while (1) { 4364 int bytes = mtd->writesize; 4365 uint8_t *wbuf = buf; 4366 int use_bounce_buf; 4367 int part_pagewr = (column || writelen < mtd->writesize); 4368 4369 if (part_pagewr) 4370 use_bounce_buf = 1; 4371 else if (chip->options & NAND_USES_DMA) 4372 use_bounce_buf = !virt_addr_valid(buf) || 4373 !IS_ALIGNED((unsigned long)buf, 4374 chip->buf_align); 4375 else 4376 use_bounce_buf = 0; 4377 4378 /* 4379 * Copy the data from the initial buffer when doing partial page 4380 * writes or when a bounce buffer is required. 
4381 */ 4382 if (use_bounce_buf) { 4383 pr_debug("%s: using write bounce buffer for buf@%p\n", 4384 __func__, buf); 4385 if (part_pagewr) 4386 bytes = min_t(int, bytes - column, writelen); 4387 wbuf = nand_get_data_buf(chip); 4388 memset(wbuf, 0xff, mtd->writesize); 4389 memcpy(&wbuf[column], buf, bytes); 4390 } 4391 4392 if (unlikely(oob)) { 4393 size_t len = min(oobwritelen, oobmaxlen); 4394 oob = nand_fill_oob(chip, oob, len, ops); 4395 oobwritelen -= len; 4396 } else { 4397 /* We still need to erase leftover OOB data */ 4398 memset(chip->oob_poi, 0xff, mtd->oobsize); 4399 } 4400 4401 ret = nand_write_page(chip, column, bytes, wbuf, 4402 oob_required, page, 4403 (ops->mode == MTD_OPS_RAW)); 4404 if (ret) 4405 break; 4406 4407 writelen -= bytes; 4408 if (!writelen) 4409 break; 4410 4411 column = 0; 4412 buf += bytes; 4413 realpage++; 4414 4415 page = realpage & chip->pagemask; 4416 /* Check, if we cross a chip boundary */ 4417 if (!page) { 4418 chipnr++; 4419 nand_deselect_target(chip); 4420 nand_select_target(chip, chipnr); 4421 } 4422 } 4423 4424 ops->retlen = ops->len - writelen; 4425 if (unlikely(oob)) 4426 ops->oobretlen = ops->ooblen; 4427 4428 err_out: 4429 nand_deselect_target(chip); 4430 return ret; 4431 } 4432 4433 /** 4434 * panic_nand_write - [MTD Interface] NAND write with ECC 4435 * @mtd: MTD device structure 4436 * @to: offset to write to 4437 * @len: number of bytes to write 4438 * @retlen: pointer to variable to store the number of written bytes 4439 * @buf: the data to write 4440 * 4441 * NAND write with ECC. Used when performing writes in interrupt context, this 4442 * may for example be called by mtdoops when writing an oops while in panic. 
4443 */ 4444 static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len, 4445 size_t *retlen, const uint8_t *buf) 4446 { 4447 struct nand_chip *chip = mtd_to_nand(mtd); 4448 int chipnr = (int)(to >> chip->chip_shift); 4449 struct mtd_oob_ops ops; 4450 int ret; 4451 4452 nand_select_target(chip, chipnr); 4453 4454 /* Wait for the device to get ready */ 4455 panic_nand_wait(chip, 400); 4456 4457 memset(&ops, 0, sizeof(ops)); 4458 ops.len = len; 4459 ops.datbuf = (uint8_t *)buf; 4460 ops.mode = MTD_OPS_PLACE_OOB; 4461 4462 ret = nand_do_write_ops(chip, to, &ops); 4463 4464 *retlen = ops.retlen; 4465 return ret; 4466 } 4467 4468 /** 4469 * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band 4470 * @mtd: MTD device structure 4471 * @to: offset to write to 4472 * @ops: oob operation description structure 4473 */ 4474 static int nand_write_oob(struct mtd_info *mtd, loff_t to, 4475 struct mtd_oob_ops *ops) 4476 { 4477 struct nand_chip *chip = mtd_to_nand(mtd); 4478 int ret = 0; 4479 4480 ops->retlen = 0; 4481 4482 nand_get_device(chip); 4483 4484 switch (ops->mode) { 4485 case MTD_OPS_PLACE_OOB: 4486 case MTD_OPS_AUTO_OOB: 4487 case MTD_OPS_RAW: 4488 break; 4489 4490 default: 4491 goto out; 4492 } 4493 4494 if (!ops->datbuf) 4495 ret = nand_do_write_oob(chip, to, ops); 4496 else 4497 ret = nand_do_write_ops(chip, to, ops); 4498 4499 out: 4500 nand_release_device(chip); 4501 return ret; 4502 } 4503 4504 /** 4505 * nand_erase - [MTD Interface] erase block(s) 4506 * @mtd: MTD device structure 4507 * @instr: erase instruction 4508 * 4509 * Erase one ore more blocks. 4510 */ 4511 static int nand_erase(struct mtd_info *mtd, struct erase_info *instr) 4512 { 4513 return nand_erase_nand(mtd_to_nand(mtd), instr, 0); 4514 } 4515 4516 /** 4517 * nand_erase_nand - [INTERN] erase block(s) 4518 * @chip: NAND chip object 4519 * @instr: erase instruction 4520 * @allowbbt: allow erasing the bbt area 4521 * 4522 * Erase one ore more blocks. 
 */
int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
		    int allowbbt)
{
	int page, pages_per_block, ret, chipnr;
	loff_t len;

	pr_debug("%s: start = 0x%012llx, len = %llu\n",
		 __func__, (unsigned long long)instr->addr,
		 (unsigned long long)instr->len);

	/* Reject unaligned or out-of-range erase requests */
	if (check_offs_len(chip, instr->addr, instr->len))
		return -EINVAL;

	/* Check if the region is secured */
	if (nand_region_is_secured(chip, instr->addr, instr->len))
		return -EIO;

	/* Grab the lock and see if the device is available */
	nand_get_device(chip);

	/* Shift to get first page */
	page = (int)(instr->addr >> chip->page_shift);
	chipnr = (int)(instr->addr >> chip->chip_shift);

	/* Calculate pages in each block */
	pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);

	/* Select the NAND device */
	nand_select_target(chip, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		pr_debug("%s: device is write protected!\n",
			 __func__);
		ret = -EIO;
		goto erase_exit;
	}

	/* Loop through the pages */
	len = instr->len;

	while (len) {
		loff_t ofs = (loff_t)page << chip->page_shift;

		/* Check if we have a bad block, we do not erase bad blocks! */
		if (nand_block_checkbad(chip, ((loff_t) page) <<
					chip->page_shift, allowbbt)) {
			pr_warn("%s: attempt to erase a bad block at 0x%08llx\n",
				__func__, (unsigned long long)ofs);
			ret = -EIO;
			goto erase_exit;
		}

		/*
		 * Invalidate the page cache, if we erase the block which
		 * contains the current cached page.
		 */
		if (page <= chip->pagecache.page && chip->pagecache.page <
		    (page + pages_per_block))
			chip->pagecache.page = -1;

		/*
		 * nand_erase_op() takes an eraseblock index relative to the
		 * current target, hence the mask with pagemask before the
		 * page-to-block shift.
		 */
		ret = nand_erase_op(chip, (page & chip->pagemask) >>
				    (chip->phys_erase_shift - chip->page_shift));
		if (ret) {
			pr_debug("%s: failed erase, page 0x%08x\n",
				 __func__, page);
			instr->fail_addr = ofs;
			goto erase_exit;
		}

		/* Increment page address and decrement length */
		len -= (1ULL << chip->phys_erase_shift);
		page += pages_per_block;

		/* Check, if we cross a chip boundary */
		if (len && !(page & chip->pagemask)) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}

	ret = 0;
erase_exit:

	/* Deselect and wake up anyone waiting on the device */
	nand_deselect_target(chip);
	nand_release_device(chip);

	/* Return more or less happy */
	return ret;
}

/**
 * nand_sync - [MTD Interface] sync
 * @mtd: MTD device structure
 *
 * Sync is actually a wait for chip ready function.
4622 */ 4623 static void nand_sync(struct mtd_info *mtd) 4624 { 4625 struct nand_chip *chip = mtd_to_nand(mtd); 4626 4627 pr_debug("%s: called\n", __func__); 4628 4629 /* Grab the lock and see if the device is available */ 4630 nand_get_device(chip); 4631 /* Release it and go back */ 4632 nand_release_device(chip); 4633 } 4634 4635 /** 4636 * nand_block_isbad - [MTD Interface] Check if block at offset is bad 4637 * @mtd: MTD device structure 4638 * @offs: offset relative to mtd start 4639 */ 4640 static int nand_block_isbad(struct mtd_info *mtd, loff_t offs) 4641 { 4642 struct nand_chip *chip = mtd_to_nand(mtd); 4643 int chipnr = (int)(offs >> chip->chip_shift); 4644 int ret; 4645 4646 /* Select the NAND device */ 4647 nand_get_device(chip); 4648 4649 nand_select_target(chip, chipnr); 4650 4651 ret = nand_block_checkbad(chip, offs, 0); 4652 4653 nand_deselect_target(chip); 4654 nand_release_device(chip); 4655 4656 return ret; 4657 } 4658 4659 /** 4660 * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad 4661 * @mtd: MTD device structure 4662 * @ofs: offset relative to mtd start 4663 */ 4664 static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs) 4665 { 4666 int ret; 4667 4668 ret = nand_block_isbad(mtd, ofs); 4669 if (ret) { 4670 /* If it was bad already, return success and do nothing */ 4671 if (ret > 0) 4672 return 0; 4673 return ret; 4674 } 4675 4676 return nand_block_markbad_lowlevel(mtd_to_nand(mtd), ofs); 4677 } 4678 4679 /** 4680 * nand_suspend - [MTD Interface] Suspend the NAND flash 4681 * @mtd: MTD device structure 4682 * 4683 * Returns 0 for success or negative error code otherwise. 
 */
static int nand_suspend(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int ret = 0;

	/* chip->lock protects chip->suspended against resume/ops races */
	mutex_lock(&chip->lock);
	if (chip->ops.suspend)
		ret = chip->ops.suspend(chip);
	if (!ret)
		chip->suspended = 1;
	mutex_unlock(&chip->lock);

	return ret;
}

/**
 * nand_resume - [MTD Interface] Resume the NAND flash
 * @mtd: MTD device structure
 */
static void nand_resume(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	scoped_guard(mutex, &chip->lock) {
		if (chip->suspended) {
			if (chip->ops.resume)
				chip->ops.resume(chip);
			chip->suspended = 0;
		} else {
			pr_err("%s called for a chip which is not in suspended state\n",
			       __func__);
		}
	}

	/* Wake waiters blocked on the device while it was suspended */
	wake_up_all(&chip->resume_wq);
}

/**
 * nand_shutdown - [MTD Interface] Finish the current NAND operation and
 *                 prevent further operations
 * @mtd: MTD device structure
 */
static void nand_shutdown(struct mtd_info *mtd)
{
	nand_suspend(mtd);
}

/**
 * nand_lock - [MTD Interface] Lock the NAND flash
 * @mtd: MTD device structure
 * @ofs: offset byte address
 * @len: number of bytes to lock (must be a multiple of block/page size)
 */
static int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int ret;

	if (!chip->ops.lock_area)
		return -ENOTSUPP;

	nand_get_device(chip);
	ret = chip->ops.lock_area(chip, ofs, len);
	nand_release_device(chip);

	return ret;
}

/**
 * nand_unlock - [MTD Interface] Unlock the NAND flash
 * @mtd: MTD device structure
 * @ofs: offset byte address
 * @len: number of bytes to unlock (must be a multiple of block/page size)
 */
static int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int ret;

	if (!chip->ops.unlock_area)
		return -ENOTSUPP;

	nand_get_device(chip);
	ret = chip->ops.unlock_area(chip, ofs, len);
	nand_release_device(chip);

	return ret;
}

/* Set default functions */
static void nand_set_defaults(struct nand_chip *chip)
{
	/* If no controller is provided, use the dummy, legacy one. */
	if (!chip->controller) {
		chip->controller = &chip->legacy.dummy_controller;
		nand_controller_init(chip->controller);
	}

	nand_legacy_set_defaults(chip);

	if (!chip->buf_align)
		chip->buf_align = 1;
}

/* Sanitize ONFI strings so we can safely print them */
void sanitize_string(uint8_t *s, size_t len)
{
	ssize_t i;

	/* Null terminate */
	s[len - 1] = 0;

	/* Remove non printable chars (keep printable ASCII only) */
	for (i = 0; i < len - 1; i++) {
		if (s[i] < ' ' || s[i] > 127)
			s[i] = '?';
	}

	/* Remove trailing spaces */
	strim(s);
}

/*
 * nand_id_has_period - Check if an ID string has a given wraparound period
 * @id_data: the ID string
 * @arrlen: the length of the @id_data array
 * @period: the period of repetition
 *
 * Check if an ID string is repeated within a given sequence of bytes at
 * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
 * period of 3). This is a helper function for nand_id_len(). Returns non-zero
 * if the repetition has a period of @period; otherwise, returns zero.
4817 */ 4818 static int nand_id_has_period(u8 *id_data, int arrlen, int period) 4819 { 4820 int i, j; 4821 for (i = 0; i < period; i++) 4822 for (j = i + period; j < arrlen; j += period) 4823 if (id_data[i] != id_data[j]) 4824 return 0; 4825 return 1; 4826 } 4827 4828 /* 4829 * nand_id_len - Get the length of an ID string returned by CMD_READID 4830 * @id_data: the ID string 4831 * @arrlen: the length of the @id_data array 4832 4833 * Returns the length of the ID string, according to known wraparound/trailing 4834 * zero patterns. If no pattern exists, returns the length of the array. 4835 */ 4836 static int nand_id_len(u8 *id_data, int arrlen) 4837 { 4838 int last_nonzero, period; 4839 4840 /* Find last non-zero byte */ 4841 for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--) 4842 if (id_data[last_nonzero]) 4843 break; 4844 4845 /* All zeros */ 4846 if (last_nonzero < 0) 4847 return 0; 4848 4849 /* Calculate wraparound period */ 4850 for (period = 1; period < arrlen; period++) 4851 if (nand_id_has_period(id_data, arrlen, period)) 4852 break; 4853 4854 /* There's a repeated pattern */ 4855 if (period < arrlen) 4856 return period; 4857 4858 /* There are trailing zeros */ 4859 if (last_nonzero < arrlen - 1) 4860 return last_nonzero + 1; 4861 4862 /* No pattern detected */ 4863 return arrlen; 4864 } 4865 4866 /* Extract the bits of per cell from the 3rd byte of the extended ID */ 4867 static int nand_get_bits_per_cell(u8 cellinfo) 4868 { 4869 int bits; 4870 4871 bits = cellinfo & NAND_CI_CELLTYPE_MSK; 4872 bits >>= NAND_CI_CELLTYPE_SHIFT; 4873 return bits + 1; 4874 } 4875 4876 /* 4877 * Many new NAND share similar device ID codes, which represent the size of the 4878 * chip. The rest of the parameters must be decoded according to generic or 4879 * manufacturer-specific "extended ID" decoding patterns. 
4880 */ 4881 void nand_decode_ext_id(struct nand_chip *chip) 4882 { 4883 struct nand_memory_organization *memorg; 4884 struct mtd_info *mtd = nand_to_mtd(chip); 4885 int extid; 4886 u8 *id_data = chip->id.data; 4887 4888 memorg = nanddev_get_memorg(&chip->base); 4889 4890 /* The 3rd id byte holds MLC / multichip data */ 4891 memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]); 4892 /* The 4th id byte is the important one */ 4893 extid = id_data[3]; 4894 4895 /* Calc pagesize */ 4896 memorg->pagesize = 1024 << (extid & 0x03); 4897 mtd->writesize = memorg->pagesize; 4898 extid >>= 2; 4899 /* Calc oobsize */ 4900 memorg->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9); 4901 mtd->oobsize = memorg->oobsize; 4902 extid >>= 2; 4903 /* Calc blocksize. Blocksize is multiples of 64KiB */ 4904 memorg->pages_per_eraseblock = ((64 * 1024) << (extid & 0x03)) / 4905 memorg->pagesize; 4906 mtd->erasesize = (64 * 1024) << (extid & 0x03); 4907 extid >>= 2; 4908 /* Get buswidth information */ 4909 if (extid & 0x1) 4910 chip->options |= NAND_BUSWIDTH_16; 4911 } 4912 EXPORT_SYMBOL_GPL(nand_decode_ext_id); 4913 4914 /* 4915 * Old devices have chip data hardcoded in the device ID table. nand_decode_id 4916 * decodes a matching ID table entry and assigns the MTD size parameters for 4917 * the chip. 
 */
static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;

	memorg = nanddev_get_memorg(&chip->base);

	memorg->pages_per_eraseblock = type->erasesize / type->pagesize;
	mtd->erasesize = type->erasesize;
	memorg->pagesize = type->pagesize;
	mtd->writesize = memorg->pagesize;
	/* Legacy parts expose 1/32 of the page size as OOB area */
	memorg->oobsize = memorg->pagesize / 32;
	mtd->oobsize = memorg->oobsize;

	/* All legacy ID NAND are small-page, SLC */
	memorg->bits_per_cell = 1;
}

/*
 * Set the bad block marker/indicator (BBM/BBI) patterns according to some
 * heuristic patterns using various detected parameters (e.g., manufacturer,
 * page size, cell-type information).
 */
static void nand_decode_bbm_options(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* Set the bad block position */
	if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
		chip->badblockpos = NAND_BBM_POS_LARGE;
	else
		chip->badblockpos = NAND_BBM_POS_SMALL;
}

/* An nand_ids entry is a "full-id" entry when it carries a raw ID string */
static inline bool is_full_id_nand(struct nand_flash_dev *type)
{
	return type->id_len;
}

/*
 * Match the chip's READID bytes against a full-id nand_ids entry and, on a
 * match, populate the memory organization, ECC requirements and model name
 * from the table entry. Returns true on a successful match, false otherwise
 * (including kstrdup() failure after a match).
 */
static bool find_full_id_nand(struct nand_chip *chip,
			      struct nand_flash_dev *type)
{
	struct nand_device *base = &chip->base;
	struct nand_ecc_props requirements;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	u8 *id_data = chip->id.data;

	memorg = nanddev_get_memorg(&chip->base);

	if (!strncmp(type->id, id_data, type->id_len)) {
		memorg->pagesize = type->pagesize;
		mtd->writesize = memorg->pagesize;
		memorg->pages_per_eraseblock = type->erasesize /
					       type->pagesize;
		mtd->erasesize = type->erasesize;
		memorg->oobsize = type->oobsize;
		mtd->oobsize = memorg->oobsize;

		/* The 3rd id byte holds MLC / multichip data */
		memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
		memorg->eraseblocks_per_lun =
			DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
					   memorg->pagesize *
					   memorg->pages_per_eraseblock);
		chip->options |= type->options;
		requirements.strength = NAND_ECC_STRENGTH(type);
		requirements.step_size = NAND_ECC_STEP(type);
		nanddev_set_ecc_requirements(base, &requirements);

		chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
		if (!chip->parameters.model)
			return false;

		return true;
	}
	return false;
}

/*
 * Manufacturer detection. Only used when the NAND is not ONFI or JEDEC
 * compliant and does not have a full-id or legacy-id entry in the nand_ids
 * table.
 */
static void nand_manufacturer_detect(struct nand_chip *chip)
{
	/*
	 * Try manufacturer detection if available and use
	 * nand_decode_ext_id() otherwise.
	 */
	if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
	    chip->manufacturer.desc->ops->detect) {
		struct nand_memory_organization *memorg;

		memorg = nanddev_get_memorg(&chip->base);

		/* The 3rd id byte holds MLC / multichip data */
		memorg->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
		chip->manufacturer.desc->ops->detect(chip);
	} else {
		nand_decode_ext_id(chip);
	}
}

/*
 * Manufacturer initialization. This function is called for all NANDs including
 * ONFI and JEDEC compliant ones.
 * Manufacturer drivers should put all their specific initialization code in
 * their ->init() hook.
 */
static int nand_manufacturer_init(struct nand_chip *chip)
{
	if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
	    !chip->manufacturer.desc->ops->init)
		return 0;

	return chip->manufacturer.desc->ops->init(chip);
}

/*
 * Manufacturer cleanup.
This function is called for all NANDs including
 * ONFI and JEDEC compliant ones.
 * Manufacturer drivers should put all their specific cleanup code in their
 * ->cleanup() hook.
 */
static void nand_manufacturer_cleanup(struct nand_chip *chip)
{
	/* Release manufacturer private data */
	if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
	    chip->manufacturer.desc->ops->cleanup)
		chip->manufacturer.desc->ops->cleanup(chip);
}

/* Printable manufacturer name, never NULL */
static const char *
nand_manufacturer_name(const struct nand_manufacturer_desc *manufacturer_desc)
{
	return manufacturer_desc ? manufacturer_desc->name : "Unknown";
}

/* Probe whether the controller can do data-only (no command/address) reads */
static void rawnand_check_data_only_read_support(struct nand_chip *chip)
{
	/* Use an arbitrary size for the check */
	if (!nand_read_data_op(chip, NULL, SZ_512, true, true))
		chip->controller->supported_op.data_only_read = 1;
}

static void rawnand_early_check_supported_ops(struct nand_chip *chip)
{
	/* The supported_op fields should not be set by individual drivers */
	WARN_ON_ONCE(chip->controller->supported_op.data_only_read);

	/* Probing only makes sense for ->exec_op() capable controllers */
	if (!nand_has_exec_op(chip))
		return;

	rawnand_check_data_only_read_support(chip);
}

/* Probe whether the chip/controller pair supports continuous (cached) reads */
static void rawnand_check_cont_read_support(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (!chip->parameters.supports_read_cache)
		return;

	/* Read-retry sequences are incompatible with the cached read flow */
	if (chip->read_retries)
		return;

	if (!nand_lp_exec_cont_read_page_op(chip, 0, 0, NULL,
					    mtd->writesize, true))
		chip->controller->supported_op.cont_read = 1;
}

static void rawnand_late_check_supported_ops(struct nand_chip *chip)
{
	/* The supported_op fields should not be set by individual drivers */
	WARN_ON_ONCE(chip->controller->supported_op.cont_read);

	/*
	 * Too many devices do not support sequential cached reads with on-die
	 * ECC correction enabled, so in this case refuse to perform the
	 * automation.
	 */
	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE)
		return;

	if (!nand_has_exec_op(chip))
		return;

	/*
	 * For now, continuous reads can only be used with the core page helpers.
	 * This can be extended later.
	 */
	if (!(chip->ecc.read_page == nand_read_page_hwecc ||
	      chip->ecc.read_page == nand_read_page_syndrome ||
	      chip->ecc.read_page == nand_read_page_swecc))
		return;

	rawnand_check_cont_read_support(chip);
}

/*
 * Get the flash and manufacturer id and lookup if the type is supported.
 */
static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
{
	const struct nand_manufacturer_desc *manufacturer_desc;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	int busw, ret;
	u8 *id_data = chip->id.data;
	u8 maf_id, dev_id;
	u64 targetsize;

	/*
	 * Let's start by initializing memorg fields that might be left
	 * unassigned by the ID-based detection logic.
	 */
	memorg = nanddev_get_memorg(&chip->base);
	memorg->planes_per_lun = 1;
	memorg->luns_per_target = 1;

	/*
	 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
	 * after power-up.
	 */
	ret = nand_reset(chip, 0);
	if (ret)
		return ret;

	/* Select the device */
	nand_select_target(chip, 0);

	rawnand_early_check_supported_ops(chip);

	/* Send the command for reading device ID */
	ret = nand_readid_op(chip, 0, id_data, 2);
	if (ret)
		return ret;

	/* Read manufacturer and device IDs */
	maf_id = id_data[0];
	dev_id = id_data[1];

	/*
	 * Try again to make sure, as on some systems the bus-hold or other
	 * interface concerns can cause random data which looks like a
	 * possibly credible NAND flash to appear. If the two results do
	 * not match, ignore the device completely.
	 */

	/* Read entire ID string */
	ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data));
	if (ret)
		return ret;

	if (id_data[0] != maf_id || id_data[1] != dev_id) {
		pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
			maf_id, dev_id, id_data[0], id_data[1]);
		return -ENODEV;
	}

	chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data));

	/* Try to identify manufacturer */
	manufacturer_desc = nand_get_manufacturer_desc(maf_id);
	chip->manufacturer.desc = manufacturer_desc;

	if (!type)
		type = nand_flash_ids;

	/*
	 * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic
	 * override it.
	 * This is required to make sure initial NAND bus width set by the
	 * NAND controller driver is coherent with the real NAND bus width
	 * (extracted by auto-detection code).
	 */
	busw = chip->options & NAND_BUSWIDTH_16;

	/*
	 * The flag is only set (never cleared), reset it to its default value
	 * before starting auto-detection.
	 */
	chip->options &= ~NAND_BUSWIDTH_16;

	for (; type->name != NULL; type++) {
		if (is_full_id_nand(type)) {
			if (find_full_id_nand(chip, type))
				goto ident_done;
		} else if (dev_id == type->dev_id) {
			break;
		}
	}

	if (!type->name || !type->pagesize) {
		/* Check if the chip is ONFI compliant */
		ret = nand_onfi_detect(chip);
		if (ret < 0)
			return ret;
		else if (ret)
			goto ident_done;

		/* Check if the chip is JEDEC compliant */
		ret = nand_jedec_detect(chip);
		if (ret < 0)
			return ret;
		else if (ret)
			goto ident_done;
	}

	if (!type->name)
		return -ENODEV;

	chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
	if (!chip->parameters.model)
		return -ENOMEM;

	if (!type->pagesize)
		nand_manufacturer_detect(chip);
	else
		nand_decode_id(chip, type);

	/* Get chip options */
	chip->options |= type->options;

	memorg->eraseblocks_per_lun =
		DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
				   memorg->pagesize *
				   memorg->pages_per_eraseblock);

ident_done:
	if (!mtd->name)
		mtd->name = chip->parameters.model;

	if (chip->options & NAND_BUSWIDTH_AUTO) {
		WARN_ON(busw & NAND_BUSWIDTH_16);
		nand_set_defaults(chip);
	} else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
		/*
		 * Check, if buswidth is correct. Hardware drivers should set
		 * chip correct!
		 */
		pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
			maf_id, dev_id);
		pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
			mtd->name);
		pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
			(chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
		ret = -EINVAL;

		goto free_detect_allocation;
	}

	nand_decode_bbm_options(chip);

	/* Calculate the address shift from the page size */
	chip->page_shift = ffs(mtd->writesize) - 1;
	/* Convert chipsize to number of pages per chip -1 */
	targetsize = nanddev_target_size(&chip->base);
	chip->pagemask = (targetsize >> chip->page_shift) - 1;

	chip->bbt_erase_shift = chip->phys_erase_shift =
		ffs(mtd->erasesize) - 1;
	/* ffs() operates on 32 bits, handle >4GiB targets in two halves */
	if (targetsize & 0xffffffff)
		chip->chip_shift = ffs((unsigned)targetsize) - 1;
	else {
		chip->chip_shift = ffs((unsigned)(targetsize >> 32));
		chip->chip_shift += 32 - 1;
	}

	if (chip->chip_shift - chip->page_shift > 16)
		chip->options |= NAND_ROW_ADDR_3;

	chip->badblockbits = 8;

	nand_legacy_adjust_cmdfunc(chip);

	pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
		maf_id, dev_id);
	pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
		chip->parameters.model);
	pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
		(int)(targetsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
		mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
	return 0;

free_detect_allocation:
	kfree(chip->parameters.model);

	return ret;
}

/* Map the legacy "nand-ecc-mode" DT string to an ECC engine type */
static enum nand_ecc_engine_type
of_get_rawnand_ecc_engine_type_legacy(struct device_node *np)
{
	enum nand_ecc_legacy_mode {
		NAND_ECC_INVALID,
		NAND_ECC_NONE,
		NAND_ECC_SOFT,
		NAND_ECC_SOFT_BCH,
		NAND_ECC_HW,
		NAND_ECC_HW_SYNDROME,
		NAND_ECC_ON_DIE,
	};
	const char * const nand_ecc_legacy_modes[] = {
		[NAND_ECC_NONE]		= "none",
		[NAND_ECC_SOFT]		= "soft",
		[NAND_ECC_SOFT_BCH]	= "soft_bch",
		[NAND_ECC_HW]		= "hw",
		[NAND_ECC_HW_SYNDROME]	= "hw_syndrome",
		[NAND_ECC_ON_DIE]	= "on-die",
	};
	enum nand_ecc_legacy_mode eng_type;
	const char *pm;
	int err;

	err = of_property_read_string(np, "nand-ecc-mode", &pm);
	if (err)
		return NAND_ECC_ENGINE_TYPE_INVALID;

	for (eng_type = NAND_ECC_NONE;
	     eng_type < ARRAY_SIZE(nand_ecc_legacy_modes); eng_type++) {
		if (!strcasecmp(pm, nand_ecc_legacy_modes[eng_type])) {
			switch (eng_type) {
			case NAND_ECC_NONE:
				return NAND_ECC_ENGINE_TYPE_NONE;
			case NAND_ECC_SOFT:
			case NAND_ECC_SOFT_BCH:
				return NAND_ECC_ENGINE_TYPE_SOFT;
			case NAND_ECC_HW:
			case NAND_ECC_HW_SYNDROME:
				return NAND_ECC_ENGINE_TYPE_ON_HOST;
			case NAND_ECC_ON_DIE:
				return NAND_ECC_ENGINE_TYPE_ON_DIE;
			default:
				break;
			}
		}
	}

	return NAND_ECC_ENGINE_TYPE_INVALID;
}

/* Map the legacy "nand-ecc-mode" DT string to an ECC placement */
static enum nand_ecc_placement
of_get_rawnand_ecc_placement_legacy(struct device_node *np)
{
	const char *pm;
	int err;

	err = of_property_read_string(np, "nand-ecc-mode", &pm);
	if (!err) {
		if (!strcasecmp(pm, "hw_syndrome"))
			return NAND_ECC_PLACEMENT_INTERLEAVED;
	}

	return NAND_ECC_PLACEMENT_UNKNOWN;
}

/* Map the legacy "nand-ecc-mode" DT string to an ECC algorithm */
static enum nand_ecc_algo
of_get_rawnand_ecc_algo_legacy(struct device_node *np)
{
	const char *pm;
	int err;

	err = of_property_read_string(np, "nand-ecc-mode", &pm);
	if (!err) {
		if (!strcasecmp(pm, "soft"))
			return NAND_ECC_ALGO_HAMMING;
		else if (!strcasecmp(pm, "soft_bch"))
			return NAND_ECC_ALGO_BCH;
	}

	return NAND_ECC_ALGO_UNKNOWN;
}

/*
 * Fill any ECC user-config fields still unset from the legacy
 * "nand-ecc-mode" DT property; never overrides values already provided.
 */
static void of_get_nand_ecc_legacy_user_config(struct nand_chip *chip)
{
	struct device_node *dn = nand_get_flash_node(chip);
	struct nand_ecc_props *user_conf = &chip->base.ecc.user_conf;

	if (user_conf->engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
		user_conf->engine_type = of_get_rawnand_ecc_engine_type_legacy(dn);

	if (user_conf->algo == NAND_ECC_ALGO_UNKNOWN)
		user_conf->algo = of_get_rawnand_ecc_algo_legacy(dn);

	if (user_conf->placement == NAND_ECC_PLACEMENT_UNKNOWN)
		user_conf->placement = of_get_rawnand_ecc_placement_legacy(dn);
}

/* Parse the "nand-bus-width" DT property (only 8 and 16 are valid) */
static int of_get_nand_bus_width(struct nand_chip *chip)
{
	struct device_node *dn = nand_get_flash_node(chip);
	u32 val;
	int ret;

	ret = of_property_read_u32(dn, "nand-bus-width", &val);
	if (ret == -EINVAL)
		/* Buswidth defaults to 8 if the property does not exist. */
		return 0;
	else if (ret)
		return ret;

	if (val == 16)
		chip->options |= NAND_BUSWIDTH_16;
	else if (val != 8)
		return -EINVAL;
	return 0;
}

/*
 * Parse the "secure-regions" DT property (pairs of u64 <offset, size>)
 * into chip->secure_regions/chip->nr_secure_regions.
 */
static int of_get_nand_secure_regions(struct nand_chip *chip)
{
	struct device_node *dn = nand_get_flash_node(chip);
	struct property *prop;
	int nr_elem, i, j;

	/* Only proceed if the "secure-regions" property is present in DT */
	prop = of_find_property(dn, "secure-regions", NULL);
	if (!prop)
		return 0;

	nr_elem = of_property_count_elems_of_size(dn, "secure-regions", sizeof(u64));
	if (nr_elem <= 0)
		return nr_elem;

	/* Each region is described by an <offset, size> pair */
	chip->nr_secure_regions = nr_elem / 2;
	chip->secure_regions = kzalloc_objs(*chip->secure_regions,
					    chip->nr_secure_regions);
	if (!chip->secure_regions)
		return -ENOMEM;

	for (i = 0, j = 0; i < chip->nr_secure_regions; i++, j += 2) {
		of_property_read_u64_index(dn, "secure-regions", j,
					   &chip->secure_regions[i].offset);
		of_property_read_u64_index(dn, "secure-regions", j + 1,
					   &chip->secure_regions[i].size);
	}

	return 0;
}

/**
 * rawnand_dt_parse_gpio_cs - Parse the gpio-cs property of a controller
 * @dev: Device that will be parsed. Also used for managed allocations.
 * @cs_array: Array of GPIO desc pointers allocated on success
 * @ncs_array: Number of entries in @cs_array updated on success.
 * @return 0 on success, an error otherwise.
 */
int rawnand_dt_parse_gpio_cs(struct device *dev, struct gpio_desc ***cs_array,
			     unsigned int *ncs_array)
{
	struct gpio_desc **descs;
	int ndescs, i;

	ndescs = gpiod_count(dev, "cs");
	if (ndescs < 0) {
		/* Missing cs-gpios is not an error: fall back to no GPIO CS */
		dev_dbg(dev, "No valid cs-gpios property\n");
		return 0;
	}

	descs = devm_kcalloc(dev, ndescs, sizeof(*descs), GFP_KERNEL);
	if (!descs)
		return -ENOMEM;

	for (i = 0; i < ndescs; i++) {
		descs[i] = gpiod_get_index_optional(dev, "cs", i,
						    GPIOD_OUT_HIGH);
		if (IS_ERR(descs[i]))
			return PTR_ERR(descs[i]);
	}

	*ncs_array = ndescs;
	*cs_array = descs;

	return 0;
}
EXPORT_SYMBOL(rawnand_dt_parse_gpio_cs);

/* Populate chip/ECC configuration from the flash device-tree node */
static int rawnand_dt_init(struct nand_chip *chip)
{
	struct nand_device *nand = mtd_to_nanddev(nand_to_mtd(chip));
	struct device_node *dn = nand_get_flash_node(chip);
	int ret;

	if (!dn)
		return 0;

	ret = of_get_nand_bus_width(chip);
	if (ret)
		return ret;

	if (of_property_read_bool(dn, "nand-is-boot-medium"))
		chip->options |= NAND_IS_BOOT_MEDIUM;

	if (of_property_read_bool(dn, "nand-on-flash-bbt"))
		chip->bbt_options |= NAND_BBT_USE_FLASH;

	of_get_nand_ecc_user_config(nand);
	of_get_nand_ecc_legacy_user_config(chip);

	/*
	 * If neither the user nor the NAND controller have requested a specific
	 * ECC engine type, we will default to NAND_ECC_ENGINE_TYPE_ON_HOST.
	 */
	nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;

	/*
	 * Use the user requested engine type, unless there is none, in this
	 * case default to the NAND controller choice, otherwise fallback to
	 * the raw NAND default one.
	 */
	if (nand->ecc.user_conf.engine_type != NAND_ECC_ENGINE_TYPE_INVALID)
		chip->ecc.engine_type = nand->ecc.user_conf.engine_type;
	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
		chip->ecc.engine_type = nand->ecc.defaults.engine_type;

	chip->ecc.placement = nand->ecc.user_conf.placement;
	chip->ecc.algo = nand->ecc.user_conf.algo;
	chip->ecc.strength = nand->ecc.user_conf.strength;
	chip->ecc.size = nand->ecc.user_conf.step_size;

	return 0;
}

/**
 * nand_scan_ident - Scan for the NAND device
 * @chip: NAND chip object
 * @maxchips: number of chips to scan for
 * @table: alternative NAND ID table
 *
 * This is the first phase of the normal nand_scan() function. It reads the
 * flash ID and sets up MTD fields accordingly.
 *
 * This helper used to be called directly from controller drivers that needed
 * to tweak some ECC-related parameters before nand_scan_tail(). This separation
 * prevented dynamic allocations during this phase which was inconvenient and
 * has been banned for the benefit of the ->init_ecc()/cleanup_ecc() hooks.
 */
static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
			   struct nand_flash_dev *table)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	int nand_maf_id, nand_dev_id;
	unsigned int i;
	int ret;

	memorg = nanddev_get_memorg(&chip->base);

	/* Assume all dies are deselected when we enter nand_scan_ident(). */
	chip->cur_cs = -1;

	mutex_init(&chip->lock);
	init_waitqueue_head(&chip->resume_wq);

	/* Enforce the right timings for reset/detection */
	chip->current_interface_config = nand_get_reset_interface_config();

	ret = rawnand_dt_init(chip);
	if (ret)
		return ret;

	if (!mtd->name && mtd->dev.parent)
		mtd->name = dev_name(mtd->dev.parent);

	/* Set the default functions */
	nand_set_defaults(chip);

	ret = nand_legacy_check_hooks(chip);
	if (ret)
		return ret;

	memorg->ntargets = maxchips;

	/* Read the flash type */
	ret = nand_detect(chip, table);
	if (ret) {
		if (!(chip->options & NAND_SCAN_SILENT_NODEV))
			pr_warn("No NAND device found\n");
		nand_deselect_target(chip);
		return ret;
	}

	nand_maf_id = chip->id.data[0];
	nand_dev_id = chip->id.data[1];

	nand_deselect_target(chip);

	/*
	 * Check for a chip array: each additional die must answer READID
	 * with the same manufacturer/device ID pair as the first one.
	 */
	for (i = 1; i < maxchips; i++) {
		u8 id[2];

		/* See comment in nand_get_flash_type for reset */
		ret = nand_reset(chip, i);
		if (ret)
			break;

		nand_select_target(chip, i);
		/* Send the command for reading device ID */
		ret = nand_readid_op(chip, 0, id, sizeof(id));
		if (ret)
			break;
		/* Read manufacturer and device IDs */
		if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
			nand_deselect_target(chip);
			break;
		}
		nand_deselect_target(chip);
	}
	if (i > 1)
		pr_info("%d chips detected\n", i);

	/* Store the number of chips and calc total size for mtd */
	memorg->ntargets = i;
	mtd->size = i * nanddev_target_size(&chip->base);

	return 0;
}

/* Free identification-phase allocations (model string, ONFI parameters) */
static void nand_scan_ident_cleanup(struct nand_chip *chip)
{
	kfree(chip->parameters.model);
	kfree(chip->parameters.onfi);
}

/*
 * Configure the software Hamming ECC engine for @chip and mirror the
 * resolved configuration back into the raw NAND ecc fields.
 */
int rawnand_sw_hamming_init(struct nand_chip *chip)
{
	struct nand_ecc_sw_hamming_conf *engine_conf;
	struct nand_device *base = &chip->base;
	int ret;

	base->ecc.user_conf.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
	base->ecc.user_conf.algo = NAND_ECC_ALGO_HAMMING;
	base->ecc.user_conf.strength = chip->ecc.strength;
	base->ecc.user_conf.step_size = chip->ecc.size;

	ret = nand_ecc_sw_hamming_init_ctx(base);
	if (ret)
		return ret;

	engine_conf = base->ecc.ctx.priv;

	/* Honor the legacy Smart Media byte ordering when requested */
	if (chip->ecc.options & NAND_ECC_SOFT_HAMMING_SM_ORDER)
		engine_conf->sm_order = true;

	chip->ecc.size = base->ecc.ctx.conf.step_size;
	chip->ecc.strength = base->ecc.ctx.conf.strength;
	chip->ecc.total = base->ecc.ctx.total;
	chip->ecc.steps = nanddev_get_ecc_nsteps(base);
	chip->ecc.bytes = base->ecc.ctx.total / nanddev_get_ecc_nsteps(base);

	return 0;
}
EXPORT_SYMBOL(rawnand_sw_hamming_init);

int rawnand_sw_hamming_calculate(struct nand_chip *chip,
				 const unsigned char *buf,
				 unsigned char *code)
{
	struct nand_device *base = &chip->base;

	return nand_ecc_sw_hamming_calculate(base, buf, code);
}
EXPORT_SYMBOL(rawnand_sw_hamming_calculate);

int rawnand_sw_hamming_correct(struct nand_chip *chip,
			       unsigned char *buf,
			       unsigned char *read_ecc,
			       unsigned char *calc_ecc)
{
	struct nand_device *base = &chip->base;

	return nand_ecc_sw_hamming_correct(base, buf, read_ecc, calc_ecc);
}
EXPORT_SYMBOL(rawnand_sw_hamming_correct);

void
rawnand_sw_hamming_cleanup(struct nand_chip *chip) 5694 { 5695 struct nand_device *base = &chip->base; 5696 5697 nand_ecc_sw_hamming_cleanup_ctx(base); 5698 } 5699 EXPORT_SYMBOL(rawnand_sw_hamming_cleanup); 5700 5701 int rawnand_sw_bch_init(struct nand_chip *chip) 5702 { 5703 struct nand_device *base = &chip->base; 5704 const struct nand_ecc_props *ecc_conf = nanddev_get_ecc_conf(base); 5705 int ret; 5706 5707 base->ecc.user_conf.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; 5708 base->ecc.user_conf.algo = NAND_ECC_ALGO_BCH; 5709 base->ecc.user_conf.step_size = chip->ecc.size; 5710 base->ecc.user_conf.strength = chip->ecc.strength; 5711 5712 ret = nand_ecc_sw_bch_init_ctx(base); 5713 if (ret) 5714 return ret; 5715 5716 chip->ecc.size = ecc_conf->step_size; 5717 chip->ecc.strength = ecc_conf->strength; 5718 chip->ecc.total = base->ecc.ctx.total; 5719 chip->ecc.steps = nanddev_get_ecc_nsteps(base); 5720 chip->ecc.bytes = base->ecc.ctx.total / nanddev_get_ecc_nsteps(base); 5721 5722 return 0; 5723 } 5724 EXPORT_SYMBOL(rawnand_sw_bch_init); 5725 5726 static int rawnand_sw_bch_calculate(struct nand_chip *chip, 5727 const unsigned char *buf, 5728 unsigned char *code) 5729 { 5730 struct nand_device *base = &chip->base; 5731 5732 return nand_ecc_sw_bch_calculate(base, buf, code); 5733 } 5734 5735 int rawnand_sw_bch_correct(struct nand_chip *chip, unsigned char *buf, 5736 unsigned char *read_ecc, unsigned char *calc_ecc) 5737 { 5738 struct nand_device *base = &chip->base; 5739 5740 return nand_ecc_sw_bch_correct(base, buf, read_ecc, calc_ecc); 5741 } 5742 EXPORT_SYMBOL(rawnand_sw_bch_correct); 5743 5744 void rawnand_sw_bch_cleanup(struct nand_chip *chip) 5745 { 5746 struct nand_device *base = &chip->base; 5747 5748 nand_ecc_sw_bch_cleanup_ctx(base); 5749 } 5750 EXPORT_SYMBOL(rawnand_sw_bch_cleanup); 5751 5752 static int nand_set_ecc_on_host_ops(struct nand_chip *chip) 5753 { 5754 struct nand_ecc_ctrl *ecc = &chip->ecc; 5755 5756 switch (ecc->placement) { 5757 case 
NAND_ECC_PLACEMENT_UNKNOWN: 5758 case NAND_ECC_PLACEMENT_OOB: 5759 /* Use standard hwecc read page function? */ 5760 if (!ecc->read_page) 5761 ecc->read_page = nand_read_page_hwecc; 5762 if (!ecc->write_page) 5763 ecc->write_page = nand_write_page_hwecc; 5764 if (!ecc->read_page_raw) 5765 ecc->read_page_raw = nand_read_page_raw; 5766 if (!ecc->write_page_raw) 5767 ecc->write_page_raw = nand_write_page_raw; 5768 if (!ecc->read_oob) 5769 ecc->read_oob = nand_read_oob_std; 5770 if (!ecc->write_oob) 5771 ecc->write_oob = nand_write_oob_std; 5772 if (!ecc->read_subpage) 5773 ecc->read_subpage = nand_read_subpage; 5774 if (!ecc->write_subpage && ecc->hwctl && ecc->calculate) 5775 ecc->write_subpage = nand_write_subpage_hwecc; 5776 fallthrough; 5777 5778 case NAND_ECC_PLACEMENT_INTERLEAVED: 5779 if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) && 5780 (!ecc->read_page || 5781 ecc->read_page == nand_read_page_hwecc || 5782 !ecc->write_page || 5783 ecc->write_page == nand_write_page_hwecc)) { 5784 WARN(1, "No ECC functions supplied; hardware ECC not possible\n"); 5785 return -EINVAL; 5786 } 5787 /* Use standard syndrome read/write page function? 
*/ 5788 if (!ecc->read_page) 5789 ecc->read_page = nand_read_page_syndrome; 5790 if (!ecc->write_page) 5791 ecc->write_page = nand_write_page_syndrome; 5792 if (!ecc->read_page_raw) 5793 ecc->read_page_raw = nand_read_page_raw_syndrome; 5794 if (!ecc->write_page_raw) 5795 ecc->write_page_raw = nand_write_page_raw_syndrome; 5796 if (!ecc->read_oob) 5797 ecc->read_oob = nand_read_oob_syndrome; 5798 if (!ecc->write_oob) 5799 ecc->write_oob = nand_write_oob_syndrome; 5800 break; 5801 5802 default: 5803 pr_warn("Invalid NAND_ECC_PLACEMENT %d\n", 5804 ecc->placement); 5805 return -EINVAL; 5806 } 5807 5808 return 0; 5809 } 5810 5811 static int nand_set_ecc_soft_ops(struct nand_chip *chip) 5812 { 5813 struct mtd_info *mtd = nand_to_mtd(chip); 5814 struct nand_device *nanddev = mtd_to_nanddev(mtd); 5815 struct nand_ecc_ctrl *ecc = &chip->ecc; 5816 int ret; 5817 5818 if (WARN_ON(ecc->engine_type != NAND_ECC_ENGINE_TYPE_SOFT)) 5819 return -EINVAL; 5820 5821 switch (ecc->algo) { 5822 case NAND_ECC_ALGO_HAMMING: 5823 ecc->calculate = rawnand_sw_hamming_calculate; 5824 ecc->correct = rawnand_sw_hamming_correct; 5825 ecc->read_page = nand_read_page_swecc; 5826 ecc->read_subpage = nand_read_subpage; 5827 ecc->write_page = nand_write_page_swecc; 5828 if (!ecc->read_page_raw) 5829 ecc->read_page_raw = nand_read_page_raw; 5830 if (!ecc->write_page_raw) 5831 ecc->write_page_raw = nand_write_page_raw; 5832 ecc->read_oob = nand_read_oob_std; 5833 ecc->write_oob = nand_write_oob_std; 5834 if (!ecc->size) 5835 ecc->size = 256; 5836 ecc->bytes = 3; 5837 ecc->strength = 1; 5838 5839 if (IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC)) 5840 ecc->options |= NAND_ECC_SOFT_HAMMING_SM_ORDER; 5841 5842 ret = rawnand_sw_hamming_init(chip); 5843 if (ret) { 5844 WARN(1, "Hamming ECC initialization failed!\n"); 5845 return ret; 5846 } 5847 5848 return 0; 5849 case NAND_ECC_ALGO_BCH: 5850 if (!IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)) { 5851 WARN(1, "CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n"); 5852 
return -EINVAL; 5853 } 5854 ecc->calculate = rawnand_sw_bch_calculate; 5855 ecc->correct = rawnand_sw_bch_correct; 5856 ecc->read_page = nand_read_page_swecc; 5857 ecc->read_subpage = nand_read_subpage; 5858 ecc->write_page = nand_write_page_swecc; 5859 if (!ecc->read_page_raw) 5860 ecc->read_page_raw = nand_read_page_raw; 5861 if (!ecc->write_page_raw) 5862 ecc->write_page_raw = nand_write_page_raw; 5863 ecc->read_oob = nand_read_oob_std; 5864 ecc->write_oob = nand_write_oob_std; 5865 5866 /* 5867 * We can only maximize ECC config when the default layout is 5868 * used, otherwise we don't know how many bytes can really be 5869 * used. 5870 */ 5871 if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH && 5872 mtd->ooblayout != nand_get_large_page_ooblayout()) 5873 nanddev->ecc.user_conf.flags &= ~NAND_ECC_MAXIMIZE_STRENGTH; 5874 5875 ret = rawnand_sw_bch_init(chip); 5876 if (ret) { 5877 WARN(1, "BCH ECC initialization failed!\n"); 5878 return ret; 5879 } 5880 5881 return 0; 5882 default: 5883 WARN(1, "Unsupported ECC algorithm!\n"); 5884 return -EINVAL; 5885 } 5886 } 5887 5888 /** 5889 * nand_check_ecc_caps - check the sanity of preset ECC settings 5890 * @chip: nand chip info structure 5891 * @caps: ECC caps info structure 5892 * @oobavail: OOB size that the ECC engine can use 5893 * 5894 * When ECC step size and strength are already set, check if they are supported 5895 * by the controller and the calculated ECC bytes fit within the chip's OOB. 5896 * On success, the calculated ECC bytes is set. 
5897 */ 5898 static int 5899 nand_check_ecc_caps(struct nand_chip *chip, 5900 const struct nand_ecc_caps *caps, int oobavail) 5901 { 5902 struct mtd_info *mtd = nand_to_mtd(chip); 5903 const struct nand_ecc_step_info *stepinfo; 5904 int preset_step = chip->ecc.size; 5905 int preset_strength = chip->ecc.strength; 5906 int ecc_bytes, nsteps = mtd->writesize / preset_step; 5907 int i, j; 5908 5909 for (i = 0; i < caps->nstepinfos; i++) { 5910 stepinfo = &caps->stepinfos[i]; 5911 5912 if (stepinfo->stepsize != preset_step) 5913 continue; 5914 5915 for (j = 0; j < stepinfo->nstrengths; j++) { 5916 if (stepinfo->strengths[j] != preset_strength) 5917 continue; 5918 5919 ecc_bytes = caps->calc_ecc_bytes(preset_step, 5920 preset_strength); 5921 if (WARN_ON_ONCE(ecc_bytes < 0)) 5922 return ecc_bytes; 5923 5924 if (ecc_bytes * nsteps > oobavail) { 5925 pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB", 5926 preset_step, preset_strength); 5927 return -ENOSPC; 5928 } 5929 5930 chip->ecc.bytes = ecc_bytes; 5931 5932 return 0; 5933 } 5934 } 5935 5936 pr_err("ECC (step, strength) = (%d, %d) not supported on this controller", 5937 preset_step, preset_strength); 5938 5939 return -ENOTSUPP; 5940 } 5941 5942 /** 5943 * nand_match_ecc_req - meet the chip's requirement with least ECC bytes 5944 * @chip: nand chip info structure 5945 * @caps: ECC engine caps info structure 5946 * @oobavail: OOB size that the ECC engine can use 5947 * 5948 * If a chip's ECC requirement is provided, try to meet it with the least 5949 * number of ECC bytes (i.e. with the largest number of OOB-free bytes). 5950 * On success, the chosen ECC settings are set. 
 */
static int
nand_match_ecc_req(struct nand_chip *chip,
		   const struct nand_ecc_caps *caps, int oobavail)
{
	const struct nand_ecc_props *requirements =
		nanddev_get_ecc_requirements(&chip->base);
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_ecc_step_info *stepinfo;
	int req_step = requirements->step_size;
	int req_strength = requirements->strength;
	int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
	int best_step = 0, best_strength = 0, best_ecc_bytes = 0;
	int best_ecc_bytes_total = INT_MAX;
	int i, j;

	/* No information provided by the NAND chip */
	if (!req_step || !req_strength)
		return -ENOTSUPP;

	/* number of correctable bits the chip requires in a page */
	req_corr = mtd->writesize / req_step * req_strength;

	for (i = 0; i < caps->nstepinfos; i++) {
		stepinfo = &caps->stepinfos[i];
		step_size = stepinfo->stepsize;

		for (j = 0; j < stepinfo->nstrengths; j++) {
			strength = stepinfo->strengths[j];

			/*
			 * If both step size and strength are smaller than the
			 * chip's requirement, it is not easy to compare the
			 * resulted reliability.
			 */
			if (step_size < req_step && strength < req_strength)
				continue;

			/* Step size must divide the page evenly */
			if (mtd->writesize % step_size)
				continue;

			nsteps = mtd->writesize / step_size;

			ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
			if (WARN_ON_ONCE(ecc_bytes < 0))
				continue;
			ecc_bytes_total = ecc_bytes * nsteps;

			if (ecc_bytes_total > oobavail ||
			    strength * nsteps < req_corr)
				continue;

			/*
			 * We assume the best is to meet the chip's requirement
			 * with the least number of ECC bytes.
			 */
			if (ecc_bytes_total < best_ecc_bytes_total) {
				best_ecc_bytes_total = ecc_bytes_total;
				best_step = step_size;
				best_strength = strength;
				best_ecc_bytes = ecc_bytes;
			}
		}
	}

	if (best_ecc_bytes_total == INT_MAX)
		return -ENOTSUPP;

	chip->ecc.size = best_step;
	chip->ecc.strength = best_strength;
	chip->ecc.bytes = best_ecc_bytes;

	return 0;
}

/**
 * nand_maximize_ecc - choose the max ECC strength available
 * @chip: nand chip info structure
 * @caps: ECC engine caps info structure
 * @oobavail: OOB size that the ECC engine can use
 *
 * Choose the max ECC strength that is supported on the controller, and can fit
 * within the chip's OOB. On success, the chosen ECC settings are set.
 */
static int
nand_maximize_ecc(struct nand_chip *chip,
		  const struct nand_ecc_caps *caps, int oobavail)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_ecc_step_info *stepinfo;
	int step_size, strength, nsteps, ecc_bytes, corr;
	int best_corr = 0;
	int best_step = 0;
	int best_strength = 0, best_ecc_bytes = 0;
	int i, j;

	for (i = 0; i < caps->nstepinfos; i++) {
		stepinfo = &caps->stepinfos[i];
		step_size = stepinfo->stepsize;

		/* If chip->ecc.size is already set, respect it */
		if (chip->ecc.size && step_size != chip->ecc.size)
			continue;

		for (j = 0; j < stepinfo->nstrengths; j++) {
			strength = stepinfo->strengths[j];

			if (mtd->writesize % step_size)
				continue;

			nsteps = mtd->writesize / step_size;

			ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
			if (WARN_ON_ONCE(ecc_bytes < 0))
				continue;

			if (ecc_bytes * nsteps > oobavail)
				continue;

			corr = strength * nsteps;

			/*
			 * If the number of correctable bits is the same,
			 * bigger step_size has more reliability.
			 */
			if (corr > best_corr ||
			    (corr == best_corr && step_size > best_step)) {
				best_corr = corr;
				best_step = step_size;
				best_strength = strength;
				best_ecc_bytes = ecc_bytes;
			}
		}
	}

	if (!best_corr)
		return -ENOTSUPP;

	chip->ecc.size = best_step;
	chip->ecc.strength = best_strength;
	chip->ecc.bytes = best_ecc_bytes;

	return 0;
}

/**
 * nand_ecc_choose_conf - Set the ECC strength and ECC step size
 * @chip: nand chip info structure
 * @caps: ECC engine caps info structure
 * @oobavail: OOB size that the ECC engine can use
 *
 * Choose the ECC configuration according to following logic.
 *
 * 1. If both ECC step size and ECC strength are already set (usually by DT)
 *    then check if it is supported by this controller.
 * 2. If the user provided the nand-ecc-maximize property, then select maximum
 *    ECC strength.
 * 3. Otherwise, try to match the ECC step size and ECC strength closest
 *    to the chip's requirement. If available OOB size can't fit the chip
 *    requirement then fallback to the maximum ECC step size and ECC strength.
 *
 * On success, the chosen ECC settings are set.
 */
int nand_ecc_choose_conf(struct nand_chip *chip,
			 const struct nand_ecc_caps *caps, int oobavail)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_device *nanddev = mtd_to_nanddev(mtd);

	if (WARN_ON(oobavail < 0 || oobavail > mtd->oobsize))
		return -EINVAL;

	if (chip->ecc.size && chip->ecc.strength)
		return nand_check_ecc_caps(chip, caps, oobavail);

	if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH)
		return nand_maximize_ecc(chip, caps, oobavail);

	/* Try to match the chip requirement, fall back to the maximum */
	if (!nand_match_ecc_req(chip, caps, oobavail))
		return 0;

	return nand_maximize_ecc(chip, caps, oobavail);
}
EXPORT_SYMBOL_GPL(nand_ecc_choose_conf);

/* nand_ops hook: erase one eraseblock after converting @pos to a row address */
static int rawnand_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	struct nand_chip *chip = container_of(nand, struct nand_chip,
					      base);
	unsigned int eb = nanddev_pos_to_row(nand, pos);
	int ret;

	eb >>= nand->rowconv.eraseblock_addr_shift;

	nand_select_target(chip, pos->target);
	ret = nand_erase_op(chip, eb);
	nand_deselect_target(chip);

	return ret;
}

/* nand_ops hook: write a bad block marker for the eraseblock at @pos */
static int rawnand_markbad(struct nand_device *nand,
			   const struct nand_pos *pos)
{
	struct nand_chip *chip = container_of(nand, struct nand_chip,
					      base);

	return nand_markbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
}

/* nand_ops hook: check the bad block marker of the eraseblock at @pos */
static bool rawnand_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct nand_chip *chip = container_of(nand, struct nand_chip,
					      base);
	int ret;

	nand_select_target(chip, pos->target);
	ret = nand_isbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
	nand_deselect_target(chip);

	return ret;
}

/* Generic NAND framework hooks, backed by the raw NAND core helpers above */
static const struct nand_ops rawnand_ops = {
	.erase = rawnand_erase,
	.markbad = rawnand_markbad,
	.isbad = rawnand_isbad,
};

/**
 * nand_scan_tail - Scan for
 * the NAND device
 * @chip: NAND chip object
 *
 * This is the second phase of the normal nand_scan() function. It fills out
 * all the uninitialized function pointers with the defaults and scans for a
 * bad block table if appropriate.
 */
static int nand_scan_tail(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_device *base = &chip->base;
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int ret, i;

	/* New bad blocks should be marked in OOB, flash-based BBT, or both */
	if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
		    !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
		return -EINVAL;
	}

	/* One bounce buffer covering a full page plus its OOB area */
	chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
	if (!chip->data_buf)
		return -ENOMEM;

	/*
	 * FIXME: some NAND manufacturer drivers expect the first die to be
	 * selected when manufacturer->init() is called. They should be fixed
	 * to explicitly select the relevant die when interacting with the NAND
	 * chip.
	 */
	nand_select_target(chip, 0);
	ret = nand_manufacturer_init(chip);
	nand_deselect_target(chip);
	if (ret)
		goto err_free_buf;

	/* Set the internal oob buffer location, just after the page data */
	chip->oob_poi = chip->data_buf + mtd->writesize;

	/*
	 * If no default placement scheme is given, select an appropriate one.
	 */
	if (!mtd->ooblayout &&
	    !(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
	      ecc->algo == NAND_ECC_ALGO_BCH) &&
	    !(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
	      ecc->algo == NAND_ECC_ALGO_HAMMING)) {
		switch (mtd->oobsize) {
		case 8:
		case 16:
			mtd_set_ooblayout(mtd, nand_get_small_page_ooblayout());
			break;
		case 64:
		case 128:
			mtd_set_ooblayout(mtd,
					  nand_get_large_page_hamming_ooblayout());
			break;
		default:
			/*
			 * Expose the whole OOB area to users if ECC_NONE
			 * is passed. We could do that for all kind of
			 * ->oobsize, but we must keep the old large/small
			 * page with ECC layout when ->oobsize <= 128 for
			 * compatibility reasons.
			 */
			if (ecc->engine_type == NAND_ECC_ENGINE_TYPE_NONE) {
				mtd_set_ooblayout(mtd,
						  nand_get_large_page_ooblayout());
				break;
			}

			WARN(1, "No oob scheme defined for oobsize %d\n",
			     mtd->oobsize);
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
	}

	/*
	 * Check ECC mode, default to software if 3byte/512byte hardware ECC is
	 * selected and we have 256 byte pagesize fallback to software ECC
	 */

	switch (ecc->engine_type) {
	case NAND_ECC_ENGINE_TYPE_ON_HOST:
		ret = nand_set_ecc_on_host_ops(chip);
		if (ret)
			goto err_nand_manuf_cleanup;

		if (mtd->writesize >= ecc->size) {
			if (!ecc->strength) {
				WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
				ret = -EINVAL;
				goto err_nand_manuf_cleanup;
			}
			break;
		}
		pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
			ecc->size, mtd->writesize);
		ecc->engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
		ecc->algo = NAND_ECC_ALGO_HAMMING;
		fallthrough;

	case NAND_ECC_ENGINE_TYPE_SOFT:
		ret = nand_set_ecc_soft_ops(chip);
		if (ret)
			goto err_nand_manuf_cleanup;
		break;

	case NAND_ECC_ENGINE_TYPE_ON_DIE:
		if (!ecc->read_page || !ecc->write_page) {
			WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_std;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_std;
		break;

	case NAND_ECC_ENGINE_TYPE_NONE:
		pr_warn("NAND_ECC_ENGINE_TYPE_NONE selected by board driver. This is not recommended!\n");
		ecc->read_page = nand_read_page_raw;
		ecc->write_page = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->read_page_raw = nand_read_page_raw;
		ecc->write_page_raw = nand_write_page_raw;
		ecc->write_oob = nand_write_oob_std;
		ecc->size = mtd->writesize;
		ecc->bytes = 0;
		ecc->strength = 0;
		break;

	default:
		WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->engine_type);
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}

	/* Scratch buffers only needed when the core runs calculate/correct */
	if (ecc->correct || ecc->calculate) {
		ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
		ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
		if (!ecc->calc_buf || !ecc->code_buf) {
			ret = -ENOMEM;
			goto err_nand_manuf_cleanup;
		}
	}

	/* For many systems, the standard OOB write also works for raw */
	if (!ecc->read_oob_raw)
		ecc->read_oob_raw = ecc->read_oob;
	if (!ecc->write_oob_raw)
		ecc->write_oob_raw = ecc->write_oob;

	/* Propagate ECC info to the generic NAND and MTD layers */
	mtd->ecc_strength = ecc->strength;
	if (!base->ecc.ctx.conf.strength)
		base->ecc.ctx.conf.strength = ecc->strength;
	mtd->ecc_step_size = ecc->size;
	if (!base->ecc.ctx.conf.step_size)
		base->ecc.ctx.conf.step_size = ecc->size;

	/*
	 * Set the number of read / write steps for one page depending on ECC
	 * mode.
	 */
	if (!ecc->steps)
		ecc->steps = mtd->writesize / ecc->size;
	if (!base->ecc.ctx.nsteps)
		base->ecc.ctx.nsteps = ecc->steps;

	/*
	 * Validity check: Warn if ECC parameters are not compatible with page size.
	 * Due to the custom handling of ECC blocks in certain controllers the check
	 * may result in an expected failure.
	 */
	if (ecc->steps * ecc->size != mtd->writesize)
		pr_warn("ECC parameters may be invalid in reference to underlying NAND chip\n");

	if (!ecc->total) {
		ecc->total = ecc->steps * ecc->bytes;
		chip->base.ecc.ctx.total = ecc->total;
	}

	if (ecc->total > mtd->oobsize) {
		WARN(1, "Total number of ECC bytes exceeded oobsize\n");
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}

	/*
	 * The number of bytes available for a client to place data into
	 * the out of band area.
	 */
	ret = mtd_ooblayout_count_freebytes(mtd);
	if (ret < 0)
		ret = 0;

	mtd->oobavail = ret;

	/* ECC sanity check: warn if it's too weak */
	if (!nand_ecc_is_strong_enough(&chip->base))
		pr_warn("WARNING: %s: the ECC used on your system (%db/%dB) is too weak compared to the one required by the NAND chip (%db/%dB)\n",
			mtd->name, chip->ecc.strength, chip->ecc.size,
			nanddev_get_ecc_requirements(&chip->base)->strength,
			nanddev_get_ecc_requirements(&chip->base)->step_size);

	/* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
		switch (ecc->steps) {
		case 2:
			mtd->subpage_sft = 1;
			break;
		case 4:
		case 8:
		case 16:
			mtd->subpage_sft = 2;
			break;
		}
	}
	chip->subpagesize = mtd->writesize >> mtd->subpage_sft;

	/* Invalidate the pagebuffer reference */
	chip->pagecache.page = -1;

	/* Large page NAND with SOFT_ECC should support subpage reads */
	switch (ecc->engine_type) {
	case NAND_ECC_ENGINE_TYPE_SOFT:
		if (chip->page_shift > 9)
			chip->options |= NAND_SUBPAGE_READ;
		break;

	default:
		break;
	}

	ret = nanddev_init(&chip->base, &rawnand_ops, mtd->owner);
	if (ret)
		goto err_nand_manuf_cleanup;

	/* Adjust the MTD_CAP_ flags when NAND_ROM is set. */
	if (chip->options & NAND_ROM)
		mtd->flags = MTD_CAP_ROM;

	/* Fill in remaining MTD driver data */
	mtd->_erase = nand_erase;
	mtd->_point = NULL;
	mtd->_unpoint = NULL;
	mtd->_panic_write = panic_nand_write;
	mtd->_read_oob = nand_read_oob;
	mtd->_write_oob = nand_write_oob;
	mtd->_sync = nand_sync;
	mtd->_lock = nand_lock;
	mtd->_unlock = nand_unlock;
	mtd->_suspend = nand_suspend;
	mtd->_resume = nand_resume;
	mtd->_reboot = nand_shutdown;
	mtd->_block_isreserved = nand_block_isreserved;
	mtd->_block_isbad = nand_block_isbad;
	mtd->_block_markbad = nand_block_markbad;
	mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;

	/*
	 * Initialize bitflip_threshold to its default prior scan_bbt() call.
	 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
	 * properly set.
	 */
	if (!mtd->bitflip_threshold)
		mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);

	/* Find the fastest data interface for this chip */
	ret = nand_choose_interface_config(chip);
	if (ret)
		goto err_nanddev_cleanup;

	/* Enter fastest possible mode on all dies. */
	for (i = 0; i < nanddev_ntargets(&chip->base); i++) {
		ret = nand_setup_interface(chip, i);
		if (ret)
			goto err_free_interface_config;
	}

	rawnand_late_check_supported_ops(chip);

	/*
	 * Look for secure regions in the NAND chip. These regions are supposed
	 * to be protected by a secure element like Trustzone. So the read/write
	 * accesses to these regions will be blocked in the runtime by this
	 * driver.
	 */
	ret = of_get_nand_secure_regions(chip);
	if (ret)
		goto err_free_interface_config;

	/* Check, if we should skip the bad block table scan */
	if (chip->options & NAND_SKIP_BBTSCAN)
		return 0;

	/* Build bad block table */
	ret = nand_create_bbt(chip);
	if (ret)
		goto err_free_secure_regions;

	return 0;

err_free_secure_regions:
	kfree(chip->secure_regions);

err_free_interface_config:
	kfree(chip->best_interface_config);

err_nanddev_cleanup:
	nanddev_cleanup(&chip->base);

err_nand_manuf_cleanup:
	nand_manufacturer_cleanup(chip);

err_free_buf:
	kfree(chip->data_buf);
	kfree(ecc->code_buf);
	kfree(ecc->calc_buf);

	return ret;
}

/* Give the controller driver a chance to bind per-chip resources */
static int nand_attach(struct nand_chip *chip)
{
	if (chip->controller->ops && chip->controller->ops->attach_chip)
		return chip->controller->ops->attach_chip(chip);

	return 0;
}

/* Release the controller driver's per-chip resources, if any */
static void nand_detach(struct nand_chip *chip)
{
	if (chip->controller->ops && chip->controller->ops->detach_chip)
		chip->controller->ops->detach_chip(chip);
}

/**
 *
 * nand_scan_with_ids - [NAND Interface] Scan for the NAND device
 * @chip: NAND chip object
 * @maxchips: number of chips to scan for.
 * @ids: optional flash IDs table
 *
 * This fills out all the uninitialized function pointers with the defaults.
 * The flash ID is read and the mtd/chip structures are filled with the
 * appropriate values.
 */
int nand_scan_with_ids(struct nand_chip *chip, unsigned int maxchips,
		       struct nand_flash_dev *ids)
{
	int ret;

	if (!maxchips)
		return -EINVAL;

	ret = nand_scan_ident(chip, maxchips, ids);
	if (ret)
		return ret;

	ret = nand_attach(chip);
	if (ret)
		goto cleanup_ident;

	ret = nand_scan_tail(chip);
	if (ret)
		goto detach_chip;

	return 0;

detach_chip:
	nand_detach(chip);
cleanup_ident:
	nand_scan_ident_cleanup(chip);

	return ret;
}
EXPORT_SYMBOL(nand_scan_with_ids);

/**
 * nand_cleanup - [NAND Interface] Free resources held by the NAND device
 * @chip: NAND chip object
 */
void nand_cleanup(struct nand_chip *chip)
{
	/* Tear down the software ECC engine context, if one was set up */
	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT) {
		if (chip->ecc.algo == NAND_ECC_ALGO_HAMMING)
			rawnand_sw_hamming_cleanup(chip);
		else if (chip->ecc.algo == NAND_ECC_ALGO_BCH)
			rawnand_sw_bch_cleanup(chip);
	}

	nanddev_cleanup(&chip->base);

	/* Free secure regions data */
	kfree(chip->secure_regions);

	/* Free bad block table memory */
	kfree(chip->bbt);
	kfree(chip->data_buf);
	kfree(chip->ecc.code_buf);
	kfree(chip->ecc.calc_buf);

	/* Free bad block descriptor memory */
	if (chip->badblock_pattern && chip->badblock_pattern->options
			& NAND_BBT_DYNAMICSTRUCT)
		kfree(chip->badblock_pattern);

	/* Free the data interface */
	kfree(chip->best_interface_config);

	/* Free manufacturer priv data. */
	nand_manufacturer_cleanup(chip);

	/* Free controller specific allocations after chip identification */
	nand_detach(chip);

	/* Free identification phase allocations */
	nand_scan_ident_cleanup(chip);
}

EXPORT_SYMBOL_GPL(nand_cleanup);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
MODULE_AUTHOR("Thomas Gleixner <tglx@kernel.org>");
MODULE_DESCRIPTION("Generic NAND flash driver code");