// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016-2017 Micron Technology, Inc.
 *
 * Authors:
 *	Peter Pan <peterpandong@micron.com>
 *	Boris Brezillon <boris.brezillon@bootlin.com>
 */

#define pr_fmt(fmt)	"spi-nand: " fmt

#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/spinand.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
	struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(reg,
							spinand->scratchbuf);
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (ret)
		return ret;

	*val = *spinand->scratchbuf;
	return 0;
}

int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
{
	struct spi_mem_op op = SPINAND_SET_FEATURE_1S_1S_1S_OP(reg,
							spinand->scratchbuf);

	*spinand->scratchbuf = val;
	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_read_status(struct spinand_device *spinand, u8 *status)
{
	return spinand_read_reg_op(spinand, REG_STATUS, status);
}

static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	*cfg = spinand->cfg_cache[spinand->cur_target];
	return 0;
}

static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cfg_cache[spinand->cur_target] == cfg)
		return 0;

	ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
	if (ret)
		return ret;

	spinand->cfg_cache[spinand->cur_target] = cfg;
	return 0;
}

/**
 * spinand_upd_cfg() - Update the configuration register
 * @spinand: the spinand device
 * @mask: the mask encoding the bits to update in the config reg
 * @val: the new value to apply
 *
 * Update the configuration register.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
{
	int ret;
	u8 cfg;

	ret = spinand_get_cfg(spinand, &cfg);
	if (ret)
		return ret;

	cfg &= ~mask;
	cfg |= val;

	return spinand_set_cfg(spinand, cfg);
}

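/*
 * Typical usage: callers update individual CFG bits through a cached
 * read-modify-write, passing the mask of the bits they own and the new
 * value for those bits. For instance, spinand_ecc_enable() below boils
 * down to:
 *
 *	spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
 *			enable ? CFG_ECC_ENABLE : 0);
 */
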
/**
 * spinand_select_target() - Select a specific NAND target/die
 * @spinand: the spinand device
 * @target: the target/die to select
 *
 * Select a new target/die. If the chip only has one die, this function is a
 * NOOP.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_select_target(struct spinand_device *spinand, unsigned int target)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cur_target == target)
		return 0;

	if (nand->memorg.ntargets == 1) {
		spinand->cur_target = target;
		return 0;
	}

	ret = spinand->select_target(spinand, target);
	if (ret)
		return ret;

	spinand->cur_target = target;
	return 0;
}

static int spinand_read_cfg(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int target;
	int ret;

	for (target = 0; target < nand->memorg.ntargets; target++) {
		ret = spinand_select_target(spinand, target);
		if (ret)
			return ret;

		/*
		 * We use spinand_read_reg_op() instead of spinand_get_cfg()
		 * here to bypass the config cache.
		 */
		ret = spinand_read_reg_op(spinand, REG_CFG,
					  &spinand->cfg_cache[target]);
		if (ret)
			return ret;
	}

	return 0;
}

static int spinand_init_cfg_cache(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct device *dev = &spinand->spimem->spi->dev;

	spinand->cfg_cache = devm_kcalloc(dev,
					  nand->memorg.ntargets,
					  sizeof(*spinand->cfg_cache),
					  GFP_KERNEL);
	if (!spinand->cfg_cache)
		return -ENOMEM;

	return 0;
}

static int spinand_init_quad_enable(struct spinand_device *spinand)
{
	bool enable = false;

	if (!(spinand->flags & SPINAND_HAS_QE_BIT))
		return 0;

	if (spinand->op_templates.read_cache->data.buswidth == 4 ||
	    spinand->op_templates.write_cache->data.buswidth == 4 ||
	    spinand->op_templates.update_cache->data.buswidth == 4)
		enable = true;

	return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
			       enable ? CFG_QUAD_ENABLE : 0);
}

static int spinand_ecc_enable(struct spinand_device *spinand,
			      bool enable)
{
	return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
			       enable ? CFG_ECC_ENABLE : 0);
}

static int spinand_cont_read_enable(struct spinand_device *spinand,
				    bool enable)
{
	return spinand->set_cont_read(spinand, enable);
}

static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (spinand->eccinfo.get_status)
		return spinand->eccinfo.get_status(spinand, status);

	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;

	case STATUS_ECC_HAS_BITFLIPS:
		/*
		 * We have no way to know exactly how many bitflips have been
		 * fixed, so let's return the maximum possible value so that
		 * wear-leveling layers move the data immediately.
		 */
		return nanddev_get_ecc_conf(nand)->strength;

	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	default:
		break;
	}

	return -EINVAL;
}

static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *region)
{
	return -ERANGE;
}

static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
					struct mtd_oob_region *region)
{
	if (section)
		return -ERANGE;

	/* Reserve 2 bytes for the BBM. */
	region->offset = 2;
	region->length = 62;

	return 0;
}

static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
	.ecc = spinand_noecc_ooblayout_ecc,
	.free = spinand_noecc_ooblayout_free,
};

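/*
 * Note: spinand_noecc_ooblayout only reserves the two bad block marker bytes
 * at the start of the OOB area and reports the remaining 62 bytes as free;
 * that free count is what mtd_ooblayout_count_freebytes() later exposes
 * through mtd->oobavail in spinand_init().
 */
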
static int spinand_ondie_ecc_init_ctx(struct nand_device *nand)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct spinand_ondie_ecc_conf *engine_conf;

	nand->ecc.ctx.conf.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
	nand->ecc.ctx.conf.step_size = nand->ecc.requirements.step_size;
	nand->ecc.ctx.conf.strength = nand->ecc.requirements.strength;

	engine_conf = kzalloc(sizeof(*engine_conf), GFP_KERNEL);
	if (!engine_conf)
		return -ENOMEM;

	nand->ecc.ctx.priv = engine_conf;

	if (spinand->eccinfo.ooblayout)
		mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
	else
		mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);

	return 0;
}

static void spinand_ondie_ecc_cleanup_ctx(struct nand_device *nand)
{
	kfree(nand->ecc.ctx.priv);
}

static int spinand_ondie_ecc_prepare_io_req(struct nand_device *nand,
					    struct nand_page_io_req *req)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	bool enable = (req->mode != MTD_OPS_RAW);

	if (!enable && spinand->flags & SPINAND_NO_RAW_ACCESS)
		return -EOPNOTSUPP;

	memset(spinand->oobbuf, 0xff, nanddev_per_page_oobsize(nand));

	/* Only enable or disable the engine */
	return spinand_ecc_enable(spinand, enable);
}

static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand,
					   struct nand_page_io_req *req)
{
	struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	int ret;

	if (req->mode == MTD_OPS_RAW)
		return 0;

	/* Nothing to do when finishing a page write */
	if (req->type == NAND_PAGE_WRITE)
		return 0;

	/* Finish a page read: check the status, report errors/bitflips */
	ret = spinand_check_ecc_status(spinand, engine_conf->status);
	if (ret == -EBADMSG) {
		mtd->ecc_stats.failed++;
	} else if (ret > 0) {
		unsigned int pages;

		/*
		 * Continuous reads don't allow us to get the detail,
		 * so we may exaggerate the actual number of corrected
		 * bitflips.
		 */
		if (!req->continuous)
			pages = 1;
		else
			pages = req->datalen / nanddev_page_size(nand);

		mtd->ecc_stats.corrected += ret * pages;
	}

	return ret;
}

static const struct nand_ecc_engine_ops spinand_ondie_ecc_engine_ops = {
	.init_ctx = spinand_ondie_ecc_init_ctx,
	.cleanup_ctx = spinand_ondie_ecc_cleanup_ctx,
	.prepare_io_req = spinand_ondie_ecc_prepare_io_req,
	.finish_io_req = spinand_ondie_ecc_finish_io_req,
};

static struct nand_ecc_engine spinand_ondie_ecc_engine = {
	.ops = &spinand_ondie_ecc_engine_ops,
};

static void spinand_ondie_ecc_save_status(struct nand_device *nand, u8 status)
{
	struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;

	if (nand->ecc.ctx.conf.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE &&
	    engine_conf)
		engine_conf->status = status;
}

int spinand_write_enable_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_WR_EN_DIS_1S_0_0_OP(true);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_load_page_op(struct spinand_device *spinand,
				const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PAGE_READ_1S_1S_0_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_read_from_cache_op(struct spinand_device *spinand,
				      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct spi_mem_dirmap_desc *rdesc;
	unsigned int nbytes = 0;
	void *buf = NULL;
	u16 column = 0;
	ssize_t ret;

	if (req->datalen) {
		buf = spinand->databuf;
		if (!req->continuous)
			nbytes = nanddev_page_size(nand);
		else
			nbytes = round_up(req->dataoffs + req->datalen,
					  nanddev_page_size(nand));
		column = 0;
	}

	if (req->ooblen) {
		nbytes += nanddev_per_page_oobsize(nand);
		if (!buf) {
			buf = spinand->oobbuf;
			column = nanddev_page_size(nand);
		}
	}

	if (req->mode == MTD_OPS_RAW)
		rdesc = spinand->dirmaps[req->pos.plane].rdesc;
	else
		rdesc = spinand->dirmaps[req->pos.plane].rdesc_ecc;

	if (spinand->flags & SPINAND_HAS_READ_PLANE_SELECT_BIT)
		column |= req->pos.plane << fls(nanddev_page_size(nand));

	while (nbytes) {
		ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
		if (ret < 0)
			return ret;

		if (!ret || ret > nbytes)
			return -EIO;

		nbytes -= ret;
		column += ret;
		buf += ret;

		/*
		 * Dirmap accesses are allowed to toggle the CS.
		 * Toggling the CS during a continuous read is forbidden.
		 */
		if (nbytes && req->continuous)
			return -EIO;
	}

	if (req->datalen)
		memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
		       req->datalen);

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
			       req->ooblen);
	}

	return 0;
}

static int spinand_write_to_cache_op(struct spinand_device *spinand,
				     const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct spi_mem_dirmap_desc *wdesc;
	unsigned int nbytes, column = 0;
	void *buf = spinand->databuf;
	ssize_t ret;

	/*
	 * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
	 * the cache content to 0xFF (depends on vendor implementation), so we
	 * must fill the page cache entirely even if we only want to program
	 * the data portion of the page, otherwise we might corrupt the BBM or
	 * user data previously programmed in OOB area.
	 *
	 * Only reset the data buffer manually, the OOB buffer is prepared by
	 * ECC engines ->prepare_io_req() callback.
	 */
	nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
	memset(spinand->databuf, 0xff, nanddev_page_size(nand));

	if (req->datalen)
		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
		       req->datalen);

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
			       req->ooblen);
	}

	if (req->mode == MTD_OPS_RAW)
		wdesc = spinand->dirmaps[req->pos.plane].wdesc;
	else
		wdesc = spinand->dirmaps[req->pos.plane].wdesc_ecc;

	if (spinand->flags & SPINAND_HAS_PROG_PLANE_SELECT_BIT)
		column |= req->pos.plane << fls(nanddev_page_size(nand));

	while (nbytes) {
		ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
		if (ret < 0)
			return ret;

		if (!ret || ret > nbytes)
			return -EIO;

		nbytes -= ret;
		column += ret;
		buf += ret;
	}

	return 0;
}

static int spinand_program_op(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PROG_EXEC_1S_1S_0_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_erase_op(struct spinand_device *spinand,
			    const struct nand_pos *pos)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, pos);
	struct spi_mem_op op = SPINAND_BLK_ERASE_1S_1S_0_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}

/**
 * spinand_wait() - Poll memory device status
 * @spinand: the spinand device
 * @initial_delay_us: delay in us before starting to poll
 * @poll_delay_us: time to sleep between reads in us
 * @s: the pointer to variable to store the value of REG_STATUS
 *
 * This function polls a status register (REG_STATUS) and returns when
 * the STATUS_READY bit is 0 or when the timeout has expired.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_wait(struct spinand_device *spinand, unsigned long initial_delay_us,
		 unsigned long poll_delay_us, u8 *s)
{
	struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(REG_STATUS,
							spinand->scratchbuf);
	u8 status;
	int ret;

	ret = spi_mem_poll_status(spinand->spimem, &op, STATUS_BUSY, 0,
				  initial_delay_us,
				  poll_delay_us,
				  SPINAND_WAITRDY_TIMEOUT_MS);
	if (ret)
		return ret;

	status = *spinand->scratchbuf;
	if (!(status & STATUS_BUSY))
		goto out;

	/*
	 * Extra read, just in case the STATUS_READY bit has changed
	 * since our last check
	 */
	ret = spinand_read_status(spinand, &status);
	if (ret)
		return ret;

out:
	if (s)
		*s = status;

	return status & STATUS_BUSY ? -ETIMEDOUT : 0;
}

static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
			      u8 ndummy, u8 *buf)
{
	struct spi_mem_op op = SPINAND_READID_1S_1S_1S_OP(
		naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (!ret)
		memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);

	return ret;
}

static int spinand_reset_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_RESET_1S_0_0_OP;
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (ret)
		return ret;

	return spinand_wait(spinand,
			    SPINAND_RESET_INITIAL_DELAY_US,
			    SPINAND_RESET_POLL_DELAY_US,
			    NULL);
}

static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
{
	return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
}

/**
 * spinand_read_page() - Read a page
 * @spinand: the spinand device
 * @req: the I/O request
 *
 * Return: 0 or a positive number of bitflips corrected on success.
 * A negative error code otherwise.
 */
int spinand_read_page(struct spinand_device *spinand,
		      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	u8 status;
	int ret;

	ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
	if (ret)
		return ret;

	ret = spinand_load_page_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand,
			   SPINAND_READ_INITIAL_DELAY_US,
			   SPINAND_READ_POLL_DELAY_US,
			   &status);
	if (ret < 0)
		return ret;

	spinand_ondie_ecc_save_status(nand, status);

	ret = spinand_read_from_cache_op(spinand, req);
	if (ret)
		return ret;

	return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}

/**
 * spinand_write_page() - Write a page
 * @spinand: the spinand device
 * @req: the I/O request
 *
 * Return: 0 or a positive number of bitflips corrected on success.
 * A negative error code otherwise.
 */
int spinand_write_page(struct spinand_device *spinand,
		       const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	u8 status;
	int ret;

	ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_write_to_cache_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_program_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand,
			   SPINAND_WRITE_INITIAL_DELAY_US,
			   SPINAND_WRITE_POLL_DELAY_US,
			   &status);
	if (ret)
		return ret;

	if (status & STATUS_PROG_FAILED)
		return -EIO;

	return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}

static int spinand_mtd_regular_page_read(struct mtd_info *mtd, loff_t from,
					 struct mtd_oob_ops *ops,
					 unsigned int *max_bitflips)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct mtd_ecc_stats old_stats;
	struct nand_io_iter iter;
	bool disable_ecc = false;
	bool ecc_failed = false;
	unsigned int retry_mode = 0;
	int ret;

	old_stats = mtd->ecc_stats;

	if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
		disable_ecc = true;

	nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
		if (disable_ecc)
			iter.req.mode = MTD_OPS_RAW;

		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

read_retry:
		ret = spinand_read_page(spinand, &iter.req);
		if (ret < 0 && ret != -EBADMSG)
			break;

		if (ret == -EBADMSG && spinand->set_read_retry) {
			if (spinand->read_retries &&
			    (++retry_mode <= spinand->read_retries)) {
				ret = spinand->set_read_retry(spinand, retry_mode);
				if (ret < 0) {
					spinand->set_read_retry(spinand, 0);
					return ret;
				}

				/* Reset ecc_stats; retry */
				mtd->ecc_stats = old_stats;
				goto read_retry;
			} else {
				/* No more retry modes; real failure */
				ecc_failed = true;
			}
		} else if (ret == -EBADMSG) {
			ecc_failed = true;
		} else {
			*max_bitflips = max_t(unsigned int, *max_bitflips, ret);
		}

		ret = 0;
		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;

		/* Reset to retry mode 0 */
		if (retry_mode) {
			retry_mode = 0;
			ret = spinand->set_read_retry(spinand, retry_mode);
			if (ret < 0)
				return ret;
		}
	}

	if (ecc_failed && !ret)
		ret = -EBADMSG;

	return ret;
}

static int spinand_mtd_continuous_page_read(struct mtd_info *mtd, loff_t from,
					    struct mtd_oob_ops *ops,
					    unsigned int *max_bitflips)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
	u8 status;
	int ret;

	ret = spinand_cont_read_enable(spinand, true);
	if (ret)
		return ret;

	/*
	 * The cache is divided into two halves. While one half of the cache
	 * holds the requested data, the other half is loaded with the next
	 * chunk of data. Therefore, the host can read out the data
	 * continuously from page to page. Each data read must be a multiple
	 * of 4 bytes and full pages should be read; otherwise, the data
	 * output might get out of sequence from one read command to another.
	 */
	nanddev_io_for_each_block(nand, NAND_PAGE_READ, from, ops, &iter) {
		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			goto end_cont_read;

		ret = nand_ecc_prepare_io_req(nand, &iter.req);
		if (ret)
			goto end_cont_read;

		ret = spinand_load_page_op(spinand, &iter.req);
		if (ret)
			goto end_cont_read;

		ret = spinand_wait(spinand, SPINAND_READ_INITIAL_DELAY_US,
				   SPINAND_READ_POLL_DELAY_US, NULL);
		if (ret < 0)
			goto end_cont_read;

		ret = spinand_read_from_cache_op(spinand, &iter.req);
		if (ret)
			goto end_cont_read;

		ops->retlen += iter.req.datalen;

		ret = spinand_read_status(spinand, &status);
		if (ret)
			goto end_cont_read;

		spinand_ondie_ecc_save_status(nand, status);

		ret = nand_ecc_finish_io_req(nand, &iter.req);
		if (ret < 0)
			goto end_cont_read;

		*max_bitflips = max_t(unsigned int, *max_bitflips, ret);
		ret = 0;
	}

end_cont_read:
	/*
	 * Once all the data has been read out, the host can either pull CS#
	 * high and wait for tRST or manually clear the bit in the
	 * configuration register to terminate the continuous read operation.
	 * We have no guarantee the SPI controller drivers will effectively
	 * deassert the CS when we expect them to, so take the register-based
	 * approach.
	 */
	spinand_cont_read_enable(spinand, false);

	return ret;
}

static void spinand_cont_read_init(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	enum nand_ecc_engine_type engine_type = nand->ecc.ctx.conf.engine_type;

	/* OOBs cannot be retrieved, so external/on-host ECC engines won't work */
	if (spinand->set_cont_read &&
	    (engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE ||
	     engine_type == NAND_ECC_ENGINE_TYPE_NONE)) {
		spinand->cont_read_possible = true;
	}
}

static bool spinand_use_cont_read(struct mtd_info *mtd, loff_t from,
				  struct mtd_oob_ops *ops)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos start_pos, end_pos;

	if (!spinand->cont_read_possible)
		return false;

	/* OOBs won't be retrieved */
	if (ops->ooblen || ops->oobbuf)
		return false;

	nanddev_offs_to_pos(nand, from, &start_pos);
	nanddev_offs_to_pos(nand, from + ops->len - 1, &end_pos);

	/*
	 * Continuous reads never cross LUN boundaries. Some devices don't
	 * support crossing plane boundaries. Some devices don't even support
	 * crossing block boundaries. Since the common case is reading through
	 * UBI, we will very rarely read two consecutive blocks or more, so it
	 * is safer and easier (can be improved) to only enable continuous
	 * reads when reading within the same erase block.
	 */
	if (start_pos.target != end_pos.target ||
	    start_pos.plane != end_pos.plane ||
	    start_pos.eraseblock != end_pos.eraseblock)
		return false;

	return start_pos.page < end_pos.page;
}

static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct mtd_ecc_stats old_stats;
	unsigned int max_bitflips = 0;
	int ret;

	mutex_lock(&spinand->lock);

	old_stats = mtd->ecc_stats;

	if (spinand_use_cont_read(mtd, from, ops))
		ret = spinand_mtd_continuous_page_read(mtd, from, ops, &max_bitflips);
	else
		ret = spinand_mtd_regular_page_read(mtd, from, ops, &max_bitflips);

	if (ops->stats) {
		ops->stats->uncorrectable_errors +=
			mtd->ecc_stats.failed - old_stats.failed;
		ops->stats->corrected_bitflips +=
			mtd->ecc_stats.corrected - old_stats.corrected;
	}

	mutex_unlock(&spinand->lock);

	return ret ? ret : max_bitflips;
}

static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
	bool disable_ecc = false;
	int ret = 0;

	if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
		disable_ecc = true;

	mutex_lock(&spinand->lock);

	nanddev_io_for_each_page(nand, NAND_PAGE_WRITE, to, ops, &iter) {
		if (disable_ecc)
			iter.req.mode = MTD_OPS_RAW;

		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_write_page(spinand, &iter.req);
		if (ret)
			break;

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

	mutex_unlock(&spinand->lock);

	return ret;
}

static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 marker[2] = { };
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooblen = sizeof(marker),
		.ooboffs = 0,
		.oobbuf.in = marker,
		.mode = MTD_OPS_RAW,
	};
	int ret;

	spinand_select_target(spinand, pos->target);

	ret = spinand_read_page(spinand, &req);
	if (ret == -EOPNOTSUPP) {
		/* Retry with ECC in case raw access is not supported */
		req.mode = MTD_OPS_PLACE_OOB;
		spinand_read_page(spinand, &req);
	}

	if (marker[0] != 0xff || marker[1] != 0xff)
		return true;

	return false;
}

static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_isbad(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 marker[2] = { };
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooboffs = 0,
		.ooblen = sizeof(marker),
		.oobbuf.out = marker,
		.mode = MTD_OPS_RAW,
	};
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_page(spinand, &req);
	if (ret == -EOPNOTSUPP) {
		/* Retry with ECC in case raw access is not supported */
		req.mode = MTD_OPS_PLACE_OOB;
		ret = spinand_write_page(spinand, &req);
	}

	return ret;
}

static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_markbad(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 status;
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_erase_op(spinand, pos);
	if (ret)
		return ret;

	ret = spinand_wait(spinand,
			   SPINAND_ERASE_INITIAL_DELAY_US,
			   SPINAND_ERASE_POLL_DELAY_US,
			   &status);

	if (!ret && (status & STATUS_ERASE_FAILED))
		ret = -EIO;

	return ret;
}

static int spinand_mtd_erase(struct mtd_info *mtd,
			     struct erase_info *einfo)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	int ret;

	mutex_lock(&spinand->lock);
	ret = nanddev_mtd_erase(mtd, einfo);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_isreserved(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

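/*
 * Direct-mapping layout used below: each plane gets its own window whose
 * offset encodes the plane number just above the column address bits. For
 * instance, on a device with 2048-byte pages, fls(2048) is 12, so plane 0 is
 * mapped at offset 0 and plane 1 at offset 4096 (0x1000). Each window covers
 * one page plus its OOB area, or a full erase block when continuous reads
 * are possible.
 */
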
static int spinand_create_dirmap(struct spinand_device *spinand,
				 unsigned int plane)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct spi_mem_dirmap_info info = {
		.length = nanddev_page_size(nand) +
			  nanddev_per_page_oobsize(nand),
	};
	struct spi_mem_dirmap_desc *desc;

	if (spinand->cont_read_possible)
		info.length = nanddev_eraseblock_size(nand);

	/* The plane number is passed in MSB just above the column address */
	info.offset = plane << fls(nand->memorg.pagesize);

	info.op_tmpl = *spinand->op_templates.update_cache;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].wdesc = desc;

	info.op_tmpl = *spinand->op_templates.read_cache;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].rdesc = desc;

	if (nand->ecc.engine->integration != NAND_ECC_ENGINE_INTEGRATION_PIPELINED) {
		spinand->dirmaps[plane].wdesc_ecc = spinand->dirmaps[plane].wdesc;
		spinand->dirmaps[plane].rdesc_ecc = spinand->dirmaps[plane].rdesc;

		return 0;
	}

	info.op_tmpl = *spinand->op_templates.update_cache;
	info.op_tmpl.data.ecc = true;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].wdesc_ecc = desc;

	info.op_tmpl = *spinand->op_templates.read_cache;
	info.op_tmpl.data.ecc = true;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].rdesc_ecc = desc;

	return 0;
}

static int spinand_create_dirmaps(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int i, ret;

	spinand->dirmaps = devm_kzalloc(&spinand->spimem->spi->dev,
					sizeof(*spinand->dirmaps) *
					nand->memorg.planes_per_lun,
					GFP_KERNEL);
	if (!spinand->dirmaps)
		return -ENOMEM;

	for (i = 0; i < nand->memorg.planes_per_lun; i++) {
		ret = spinand_create_dirmap(spinand, i);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct nand_ops spinand_ops = {
	.erase = spinand_erase,
	.markbad = spinand_markbad,
	.isbad = spinand_isbad,
};

static const struct spinand_manufacturer *spinand_manufacturers[] = {
	&alliancememory_spinand_manufacturer,
	&ato_spinand_manufacturer,
	&esmt_c8_spinand_manufacturer,
	&foresee_spinand_manufacturer,
	&gigadevice_spinand_manufacturer,
	&macronix_spinand_manufacturer,
	&micron_spinand_manufacturer,
	&paragon_spinand_manufacturer,
	&skyhigh_spinand_manufacturer,
	&toshiba_spinand_manufacturer,
	&winbond_spinand_manufacturer,
	&xtx_spinand_manufacturer,
};

static int spinand_manufacturer_match(struct spinand_device *spinand,
				      enum spinand_readid_method rdid_method)
{
	u8 *id = spinand->id.data;
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
		const struct spinand_manufacturer *manufacturer =
			spinand_manufacturers[i];

		if (id[0] != manufacturer->id)
			continue;

		ret = spinand_match_and_init(spinand,
					     manufacturer->chips,
					     manufacturer->nchips,
					     rdid_method);
		if (ret < 0)
			continue;

		spinand->manufacturer = manufacturer;
		return 0;
	}

	return -EOPNOTSUPP;
}

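/*
 * spinand_id_detect() below tries the three READ_ID layouts in use across
 * vendors, in order: bare opcode, opcode plus one address byte, then opcode
 * plus one dummy byte. Each resulting ID buffer is matched against the
 * manufacturer tables with the method that produced it.
 */
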
static int spinand_id_detect(struct spinand_device *spinand)
{
	u8 *id = spinand->id.data;
	int ret;

	ret = spinand_read_id_op(spinand, 0, 0, id);
	if (ret)
		return ret;
	ret = spinand_manufacturer_match(spinand, SPINAND_READID_METHOD_OPCODE);
	if (!ret)
		return 0;

	ret = spinand_read_id_op(spinand, 1, 0, id);
	if (ret)
		return ret;
	ret = spinand_manufacturer_match(spinand,
					 SPINAND_READID_METHOD_OPCODE_ADDR);
	if (!ret)
		return 0;

	ret = spinand_read_id_op(spinand, 0, 1, id);
	if (ret)
		return ret;
	ret = spinand_manufacturer_match(spinand,
					 SPINAND_READID_METHOD_OPCODE_DUMMY);

	return ret;
}

static int spinand_manufacturer_init(struct spinand_device *spinand)
{
	int ret;

	if (spinand->manufacturer->ops->init) {
		ret = spinand->manufacturer->ops->init(spinand);
		if (ret)
			return ret;
	}

	if (spinand->configure_chip) {
		ret = spinand->configure_chip(spinand);
		if (ret)
			return ret;
	}

	return 0;
}

static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
{
	/* Release manufacturer private data */
	if (spinand->manufacturer->ops->cleanup)
		return spinand->manufacturer->ops->cleanup(spinand);
}

static const struct spi_mem_op *
spinand_select_op_variant(struct spinand_device *spinand,
			  const struct spinand_op_variants *variants)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	const struct spi_mem_op *best_variant = NULL;
	u64 best_op_duration_ns = ULLONG_MAX;
	unsigned int i;

	for (i = 0; i < variants->nops; i++) {
		struct spi_mem_op op = variants->ops[i];
		u64 op_duration_ns = 0;
		unsigned int nbytes;
		int ret;

		nbytes = nanddev_per_page_oobsize(nand) +
			 nanddev_page_size(nand);

		while (nbytes) {
			op.data.nbytes = nbytes;
			ret = spi_mem_adjust_op_size(spinand->spimem, &op);
			if (ret)
				break;

			spi_mem_adjust_op_freq(spinand->spimem, &op);

			if (!spi_mem_supports_op(spinand->spimem, &op))
				break;

			nbytes -= op.data.nbytes;

			op_duration_ns += spi_mem_calc_op_duration(spinand->spimem, &op);
		}

		if (!nbytes && op_duration_ns < best_op_duration_ns) {
			best_op_duration_ns = op_duration_ns;
			best_variant = &variants->ops[i];
		}
	}

	return best_variant;
}

/**
 * spinand_match_and_init() - Try to find a match between a device ID and an
 *			      entry in a spinand_info table
 * @spinand: SPI NAND object
 * @table: SPI NAND device description table
 * @table_size: size of the device description table
 * @rdid_method: read id method to match
 *
 * Match between a device ID retrieved through the READ_ID command and an
 * entry in the SPI NAND description table. If a match is found, the spinand
 * object will be initialized with information provided by the matching
 * spinand_info entry.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_match_and_init(struct spinand_device *spinand,
			   const struct spinand_info *table,
			   unsigned int table_size,
			   enum spinand_readid_method rdid_method)
{
	u8 *id = spinand->id.data;
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < table_size; i++) {
		const struct spinand_info *info = &table[i];
		const struct spi_mem_op *op;

		if (rdid_method != info->devid.method)
			continue;

		if (memcmp(id + 1, info->devid.id, info->devid.len))
			continue;

		nand->memorg = table[i].memorg;
		nanddev_set_ecc_requirements(nand, &table[i].eccreq);
		spinand->eccinfo = table[i].eccinfo;
		spinand->flags = table[i].flags;
		spinand->id.len = 1 + table[i].devid.len;
		spinand->select_target = table[i].select_target;
		spinand->configure_chip = table[i].configure_chip;
		spinand->set_cont_read = table[i].set_cont_read;
		spinand->fact_otp = &table[i].fact_otp;
		spinand->user_otp = &table[i].user_otp;
		spinand->read_retries = table[i].read_retries;
		spinand->set_read_retry = table[i].set_read_retry;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.read_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.read_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.write_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.write_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.update_cache);
		spinand->op_templates.update_cache = op;

		return 0;
	}

	return -ENOTSUPP;
}

static int spinand_detect(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	ret = spinand_reset_op(spinand);
	if (ret)
		return ret;

	ret = spinand_id_detect(spinand);
	if (ret) {
		dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
			spinand->id.data);
		return ret;
	}

	if (nand->memorg.ntargets > 1 && !spinand->select_target) {
		dev_err(dev,
			"SPI NANDs with more than one die must implement ->select_target()\n");
		return -EINVAL;
	}

	dev_info(&spinand->spimem->spi->dev,
		 "%s SPI NAND was found.\n", spinand->manufacturer->name);
	dev_info(&spinand->spimem->spi->dev,
		 "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
		 nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
		 nanddev_page_size(nand), nanddev_per_page_oobsize(nand));

	return 0;
}

static int spinand_init_flash(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret, i;

	ret = spinand_read_cfg(spinand);
	if (ret)
		return ret;

	ret = spinand_init_quad_enable(spinand);
	if (ret)
		return ret;

	ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
	if (ret)
		return ret;

	ret = spinand_manufacturer_init(spinand);
	if (ret) {
		dev_err(dev,
			"Failed to initialize the SPI NAND chip (err = %d)\n",
			ret);
		return ret;
	}

	/* After power up, all blocks are locked, so unlock them here. */
	for (i = 0; i < nand->memorg.ntargets; i++) {
		ret = spinand_select_target(spinand, i);
		if (ret)
			break;

		ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
		if (ret)
			break;
	}

	if (ret)
		spinand_manufacturer_cleanup(spinand);

	return ret;
}

static void spinand_mtd_resume(struct mtd_info *mtd)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	int ret;

	ret = spinand_reset_op(spinand);
	if (ret)
		return;

	ret = spinand_init_flash(spinand);
	if (ret)
		return;

	spinand_ecc_enable(spinand, false);
}

static int spinand_init(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	int ret;

	/*
	 * We need a scratch buffer because the spi_mem interface requires that
	 * buf passed in spi_mem_op->data.buf be DMA-able.
	 */
	spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
	if (!spinand->scratchbuf)
		return -ENOMEM;

	ret = spinand_detect(spinand);
	if (ret)
		goto err_free_bufs;

	/*
	 * Use kzalloc() instead of devm_kzalloc() here, because some drivers
	 * may use this buffer for DMA access.
	 * Memory allocated by devm_ does not guarantee DMA-safe alignment.
	 */
	spinand->databuf = kzalloc(nanddev_eraseblock_size(nand),
				   GFP_KERNEL);
	if (!spinand->databuf) {
		ret = -ENOMEM;
		goto err_free_bufs;
	}

	spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);

	ret = spinand_init_cfg_cache(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_init_flash(spinand);
	if (ret)
		goto err_free_bufs;

	ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
	if (ret)
		goto err_manuf_cleanup;

	/* SPI-NAND default ECC engine is on-die */
	nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
	nand->ecc.ondie_engine = &spinand_ondie_ecc_engine;

	spinand_ecc_enable(spinand, false);
	ret = nanddev_ecc_engine_init(nand);
	if (ret)
		goto err_cleanup_nanddev;

	/*
	 * Continuous read can only be enabled with an on-die ECC engine, so
	 * the ECC initialization must have happened previously.
	 */
	spinand_cont_read_init(spinand);

	mtd->_read_oob = spinand_mtd_read;
	mtd->_write_oob = spinand_mtd_write;
	mtd->_block_isbad = spinand_mtd_block_isbad;
	mtd->_block_markbad = spinand_mtd_block_markbad;
	mtd->_block_isreserved = spinand_mtd_block_isreserved;
	mtd->_erase = spinand_mtd_erase;
	mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
	mtd->_resume = spinand_mtd_resume;

	if (spinand_user_otp_size(spinand) || spinand_fact_otp_size(spinand)) {
		ret = spinand_set_mtd_otp_ops(spinand);
		if (ret)
			goto err_cleanup_ecc_engine;
	}

	if (nand->ecc.engine) {
		ret = mtd_ooblayout_count_freebytes(mtd);
		if (ret < 0)
			goto err_cleanup_ecc_engine;
	}

	mtd->oobavail = ret;

	/* Propagate ECC information to mtd_info */
	mtd->ecc_strength = nanddev_get_ecc_conf(nand)->strength;
	mtd->ecc_step_size = nanddev_get_ecc_conf(nand)->step_size;
	mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);

	ret = spinand_create_dirmaps(spinand);
	if (ret) {
		dev_err(dev,
			"Failed to create direct mappings for read/write operations (err = %d)\n",
			ret);
		goto err_cleanup_ecc_engine;
	}

	return 0;

err_cleanup_ecc_engine:
	nanddev_ecc_engine_cleanup(nand);

err_cleanup_nanddev:
	nanddev_cleanup(nand);

err_manuf_cleanup:
	spinand_manufacturer_cleanup(spinand);

err_free_bufs:
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
	return ret;
}

static void spinand_cleanup(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	nanddev_ecc_engine_cleanup(nand);
	nanddev_cleanup(nand);
	spinand_manufacturer_cleanup(spinand);
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
}

static int spinand_probe(struct spi_mem *mem)
{
	struct spinand_device *spinand;
	struct mtd_info *mtd;
	int ret;

	spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
			       GFP_KERNEL);
	if (!spinand)
		return -ENOMEM;

	spinand->spimem = mem;
	spi_mem_set_drvdata(mem, spinand);
	spinand_set_of_node(spinand, mem->spi->dev.of_node);
	mutex_init(&spinand->lock);
	mtd = spinand_to_mtd(spinand);
	mtd->dev.parent = &mem->spi->dev;

	ret = spinand_init(spinand);
	if (ret)
		return ret;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		goto err_spinand_cleanup;

	return 0;

err_spinand_cleanup:
	spinand_cleanup(spinand);

	return ret;
}

static int spinand_remove(struct spi_mem *mem)
{
	struct spinand_device *spinand;
	struct mtd_info *mtd;
	int ret;

	spinand = spi_mem_get_drvdata(mem);
	mtd = spinand_to_mtd(spinand);

	ret = mtd_device_unregister(mtd);
	if (ret)
		return ret;

	spinand_cleanup(spinand);

	return 0;
}

static const struct spi_device_id spinand_ids[] = {
	{ .name = "spi-nand" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(spi, spinand_ids);

#ifdef CONFIG_OF
static const struct of_device_id spinand_of_ids[] = {
	{ .compatible = "spi-nand" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, spinand_of_ids);
#endif

static struct spi_mem_driver spinand_drv = {
	.spidrv = {
		.id_table = spinand_ids,
		.driver = {
			.name = "spi-nand",
			.of_match_table = of_match_ptr(spinand_of_ids),
		},
	},
	.probe = spinand_probe,
	.remove = spinand_remove,
};
module_spi_mem_driver(spinand_drv);

MODULE_DESCRIPTION("SPI NAND framework");
MODULE_AUTHOR("Peter Pan <peterpandong@micron.com>");
MODULE_LICENSE("GPL v2");