1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2016-2017 Micron Technology, Inc. 4 * 5 * Authors: 6 * Peter Pan <peterpandong@micron.com> 7 * Boris Brezillon <boris.brezillon@bootlin.com> 8 */ 9 10 #define pr_fmt(fmt) "spi-nand: " fmt 11 12 #include <linux/device.h> 13 #include <linux/jiffies.h> 14 #include <linux/kernel.h> 15 #include <linux/module.h> 16 #include <linux/mtd/spinand.h> 17 #include <linux/of.h> 18 #include <linux/slab.h> 19 #include <linux/string.h> 20 #include <linux/spi/spi.h> 21 #include <linux/spi/spi-mem.h> 22 23 static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val) 24 { 25 struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg, 26 spinand->scratchbuf); 27 int ret; 28 29 ret = spi_mem_exec_op(spinand->spimem, &op); 30 if (ret) 31 return ret; 32 33 *val = *spinand->scratchbuf; 34 return 0; 35 } 36 37 int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val) 38 { 39 struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg, 40 spinand->scratchbuf); 41 42 *spinand->scratchbuf = val; 43 return spi_mem_exec_op(spinand->spimem, &op); 44 } 45 46 static int spinand_read_status(struct spinand_device *spinand, u8 *status) 47 { 48 return spinand_read_reg_op(spinand, REG_STATUS, status); 49 } 50 51 static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg) 52 { 53 struct nand_device *nand = spinand_to_nand(spinand); 54 55 if (WARN_ON(spinand->cur_target < 0 || 56 spinand->cur_target >= nand->memorg.ntargets)) 57 return -EINVAL; 58 59 *cfg = spinand->cfg_cache[spinand->cur_target]; 60 return 0; 61 } 62 63 static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg) 64 { 65 struct nand_device *nand = spinand_to_nand(spinand); 66 int ret; 67 68 if (WARN_ON(spinand->cur_target < 0 || 69 spinand->cur_target >= nand->memorg.ntargets)) 70 return -EINVAL; 71 72 if (spinand->cfg_cache[spinand->cur_target] == cfg) 73 return 0; 74 75 ret = spinand_write_reg_op(spinand, REG_CFG, cfg); 76 if (ret) 77 return ret; 78 79 spinand->cfg_cache[spinand->cur_target] = cfg; 80 return 0; 81 } 82 83 /** 84 * spinand_upd_cfg() - Update the configuration register 85 * @spinand: the spinand device 86 * @mask: the mask encoding the bits to update in the config reg 87 * @val: the new value to apply 88 * 89 * Update the configuration register. 90 * 91 * Return: 0 on success, a negative error code otherwise. 92 */ 93 int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val) 94 { 95 int ret; 96 u8 cfg; 97 98 ret = spinand_get_cfg(spinand, &cfg); 99 if (ret) 100 return ret; 101 102 cfg &= ~mask; 103 cfg |= val; 104 105 return spinand_set_cfg(spinand, cfg); 106 } 107 108 /** 109 * spinand_select_target() - Select a specific NAND target/die 110 * @spinand: the spinand device 111 * @target: the target/die to select 112 * 113 * Select a new target/die. If chip only has one die, this function is a NOOP. 114 * 115 * Return: 0 on success, a negative error code otherwise. 116 */ 117 int spinand_select_target(struct spinand_device *spinand, unsigned int target) 118 { 119 struct nand_device *nand = spinand_to_nand(spinand); 120 int ret; 121 122 if (WARN_ON(target >= nand->memorg.ntargets)) 123 return -EINVAL; 124 125 if (spinand->cur_target == target) 126 return 0; 127 128 if (nand->memorg.ntargets == 1) { 129 spinand->cur_target = target; 130 return 0; 131 } 132 133 ret = spinand->select_target(spinand, target); 134 if (ret) 135 return ret; 136 137 spinand->cur_target = target; 138 return 0; 139 } 140 141 static int spinand_read_cfg(struct spinand_device *spinand) 142 { 143 struct nand_device *nand = spinand_to_nand(spinand); 144 unsigned int target; 145 int ret; 146 147 for (target = 0; target < nand->memorg.ntargets; target++) { 148 ret = spinand_select_target(spinand, target); 149 if (ret) 150 return ret; 151 152 /* 153 * We use spinand_read_reg_op() instead of spinand_get_cfg() 154 * here to bypass the config cache. 155 */ 156 ret = spinand_read_reg_op(spinand, REG_CFG, 157 &spinand->cfg_cache[target]); 158 if (ret) 159 return ret; 160 } 161 162 return 0; 163 } 164 165 static int spinand_init_cfg_cache(struct spinand_device *spinand) 166 { 167 struct nand_device *nand = spinand_to_nand(spinand); 168 struct device *dev = &spinand->spimem->spi->dev; 169 170 spinand->cfg_cache = devm_kcalloc(dev, 171 nand->memorg.ntargets, 172 sizeof(*spinand->cfg_cache), 173 GFP_KERNEL); 174 if (!spinand->cfg_cache) 175 return -ENOMEM; 176 177 return 0; 178 } 179 180 static int spinand_init_quad_enable(struct spinand_device *spinand) 181 { 182 bool enable = false; 183 184 if (!(spinand->flags & SPINAND_HAS_QE_BIT)) 185 return 0; 186 187 if (spinand->op_templates.read_cache->data.buswidth == 4 || 188 spinand->op_templates.write_cache->data.buswidth == 4 || 189 spinand->op_templates.update_cache->data.buswidth == 4) 190 enable = true; 191 192 return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE, 193 enable ? CFG_QUAD_ENABLE : 0); 194 } 195 196 static int spinand_ecc_enable(struct spinand_device *spinand, 197 bool enable) 198 { 199 return spinand_upd_cfg(spinand, CFG_ECC_ENABLE, 200 enable ? CFG_ECC_ENABLE : 0); 201 } 202 203 static int spinand_cont_read_enable(struct spinand_device *spinand, 204 bool enable) 205 { 206 return spinand->set_cont_read(spinand, enable); 207 } 208 209 static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status) 210 { 211 struct nand_device *nand = spinand_to_nand(spinand); 212 213 if (spinand->eccinfo.get_status) 214 return spinand->eccinfo.get_status(spinand, status); 215 216 switch (status & STATUS_ECC_MASK) { 217 case STATUS_ECC_NO_BITFLIPS: 218 return 0; 219 220 case STATUS_ECC_HAS_BITFLIPS: 221 /* 222 * We have no way to know exactly how many bitflips have been 223 * fixed, so let's return the maximum possible value so that 224 * wear-leveling layers move the data immediately. 225 */ 226 return nanddev_get_ecc_conf(nand)->strength; 227 228 case STATUS_ECC_UNCOR_ERROR: 229 return -EBADMSG; 230 231 default: 232 break; 233 } 234 235 return -EINVAL; 236 } 237 238 static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section, 239 struct mtd_oob_region *region) 240 { 241 return -ERANGE; 242 } 243 244 static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section, 245 struct mtd_oob_region *region) 246 { 247 if (section) 248 return -ERANGE; 249 250 /* Reserve 2 bytes for the BBM. */ 251 region->offset = 2; 252 region->length = 62; 253 254 return 0; 255 } 256 257 static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = { 258 .ecc = spinand_noecc_ooblayout_ecc, 259 .free = spinand_noecc_ooblayout_free, 260 }; 261 262 static int spinand_ondie_ecc_init_ctx(struct nand_device *nand) 263 { 264 struct spinand_device *spinand = nand_to_spinand(nand); 265 struct mtd_info *mtd = nanddev_to_mtd(nand); 266 struct spinand_ondie_ecc_conf *engine_conf; 267 268 nand->ecc.ctx.conf.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE; 269 nand->ecc.ctx.conf.step_size = nand->ecc.requirements.step_size; 270 nand->ecc.ctx.conf.strength = nand->ecc.requirements.strength; 271 272 engine_conf = kzalloc(sizeof(*engine_conf), GFP_KERNEL); 273 if (!engine_conf) 274 return -ENOMEM; 275 276 nand->ecc.ctx.priv = engine_conf; 277 278 if (spinand->eccinfo.ooblayout) 279 mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout); 280 else 281 mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout); 282 283 return 0; 284 } 285 286 static void spinand_ondie_ecc_cleanup_ctx(struct nand_device *nand) 287 { 288 kfree(nand->ecc.ctx.priv); 289 } 290 291 static int spinand_ondie_ecc_prepare_io_req(struct nand_device *nand, 292 struct nand_page_io_req *req) 293 { 294 struct spinand_device *spinand = nand_to_spinand(nand); 295 bool enable = (req->mode != MTD_OPS_RAW); 296 297 if (!enable && spinand->flags & SPINAND_NO_RAW_ACCESS) 298 return -EOPNOTSUPP; 299 300 memset(spinand->oobbuf, 0xff, nanddev_per_page_oobsize(nand)); 301 302 /* Only enable or disable the engine */ 303 return spinand_ecc_enable(spinand, enable); 304 } 305 306 static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand, 307 struct nand_page_io_req *req) 308 { 309 struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv; 310 struct spinand_device *spinand = nand_to_spinand(nand); 311 struct mtd_info *mtd = spinand_to_mtd(spinand); 312 int ret; 313 314 if (req->mode == MTD_OPS_RAW) 315 return 0; 316 317 /* Nothing to do when finishing a page write */ 318 if (req->type == NAND_PAGE_WRITE) 319 return 0; 320 321 /* Finish a page read: check the status, report errors/bitflips */ 322 ret = spinand_check_ecc_status(spinand, engine_conf->status); 323 if (ret == -EBADMSG) { 324 mtd->ecc_stats.failed++; 325 } else if (ret > 0) { 326 unsigned int pages; 327 328 /* 329 * Continuous reads don't allow us to get the detail, 330 * so we may exagerate the actual number of corrected bitflips. 331 */ 332 if (!req->continuous) 333 pages = 1; 334 else 335 pages = req->datalen / nanddev_page_size(nand); 336 337 mtd->ecc_stats.corrected += ret * pages; 338 } 339 340 return ret; 341 } 342 343 static const struct nand_ecc_engine_ops spinand_ondie_ecc_engine_ops = { 344 .init_ctx = spinand_ondie_ecc_init_ctx, 345 .cleanup_ctx = spinand_ondie_ecc_cleanup_ctx, 346 .prepare_io_req = spinand_ondie_ecc_prepare_io_req, 347 .finish_io_req = spinand_ondie_ecc_finish_io_req, 348 }; 349 350 static struct nand_ecc_engine spinand_ondie_ecc_engine = { 351 .ops = &spinand_ondie_ecc_engine_ops, 352 }; 353 354 static void spinand_ondie_ecc_save_status(struct nand_device *nand, u8 status) 355 { 356 struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv; 357 358 if (nand->ecc.ctx.conf.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE && 359 engine_conf) 360 engine_conf->status = status; 361 } 362 363 static int spinand_write_enable_op(struct spinand_device *spinand) 364 { 365 struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true); 366 367 return spi_mem_exec_op(spinand->spimem, &op); 368 } 369 370 static int spinand_load_page_op(struct spinand_device *spinand, 371 const struct nand_page_io_req *req) 372 { 373 struct nand_device *nand = spinand_to_nand(spinand); 374 unsigned int row = nanddev_pos_to_row(nand, &req->pos); 375 struct spi_mem_op op = SPINAND_PAGE_READ_OP(row); 376 377 return spi_mem_exec_op(spinand->spimem, &op); 378 } 379 380 static int spinand_read_from_cache_op(struct spinand_device *spinand, 381 const struct nand_page_io_req *req) 382 { 383 struct nand_device *nand = spinand_to_nand(spinand); 384 struct mtd_info *mtd = spinand_to_mtd(spinand); 385 struct spi_mem_dirmap_desc *rdesc; 386 unsigned int nbytes = 0; 387 void *buf = NULL; 388 u16 column = 0; 389 ssize_t ret; 390 391 if (req->datalen) { 392 buf = spinand->databuf; 393 if (!req->continuous) 394 nbytes = nanddev_page_size(nand); 395 else 396 nbytes = round_up(req->dataoffs + req->datalen, 397 nanddev_page_size(nand)); 398 column = 0; 399 } 400 401 if (req->ooblen) { 402 nbytes += nanddev_per_page_oobsize(nand); 403 if (!buf) { 404 buf = spinand->oobbuf; 405 column = nanddev_page_size(nand); 406 } 407 } 408 409 if (req->mode == MTD_OPS_RAW) 410 rdesc = spinand->dirmaps[req->pos.plane].rdesc; 411 else 412 rdesc = spinand->dirmaps[req->pos.plane].rdesc_ecc; 413 414 if (spinand->flags & SPINAND_HAS_READ_PLANE_SELECT_BIT) 415 column |= req->pos.plane << fls(nanddev_page_size(nand)); 416 417 while (nbytes) { 418 ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf); 419 if (ret < 0) 420 return ret; 421 422 if (!ret || ret > nbytes) 423 return -EIO; 424 425 nbytes -= ret; 426 column += ret; 427 buf += ret; 428 429 /* 430 * Dirmap accesses are allowed to toggle the CS. 431 * Toggling the CS during a continuous read is forbidden. 432 */ 433 if (nbytes && req->continuous) 434 return -EIO; 435 } 436 437 if (req->datalen) 438 memcpy(req->databuf.in, spinand->databuf + req->dataoffs, 439 req->datalen); 440 441 if (req->ooblen) { 442 if (req->mode == MTD_OPS_AUTO_OOB) 443 mtd_ooblayout_get_databytes(mtd, req->oobbuf.in, 444 spinand->oobbuf, 445 req->ooboffs, 446 req->ooblen); 447 else 448 memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs, 449 req->ooblen); 450 } 451 452 return 0; 453 } 454 455 static int spinand_write_to_cache_op(struct spinand_device *spinand, 456 const struct nand_page_io_req *req) 457 { 458 struct nand_device *nand = spinand_to_nand(spinand); 459 struct mtd_info *mtd = spinand_to_mtd(spinand); 460 struct spi_mem_dirmap_desc *wdesc; 461 unsigned int nbytes, column = 0; 462 void *buf = spinand->databuf; 463 ssize_t ret; 464 465 /* 466 * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset 467 * the cache content to 0xFF (depends on vendor implementation), so we 468 * must fill the page cache entirely even if we only want to program 469 * the data portion of the page, otherwise we might corrupt the BBM or 470 * user data previously programmed in OOB area. 471 * 472 * Only reset the data buffer manually, the OOB buffer is prepared by 473 * ECC engines ->prepare_io_req() callback. 474 */ 475 nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand); 476 memset(spinand->databuf, 0xff, nanddev_page_size(nand)); 477 478 if (req->datalen) 479 memcpy(spinand->databuf + req->dataoffs, req->databuf.out, 480 req->datalen); 481 482 if (req->ooblen) { 483 if (req->mode == MTD_OPS_AUTO_OOB) 484 mtd_ooblayout_set_databytes(mtd, req->oobbuf.out, 485 spinand->oobbuf, 486 req->ooboffs, 487 req->ooblen); 488 else 489 memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out, 490 req->ooblen); 491 } 492 493 if (req->mode == MTD_OPS_RAW) 494 wdesc = spinand->dirmaps[req->pos.plane].wdesc; 495 else 496 wdesc = spinand->dirmaps[req->pos.plane].wdesc_ecc; 497 498 if (spinand->flags & SPINAND_HAS_PROG_PLANE_SELECT_BIT) 499 column |= req->pos.plane << fls(nanddev_page_size(nand)); 500 501 while (nbytes) { 502 ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf); 503 if (ret < 0) 504 return ret; 505 506 if (!ret || ret > nbytes) 507 return -EIO; 508 509 nbytes -= ret; 510 column += ret; 511 buf += ret; 512 } 513 514 return 0; 515 } 516 517 static int spinand_program_op(struct spinand_device *spinand, 518 const struct nand_page_io_req *req) 519 { 520 struct nand_device *nand = spinand_to_nand(spinand); 521 unsigned int row = nanddev_pos_to_row(nand, &req->pos); 522 struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row); 523 524 return spi_mem_exec_op(spinand->spimem, &op); 525 } 526 527 static int spinand_erase_op(struct spinand_device *spinand, 528 const struct nand_pos *pos) 529 { 530 struct nand_device *nand = spinand_to_nand(spinand); 531 unsigned int row = nanddev_pos_to_row(nand, pos); 532 struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row); 533 534 return spi_mem_exec_op(spinand->spimem, &op); 535 } 536 537 static int spinand_wait(struct spinand_device *spinand, 538 unsigned long initial_delay_us, 539 unsigned long poll_delay_us, 540 u8 *s) 541 { 542 struct spi_mem_op op = SPINAND_GET_FEATURE_OP(REG_STATUS, 543 spinand->scratchbuf); 544 u8 status; 545 int ret; 546 547 ret = spi_mem_poll_status(spinand->spimem, &op, STATUS_BUSY, 0, 548 initial_delay_us, 549 poll_delay_us, 550 SPINAND_WAITRDY_TIMEOUT_MS); 551 if (ret) 552 return ret; 553 554 status = *spinand->scratchbuf; 555 if (!(status & STATUS_BUSY)) 556 goto out; 557 558 /* 559 * Extra read, just in case the STATUS_READY bit has changed 560 * since our last check 561 */ 562 ret = spinand_read_status(spinand, &status); 563 if (ret) 564 return ret; 565 566 out: 567 if (s) 568 *s = status; 569 570 return status & STATUS_BUSY ? -ETIMEDOUT : 0; 571 } 572 573 static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr, 574 u8 ndummy, u8 *buf) 575 { 576 struct spi_mem_op op = SPINAND_READID_OP( 577 naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN); 578 int ret; 579 580 ret = spi_mem_exec_op(spinand->spimem, &op); 581 if (!ret) 582 memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN); 583 584 return ret; 585 } 586 587 static int spinand_reset_op(struct spinand_device *spinand) 588 { 589 struct spi_mem_op op = SPINAND_RESET_OP; 590 int ret; 591 592 ret = spi_mem_exec_op(spinand->spimem, &op); 593 if (ret) 594 return ret; 595 596 return spinand_wait(spinand, 597 SPINAND_RESET_INITIAL_DELAY_US, 598 SPINAND_RESET_POLL_DELAY_US, 599 NULL); 600 } 601 602 static int spinand_lock_block(struct spinand_device *spinand, u8 lock) 603 { 604 return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock); 605 } 606 607 static int spinand_read_page(struct spinand_device *spinand, 608 const struct nand_page_io_req *req) 609 { 610 struct nand_device *nand = spinand_to_nand(spinand); 611 u8 status; 612 int ret; 613 614 ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req); 615 if (ret) 616 return ret; 617 618 ret = spinand_load_page_op(spinand, req); 619 if (ret) 620 return ret; 621 622 ret = spinand_wait(spinand, 623 SPINAND_READ_INITIAL_DELAY_US, 624 SPINAND_READ_POLL_DELAY_US, 625 &status); 626 if (ret < 0) 627 return ret; 628 629 spinand_ondie_ecc_save_status(nand, status); 630 631 ret = spinand_read_from_cache_op(spinand, req); 632 if (ret) 633 return ret; 634 635 return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req); 636 } 637 638 static int spinand_write_page(struct spinand_device *spinand, 639 const struct nand_page_io_req *req) 640 { 641 struct nand_device *nand = spinand_to_nand(spinand); 642 u8 status; 643 int ret; 644 645 ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req); 646 if (ret) 647 return ret; 648 649 ret = spinand_write_enable_op(spinand); 650 if (ret) 651 return ret; 652 653 ret = spinand_write_to_cache_op(spinand, req); 654 if (ret) 655 return ret; 656 657 ret = spinand_program_op(spinand, req); 658 if (ret) 659 return ret; 660 661 ret = spinand_wait(spinand, 662 SPINAND_WRITE_INITIAL_DELAY_US, 663 SPINAND_WRITE_POLL_DELAY_US, 664 &status); 665 if (!ret && (status & STATUS_PROG_FAILED)) 666 return -EIO; 667 668 return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req); 669 } 670 671 static int spinand_mtd_regular_page_read(struct mtd_info *mtd, loff_t from, 672 struct mtd_oob_ops *ops, 673 unsigned int *max_bitflips) 674 { 675 struct spinand_device *spinand = mtd_to_spinand(mtd); 676 struct nand_device *nand = mtd_to_nanddev(mtd); 677 struct nand_io_iter iter; 678 bool disable_ecc = false; 679 bool ecc_failed = false; 680 int ret; 681 682 if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout) 683 disable_ecc = true; 684 685 nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) { 686 if (disable_ecc) 687 iter.req.mode = MTD_OPS_RAW; 688 689 ret = spinand_select_target(spinand, iter.req.pos.target); 690 if (ret) 691 break; 692 693 ret = spinand_read_page(spinand, &iter.req); 694 if (ret < 0 && ret != -EBADMSG) 695 break; 696 697 if (ret == -EBADMSG) 698 ecc_failed = true; 699 else 700 *max_bitflips = max_t(unsigned int, *max_bitflips, ret); 701 702 ret = 0; 703 ops->retlen += iter.req.datalen; 704 ops->oobretlen += iter.req.ooblen; 705 } 706 707 if (ecc_failed && !ret) 708 ret = -EBADMSG; 709 710 return ret; 711 } 712 713 static int spinand_mtd_continuous_page_read(struct mtd_info *mtd, loff_t from, 714 struct mtd_oob_ops *ops, 715 unsigned int *max_bitflips) 716 { 717 struct spinand_device *spinand = mtd_to_spinand(mtd); 718 struct nand_device *nand = mtd_to_nanddev(mtd); 719 struct nand_io_iter iter; 720 u8 status; 721 int ret; 722 723 ret = spinand_cont_read_enable(spinand, true); 724 if (ret) 725 return ret; 726 727 /* 728 * The cache is divided into two halves. While one half of the cache has 729 * the requested data, the other half is loaded with the next chunk of data. 730 * Therefore, the host can read out the data continuously from page to page. 731 * Each data read must be a multiple of 4-bytes and full pages should be read; 732 * otherwise, the data output might get out of sequence from one read command 733 * to another. 734 */ 735 nanddev_io_for_each_block(nand, NAND_PAGE_READ, from, ops, &iter) { 736 ret = spinand_select_target(spinand, iter.req.pos.target); 737 if (ret) 738 goto end_cont_read; 739 740 ret = nand_ecc_prepare_io_req(nand, &iter.req); 741 if (ret) 742 goto end_cont_read; 743 744 ret = spinand_load_page_op(spinand, &iter.req); 745 if (ret) 746 goto end_cont_read; 747 748 ret = spinand_wait(spinand, SPINAND_READ_INITIAL_DELAY_US, 749 SPINAND_READ_POLL_DELAY_US, NULL); 750 if (ret < 0) 751 goto end_cont_read; 752 753 ret = spinand_read_from_cache_op(spinand, &iter.req); 754 if (ret) 755 goto end_cont_read; 756 757 ops->retlen += iter.req.datalen; 758 759 ret = spinand_read_status(spinand, &status); 760 if (ret) 761 goto end_cont_read; 762 763 spinand_ondie_ecc_save_status(nand, status); 764 765 ret = nand_ecc_finish_io_req(nand, &iter.req); 766 if (ret < 0) 767 goto end_cont_read; 768 769 *max_bitflips = max_t(unsigned int, *max_bitflips, ret); 770 ret = 0; 771 } 772 773 end_cont_read: 774 /* 775 * Once all the data has been read out, the host can either pull CS# 776 * high and wait for tRST or manually clear the bit in the configuration 777 * register to terminate the continuous read operation. We have no 778 * guarantee the SPI controller drivers will effectively deassert the CS 779 * when we expect them to, so take the register based approach. 780 */ 781 spinand_cont_read_enable(spinand, false); 782 783 return ret; 784 } 785 786 static void spinand_cont_read_init(struct spinand_device *spinand) 787 { 788 struct nand_device *nand = spinand_to_nand(spinand); 789 enum nand_ecc_engine_type engine_type = nand->ecc.ctx.conf.engine_type; 790 791 /* OOBs cannot be retrieved so external/on-host ECC engine won't work */ 792 if (spinand->set_cont_read && 793 (engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE || 794 engine_type == NAND_ECC_ENGINE_TYPE_NONE)) { 795 spinand->cont_read_possible = true; 796 } 797 } 798 799 static bool spinand_use_cont_read(struct mtd_info *mtd, loff_t from, 800 struct mtd_oob_ops *ops) 801 { 802 struct nand_device *nand = mtd_to_nanddev(mtd); 803 struct spinand_device *spinand = nand_to_spinand(nand); 804 struct nand_pos start_pos, end_pos; 805 806 if (!spinand->cont_read_possible) 807 return false; 808 809 /* OOBs won't be retrieved */ 810 if (ops->ooblen || ops->oobbuf) 811 return false; 812 813 nanddev_offs_to_pos(nand, from, &start_pos); 814 nanddev_offs_to_pos(nand, from + ops->len - 1, &end_pos); 815 816 /* 817 * Continuous reads never cross LUN boundaries. Some devices don't 818 * support crossing planes boundaries. Some devices don't even support 819 * crossing blocks boundaries. The common case being to read through UBI, 820 * we will very rarely read two consequent blocks or more, so it is safer 821 * and easier (can be improved) to only enable continuous reads when 822 * reading within the same erase block. 823 */ 824 if (start_pos.target != end_pos.target || 825 start_pos.plane != end_pos.plane || 826 start_pos.eraseblock != end_pos.eraseblock) 827 return false; 828 829 return start_pos.page < end_pos.page; 830 } 831 832 static int spinand_mtd_read(struct mtd_info *mtd, loff_t from, 833 struct mtd_oob_ops *ops) 834 { 835 struct spinand_device *spinand = mtd_to_spinand(mtd); 836 struct mtd_ecc_stats old_stats; 837 unsigned int max_bitflips = 0; 838 int ret; 839 840 mutex_lock(&spinand->lock); 841 842 old_stats = mtd->ecc_stats; 843 844 if (spinand_use_cont_read(mtd, from, ops)) 845 ret = spinand_mtd_continuous_page_read(mtd, from, ops, &max_bitflips); 846 else 847 ret = spinand_mtd_regular_page_read(mtd, from, ops, &max_bitflips); 848 849 if (ops->stats) { 850 ops->stats->uncorrectable_errors += 851 mtd->ecc_stats.failed - old_stats.failed; 852 ops->stats->corrected_bitflips += 853 mtd->ecc_stats.corrected - old_stats.corrected; 854 } 855 856 mutex_unlock(&spinand->lock); 857 858 return ret ? ret : max_bitflips; 859 } 860 861 static int spinand_mtd_write(struct mtd_info *mtd, loff_t to, 862 struct mtd_oob_ops *ops) 863 { 864 struct spinand_device *spinand = mtd_to_spinand(mtd); 865 struct nand_device *nand = mtd_to_nanddev(mtd); 866 struct nand_io_iter iter; 867 bool disable_ecc = false; 868 int ret = 0; 869 870 if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout) 871 disable_ecc = true; 872 873 mutex_lock(&spinand->lock); 874 875 nanddev_io_for_each_page(nand, NAND_PAGE_WRITE, to, ops, &iter) { 876 if (disable_ecc) 877 iter.req.mode = MTD_OPS_RAW; 878 879 ret = spinand_select_target(spinand, iter.req.pos.target); 880 if (ret) 881 break; 882 883 ret = spinand_write_page(spinand, &iter.req); 884 if (ret) 885 break; 886 887 ops->retlen += iter.req.datalen; 888 ops->oobretlen += iter.req.ooblen; 889 } 890 891 mutex_unlock(&spinand->lock); 892 893 return ret; 894 } 895 896 static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos) 897 { 898 struct spinand_device *spinand = nand_to_spinand(nand); 899 u8 marker[2] = { }; 900 struct nand_page_io_req req = { 901 .pos = *pos, 902 .ooblen = sizeof(marker), 903 .ooboffs = 0, 904 .oobbuf.in = marker, 905 .mode = MTD_OPS_RAW, 906 }; 907 int ret; 908 909 spinand_select_target(spinand, pos->target); 910 911 ret = spinand_read_page(spinand, &req); 912 if (ret == -EOPNOTSUPP) { 913 /* Retry with ECC in case raw access is not supported */ 914 req.mode = MTD_OPS_PLACE_OOB; 915 spinand_read_page(spinand, &req); 916 } 917 918 if (marker[0] != 0xff || marker[1] != 0xff) 919 return true; 920 921 return false; 922 } 923 924 static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs) 925 { 926 struct nand_device *nand = mtd_to_nanddev(mtd); 927 struct spinand_device *spinand = nand_to_spinand(nand); 928 struct nand_pos pos; 929 int ret; 930 931 nanddev_offs_to_pos(nand, offs, &pos); 932 mutex_lock(&spinand->lock); 933 ret = nanddev_isbad(nand, &pos); 934 mutex_unlock(&spinand->lock); 935 936 return ret; 937 } 938 939 static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos) 940 { 941 struct spinand_device *spinand = nand_to_spinand(nand); 942 u8 marker[2] = { }; 943 struct nand_page_io_req req = { 944 .pos = *pos, 945 .ooboffs = 0, 946 .ooblen = sizeof(marker), 947 .oobbuf.out = marker, 948 .mode = MTD_OPS_RAW, 949 }; 950 int ret; 951 952 ret = spinand_select_target(spinand, pos->target); 953 if (ret) 954 return ret; 955 956 ret = spinand_write_page(spinand, &req); 957 if (ret == -EOPNOTSUPP) { 958 /* Retry with ECC in case raw access is not supported */ 959 req.mode = MTD_OPS_PLACE_OOB; 960 ret = spinand_write_page(spinand, &req); 961 } 962 963 return ret; 964 } 965 966 static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs) 967 { 968 struct nand_device *nand = mtd_to_nanddev(mtd); 969 struct spinand_device *spinand = nand_to_spinand(nand); 970 struct nand_pos pos; 971 int ret; 972 973 nanddev_offs_to_pos(nand, offs, &pos); 974 mutex_lock(&spinand->lock); 975 ret = nanddev_markbad(nand, &pos); 976 mutex_unlock(&spinand->lock); 977 978 return ret; 979 } 980 981 static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos) 982 { 983 struct spinand_device *spinand = nand_to_spinand(nand); 984 u8 status; 985 int ret; 986 987 ret = spinand_select_target(spinand, pos->target); 988 if (ret) 989 return ret; 990 991 ret = spinand_write_enable_op(spinand); 992 if (ret) 993 return ret; 994 995 ret = spinand_erase_op(spinand, pos); 996 if (ret) 997 return ret; 998 999 ret = spinand_wait(spinand, 1000 SPINAND_ERASE_INITIAL_DELAY_US, 1001 SPINAND_ERASE_POLL_DELAY_US, 1002 &status); 1003 1004 if (!ret && (status & STATUS_ERASE_FAILED)) 1005 ret = -EIO; 1006 1007 return ret; 1008 } 1009 1010 static int spinand_mtd_erase(struct mtd_info *mtd, 1011 struct erase_info *einfo) 1012 { 1013 struct spinand_device *spinand = mtd_to_spinand(mtd); 1014 int ret; 1015 1016 mutex_lock(&spinand->lock); 1017 ret = nanddev_mtd_erase(mtd, einfo); 1018 mutex_unlock(&spinand->lock); 1019 1020 return ret; 1021 } 1022 1023 static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs) 1024 { 1025 struct spinand_device *spinand = mtd_to_spinand(mtd); 1026 struct nand_device *nand = mtd_to_nanddev(mtd); 1027 struct nand_pos pos; 1028 int ret; 1029 1030 nanddev_offs_to_pos(nand, offs, &pos); 1031 mutex_lock(&spinand->lock); 1032 ret = nanddev_isreserved(nand, &pos); 1033 mutex_unlock(&spinand->lock); 1034 1035 return ret; 1036 } 1037 1038 static int spinand_create_dirmap(struct spinand_device *spinand, 1039 unsigned int plane) 1040 { 1041 struct nand_device *nand = spinand_to_nand(spinand); 1042 struct spi_mem_dirmap_info info = { 1043 .length = nanddev_page_size(nand) + 1044 nanddev_per_page_oobsize(nand), 1045 }; 1046 struct spi_mem_dirmap_desc *desc; 1047 1048 if (spinand->cont_read_possible) 1049 info.length = nanddev_eraseblock_size(nand); 1050 1051 /* The plane number is passed in MSB just above the column address */ 1052 info.offset = plane << fls(nand->memorg.pagesize); 1053 1054 info.op_tmpl = *spinand->op_templates.update_cache; 1055 desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev, 1056 spinand->spimem, &info); 1057 if (IS_ERR(desc)) 1058 return PTR_ERR(desc); 1059 1060 spinand->dirmaps[plane].wdesc = desc; 1061 1062 info.op_tmpl = *spinand->op_templates.read_cache; 1063 desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev, 1064 spinand->spimem, &info); 1065 if (IS_ERR(desc)) 1066 return PTR_ERR(desc); 1067 1068 spinand->dirmaps[plane].rdesc = desc; 1069 1070 if (nand->ecc.engine->integration != NAND_ECC_ENGINE_INTEGRATION_PIPELINED) { 1071 spinand->dirmaps[plane].wdesc_ecc = spinand->dirmaps[plane].wdesc; 1072 spinand->dirmaps[plane].rdesc_ecc = spinand->dirmaps[plane].rdesc; 1073 1074 return 0; 1075 } 1076 1077 info.op_tmpl = *spinand->op_templates.update_cache; 1078 info.op_tmpl.data.ecc = true; 1079 desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev, 1080 spinand->spimem, &info); 1081 if (IS_ERR(desc)) 1082 return PTR_ERR(desc); 1083 1084 spinand->dirmaps[plane].wdesc_ecc = desc; 1085 1086 info.op_tmpl = *spinand->op_templates.read_cache; 1087 info.op_tmpl.data.ecc = true; 1088 desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev, 1089 spinand->spimem, &info); 1090 if (IS_ERR(desc)) 1091 return PTR_ERR(desc); 1092 1093 spinand->dirmaps[plane].rdesc_ecc = desc; 1094 1095 return 0; 1096 } 1097 1098 static int spinand_create_dirmaps(struct spinand_device *spinand) 1099 { 1100 struct nand_device *nand = spinand_to_nand(spinand); 1101 int i, ret; 1102 1103 spinand->dirmaps = devm_kzalloc(&spinand->spimem->spi->dev, 1104 sizeof(*spinand->dirmaps) * 1105 nand->memorg.planes_per_lun, 1106 GFP_KERNEL); 1107 if (!spinand->dirmaps) 1108 return -ENOMEM; 1109 1110 for (i = 0; i < nand->memorg.planes_per_lun; i++) { 1111 ret = spinand_create_dirmap(spinand, i); 1112 if (ret) 1113 return ret; 1114 } 1115 1116 return 0; 1117 } 1118 1119 static const struct nand_ops spinand_ops = { 1120 .erase = spinand_erase, 1121 .markbad = spinand_markbad, 1122 .isbad = spinand_isbad, 1123 }; 1124 1125 static const struct spinand_manufacturer *spinand_manufacturers[] = { 1126 &alliancememory_spinand_manufacturer, 1127 &ato_spinand_manufacturer, 1128 &esmt_c8_spinand_manufacturer, 1129 &foresee_spinand_manufacturer, 1130 &gigadevice_spinand_manufacturer, 1131 ¯onix_spinand_manufacturer, 1132 µn_spinand_manufacturer, 1133 ¶gon_spinand_manufacturer, 1134 &skyhigh_spinand_manufacturer, 1135 &toshiba_spinand_manufacturer, 1136 &winbond_spinand_manufacturer, 1137 &xtx_spinand_manufacturer, 1138 }; 1139 1140 static int spinand_manufacturer_match(struct spinand_device *spinand, 1141 enum spinand_readid_method rdid_method) 1142 { 1143 u8 *id = spinand->id.data; 1144 unsigned int i; 1145 int ret; 1146 1147 for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) { 1148 const struct spinand_manufacturer *manufacturer = 1149 spinand_manufacturers[i]; 1150 1151 if (id[0] != manufacturer->id) 1152 continue; 1153 1154 ret = spinand_match_and_init(spinand, 1155 manufacturer->chips, 1156 manufacturer->nchips, 1157 rdid_method); 1158 if (ret < 0) 1159 continue; 1160 1161 spinand->manufacturer = manufacturer; 1162 return 0; 1163 } 1164 return -EOPNOTSUPP; 1165 } 1166 1167 static int spinand_id_detect(struct spinand_device *spinand) 1168 { 1169 u8 *id = spinand->id.data; 1170 int ret; 1171 1172 ret = spinand_read_id_op(spinand, 0, 0, id); 1173 if (ret) 1174 return ret; 1175 ret = spinand_manufacturer_match(spinand, SPINAND_READID_METHOD_OPCODE); 1176 if (!ret) 1177 return 0; 1178 1179 ret = spinand_read_id_op(spinand, 1, 0, id); 1180 if (ret) 1181 return ret; 1182 ret = spinand_manufacturer_match(spinand, 1183 SPINAND_READID_METHOD_OPCODE_ADDR); 1184 if (!ret) 1185 return 0; 1186 1187 ret = spinand_read_id_op(spinand, 0, 1, id); 1188 if (ret) 1189 return ret; 1190 ret = spinand_manufacturer_match(spinand, 1191 SPINAND_READID_METHOD_OPCODE_DUMMY); 1192 1193 return ret; 1194 } 1195 1196 static int spinand_manufacturer_init(struct spinand_device *spinand) 1197 { 1198 if (spinand->manufacturer->ops->init) 1199 return spinand->manufacturer->ops->init(spinand); 1200 1201 return 0; 1202 } 1203 1204 static void spinand_manufacturer_cleanup(struct spinand_device *spinand) 1205 { 1206 /* Release manufacturer private data */ 1207 if (spinand->manufacturer->ops->cleanup) 1208 return spinand->manufacturer->ops->cleanup(spinand); 1209 } 1210 1211 static const struct spi_mem_op * 1212 spinand_select_op_variant(struct spinand_device *spinand, 1213 const struct spinand_op_variants *variants) 1214 { 1215 struct nand_device *nand = spinand_to_nand(spinand); 1216 const struct spi_mem_op *best_variant = NULL; 1217 u64 best_op_duration_ns = ULLONG_MAX; 1218 unsigned int i; 1219 1220 for (i = 0; i < variants->nops; i++) { 1221 struct spi_mem_op op = variants->ops[i]; 1222 u64 op_duration_ns = 0; 1223 unsigned int nbytes; 1224 int ret; 1225 1226 nbytes = nanddev_per_page_oobsize(nand) + 1227 nanddev_page_size(nand); 1228 1229 while (nbytes) { 1230 op.data.nbytes = nbytes; 1231 ret = spi_mem_adjust_op_size(spinand->spimem, &op); 1232 if (ret) 1233 break; 1234 1235 spi_mem_adjust_op_freq(spinand->spimem, &op); 1236 1237 if (!spi_mem_supports_op(spinand->spimem, &op)) 1238 break; 1239 1240 nbytes -= op.data.nbytes; 1241 1242 op_duration_ns += spi_mem_calc_op_duration(&op); 1243 } 1244 1245 if (!nbytes && op_duration_ns < best_op_duration_ns) { 1246 best_op_duration_ns = op_duration_ns; 1247 best_variant = &variants->ops[i]; 1248 } 1249 } 1250 1251 return best_variant; 1252 } 1253 1254 /** 1255 * spinand_match_and_init() - Try to find a match between a device ID and an 1256 * entry in a spinand_info table 1257 * @spinand: SPI NAND object 1258 * @table: SPI NAND device description table 1259 * @table_size: size of the device description table 1260 * @rdid_method: read id method to match 1261 * 1262 * Match between a device ID retrieved through the READ_ID command and an 1263 * entry in the SPI NAND description table. If a match is found, the spinand 1264 * object will be initialized with information provided by the matching 1265 * spinand_info entry. 1266 * 1267 * Return: 0 on success, a negative error code otherwise. 1268 */ 1269 int spinand_match_and_init(struct spinand_device *spinand, 1270 const struct spinand_info *table, 1271 unsigned int table_size, 1272 enum spinand_readid_method rdid_method) 1273 { 1274 u8 *id = spinand->id.data; 1275 struct nand_device *nand = spinand_to_nand(spinand); 1276 unsigned int i; 1277 1278 for (i = 0; i < table_size; i++) { 1279 const struct spinand_info *info = &table[i]; 1280 const struct spi_mem_op *op; 1281 1282 if (rdid_method != info->devid.method) 1283 continue; 1284 1285 if (memcmp(id + 1, info->devid.id, info->devid.len)) 1286 continue; 1287 1288 nand->memorg = table[i].memorg; 1289 nanddev_set_ecc_requirements(nand, &table[i].eccreq); 1290 spinand->eccinfo = table[i].eccinfo; 1291 spinand->flags = table[i].flags; 1292 spinand->id.len = 1 + table[i].devid.len; 1293 spinand->select_target = table[i].select_target; 1294 spinand->set_cont_read = table[i].set_cont_read; 1295 1296 op = spinand_select_op_variant(spinand, 1297 info->op_variants.read_cache); 1298 if (!op) 1299 return -ENOTSUPP; 1300 1301 spinand->op_templates.read_cache = op; 1302 1303 op = spinand_select_op_variant(spinand, 1304 info->op_variants.write_cache); 1305 if (!op) 1306 return -ENOTSUPP; 1307 1308 spinand->op_templates.write_cache = op; 1309 1310 op = spinand_select_op_variant(spinand, 1311 info->op_variants.update_cache); 1312 spinand->op_templates.update_cache = op; 1313 1314 return 0; 1315 } 1316 1317 return -ENOTSUPP; 1318 } 1319 1320 static int spinand_detect(struct spinand_device *spinand) 1321 { 1322 struct device *dev = &spinand->spimem->spi->dev; 1323 struct nand_device *nand = spinand_to_nand(spinand); 1324 int ret; 1325 1326 ret = spinand_reset_op(spinand); 1327 if (ret) 1328 return ret; 1329 1330 ret = spinand_id_detect(spinand); 1331 if (ret) { 1332 dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN, 1333 spinand->id.data); 1334 return ret; 1335 } 1336 1337 if (nand->memorg.ntargets > 1 && !spinand->select_target) { 1338 dev_err(dev, 1339 "SPI NANDs with more than one die must implement ->select_target()\n"); 1340 return -EINVAL; 1341 } 1342 1343 dev_info(&spinand->spimem->spi->dev, 1344 "%s SPI NAND was found.\n", spinand->manufacturer->name); 1345 dev_info(&spinand->spimem->spi->dev, 1346 "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n", 1347 nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10, 1348 nanddev_page_size(nand), nanddev_per_page_oobsize(nand)); 1349 1350 return 0; 1351 } 1352 1353 static int spinand_init_flash(struct spinand_device *spinand) 1354 { 1355 struct device *dev = &spinand->spimem->spi->dev; 1356 struct nand_device *nand = spinand_to_nand(spinand); 1357 int ret, i; 1358 1359 ret = spinand_read_cfg(spinand); 1360 if (ret) 1361 return ret; 1362 1363 ret = spinand_init_quad_enable(spinand); 1364 if (ret) 1365 return ret; 1366 1367 ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0); 1368 if (ret) 1369 return ret; 1370 1371 ret = spinand_manufacturer_init(spinand); 1372 if (ret) { 1373 dev_err(dev, 1374 "Failed to initialize the SPI NAND chip (err = %d)\n", 1375 ret); 1376 return ret; 1377 } 1378 1379 /* After power up, all blocks are locked, so unlock them here. */ 1380 for (i = 0; i < nand->memorg.ntargets; i++) { 1381 ret = spinand_select_target(spinand, i); 1382 if (ret) 1383 break; 1384 1385 ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED); 1386 if (ret) 1387 break; 1388 } 1389 1390 if (ret) 1391 spinand_manufacturer_cleanup(spinand); 1392 1393 return ret; 1394 } 1395 1396 static void spinand_mtd_resume(struct mtd_info *mtd) 1397 { 1398 struct spinand_device *spinand = mtd_to_spinand(mtd); 1399 int ret; 1400 1401 ret = spinand_reset_op(spinand); 1402 if (ret) 1403 return; 1404 1405 ret = spinand_init_flash(spinand); 1406 if (ret) 1407 return; 1408 1409 spinand_ecc_enable(spinand, false); 1410 } 1411 1412 static int spinand_init(struct spinand_device *spinand) 1413 { 1414 struct device *dev = &spinand->spimem->spi->dev; 1415 struct mtd_info *mtd = spinand_to_mtd(spinand); 1416 struct nand_device *nand = mtd_to_nanddev(mtd); 1417 int ret; 1418 1419 /* 1420 * We need a scratch buffer because the spi_mem interface requires that 1421 * buf passed in spi_mem_op->data.buf be DMA-able. 1422 */ 1423 spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL); 1424 if (!spinand->scratchbuf) 1425 return -ENOMEM; 1426 1427 ret = spinand_detect(spinand); 1428 if (ret) 1429 goto err_free_bufs; 1430 1431 /* 1432 * Use kzalloc() instead of devm_kzalloc() here, because some drivers 1433 * may use this buffer for DMA access. 1434 * Memory allocated by devm_ does not guarantee DMA-safe alignment. 1435 */ 1436 spinand->databuf = kzalloc(nanddev_eraseblock_size(nand), 1437 GFP_KERNEL); 1438 if (!spinand->databuf) { 1439 ret = -ENOMEM; 1440 goto err_free_bufs; 1441 } 1442 1443 spinand->oobbuf = spinand->databuf + nanddev_page_size(nand); 1444 1445 ret = spinand_init_cfg_cache(spinand); 1446 if (ret) 1447 goto err_free_bufs; 1448 1449 ret = spinand_init_flash(spinand); 1450 if (ret) 1451 goto err_free_bufs; 1452 1453 ret = nanddev_init(nand, &spinand_ops, THIS_MODULE); 1454 if (ret) 1455 goto err_manuf_cleanup; 1456 1457 /* SPI-NAND default ECC engine is on-die */ 1458 nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE; 1459 nand->ecc.ondie_engine = &spinand_ondie_ecc_engine; 1460 1461 spinand_ecc_enable(spinand, false); 1462 ret = nanddev_ecc_engine_init(nand); 1463 if (ret) 1464 goto err_cleanup_nanddev; 1465 1466 /* 1467 * Continuous read can only be enabled with an on-die ECC engine, so the 1468 * ECC initialization must have happened previously. 1469 */ 1470 spinand_cont_read_init(spinand); 1471 1472 mtd->_read_oob = spinand_mtd_read; 1473 mtd->_write_oob = spinand_mtd_write; 1474 mtd->_block_isbad = spinand_mtd_block_isbad; 1475 mtd->_block_markbad = spinand_mtd_block_markbad; 1476 mtd->_block_isreserved = spinand_mtd_block_isreserved; 1477 mtd->_erase = spinand_mtd_erase; 1478 mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks; 1479 mtd->_resume = spinand_mtd_resume; 1480 1481 if (nand->ecc.engine) { 1482 ret = mtd_ooblayout_count_freebytes(mtd); 1483 if (ret < 0) 1484 goto err_cleanup_ecc_engine; 1485 } 1486 1487 mtd->oobavail = ret; 1488 1489 /* Propagate ECC information to mtd_info */ 1490 mtd->ecc_strength = nanddev_get_ecc_conf(nand)->strength; 1491 mtd->ecc_step_size = nanddev_get_ecc_conf(nand)->step_size; 1492 mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4); 1493 1494 ret = spinand_create_dirmaps(spinand); 1495 if (ret) { 1496 dev_err(dev, 1497 "Failed to create direct mappings for read/write operations (err = %d)\n", 1498 ret); 1499 goto err_cleanup_ecc_engine; 1500 } 1501 1502 return 0; 1503 1504 err_cleanup_ecc_engine: 1505 nanddev_ecc_engine_cleanup(nand); 1506 1507 err_cleanup_nanddev: 1508 nanddev_cleanup(nand); 1509 1510 err_manuf_cleanup: 1511 spinand_manufacturer_cleanup(spinand); 1512 1513 err_free_bufs: 1514 kfree(spinand->databuf); 1515 kfree(spinand->scratchbuf); 1516 return ret; 1517 } 1518 1519 static void spinand_cleanup(struct spinand_device *spinand) 1520 { 1521 struct nand_device *nand = spinand_to_nand(spinand); 1522 1523 nanddev_cleanup(nand); 1524 spinand_manufacturer_cleanup(spinand); 1525 kfree(spinand->databuf); 1526 kfree(spinand->scratchbuf); 1527 } 1528 1529 static int spinand_probe(struct spi_mem *mem) 1530 { 1531 struct spinand_device *spinand; 1532 struct mtd_info *mtd; 1533 int ret; 1534 1535 spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand), 1536 GFP_KERNEL); 1537 if (!spinand) 1538 return -ENOMEM; 1539 1540 spinand->spimem = mem; 1541 spi_mem_set_drvdata(mem, spinand); 1542 spinand_set_of_node(spinand, mem->spi->dev.of_node); 1543 mutex_init(&spinand->lock); 1544 mtd = spinand_to_mtd(spinand); 1545 mtd->dev.parent = &mem->spi->dev; 1546 1547 ret = spinand_init(spinand); 1548 if (ret) 1549 return ret; 1550 1551 ret = mtd_device_register(mtd, NULL, 0); 1552 if (ret) 1553 goto err_spinand_cleanup; 1554 1555 return 0; 1556 1557 err_spinand_cleanup: 1558 spinand_cleanup(spinand); 1559 1560 return ret; 1561 } 1562 1563 static int spinand_remove(struct spi_mem *mem) 1564 { 1565 struct spinand_device *spinand; 1566 struct mtd_info *mtd; 1567 int ret; 1568 1569 spinand = spi_mem_get_drvdata(mem); 1570 mtd = spinand_to_mtd(spinand); 1571 1572 ret = mtd_device_unregister(mtd); 1573 if (ret) 1574 return ret; 1575 1576 spinand_cleanup(spinand); 1577 1578 return 0; 1579 } 1580 1581 static const struct spi_device_id spinand_ids[] = { 1582 { .name = "spi-nand" }, 1583 { /* sentinel */ }, 1584 }; 1585 MODULE_DEVICE_TABLE(spi, spinand_ids); 1586 1587 #ifdef CONFIG_OF 1588 static const struct of_device_id spinand_of_ids[] = { 1589 { .compatible = "spi-nand" }, 1590 { /* sentinel */ }, 1591 }; 1592 MODULE_DEVICE_TABLE(of, spinand_of_ids); 1593 #endif 1594 1595 static struct spi_mem_driver spinand_drv = { 1596 .spidrv = { 1597 .id_table = spinand_ids, 1598 .driver = { 1599 .name = "spi-nand", 1600 .of_match_table = of_match_ptr(spinand_of_ids), 1601 }, 1602 }, 1603 .probe = spinand_probe, 1604 .remove = spinand_remove, 1605 }; 1606 module_spi_mem_driver(spinand_drv); 1607 1608 MODULE_DESCRIPTION("SPI NAND framework"); 1609 MODULE_AUTHOR("Peter Pan<peterpandong@micron.com>"); 1610 MODULE_LICENSE("GPL v2"); 1611