1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2016-2017 Micron Technology, Inc. 4 * 5 * Authors: 6 * Peter Pan <peterpandong@micron.com> 7 * Boris Brezillon <boris.brezillon@bootlin.com> 8 */ 9 10 #define pr_fmt(fmt) "spi-nand: " fmt 11 12 #include <linux/device.h> 13 #include <linux/jiffies.h> 14 #include <linux/kernel.h> 15 #include <linux/module.h> 16 #include <linux/mtd/spinand.h> 17 #include <linux/of.h> 18 #include <linux/slab.h> 19 #include <linux/string.h> 20 #include <linux/spi/spi.h> 21 #include <linux/spi/spi-mem.h> 22 23 static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val) 24 { 25 struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg, 26 spinand->scratchbuf); 27 int ret; 28 29 ret = spi_mem_exec_op(spinand->spimem, &op); 30 if (ret) 31 return ret; 32 33 *val = *spinand->scratchbuf; 34 return 0; 35 } 36 37 static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val) 38 { 39 struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg, 40 spinand->scratchbuf); 41 42 *spinand->scratchbuf = val; 43 return spi_mem_exec_op(spinand->spimem, &op); 44 } 45 46 static int spinand_read_status(struct spinand_device *spinand, u8 *status) 47 { 48 return spinand_read_reg_op(spinand, REG_STATUS, status); 49 } 50 51 static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg) 52 { 53 struct nand_device *nand = spinand_to_nand(spinand); 54 55 if (WARN_ON(spinand->cur_target < 0 || 56 spinand->cur_target >= nand->memorg.ntargets)) 57 return -EINVAL; 58 59 *cfg = spinand->cfg_cache[spinand->cur_target]; 60 return 0; 61 } 62 63 static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg) 64 { 65 struct nand_device *nand = spinand_to_nand(spinand); 66 int ret; 67 68 if (WARN_ON(spinand->cur_target < 0 || 69 spinand->cur_target >= nand->memorg.ntargets)) 70 return -EINVAL; 71 72 if (spinand->cfg_cache[spinand->cur_target] == cfg) 73 return 0; 74 75 ret = spinand_write_reg_op(spinand, REG_CFG, cfg); 76 if (ret) 77 return ret; 78 79 spinand->cfg_cache[spinand->cur_target] = cfg; 80 return 0; 81 } 82 83 /** 84 * spinand_upd_cfg() - Update the configuration register 85 * @spinand: the spinand device 86 * @mask: the mask encoding the bits to update in the config reg 87 * @val: the new value to apply 88 * 89 * Update the configuration register. 90 * 91 * Return: 0 on success, a negative error code otherwise. 92 */ 93 int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val) 94 { 95 int ret; 96 u8 cfg; 97 98 ret = spinand_get_cfg(spinand, &cfg); 99 if (ret) 100 return ret; 101 102 cfg &= ~mask; 103 cfg |= val; 104 105 return spinand_set_cfg(spinand, cfg); 106 } 107 108 /** 109 * spinand_select_target() - Select a specific NAND target/die 110 * @spinand: the spinand device 111 * @target: the target/die to select 112 * 113 * Select a new target/die. If chip only has one die, this function is a NOOP. 114 * 115 * Return: 0 on success, a negative error code otherwise. 116 */ 117 int spinand_select_target(struct spinand_device *spinand, unsigned int target) 118 { 119 struct nand_device *nand = spinand_to_nand(spinand); 120 int ret; 121 122 if (WARN_ON(target >= nand->memorg.ntargets)) 123 return -EINVAL; 124 125 if (spinand->cur_target == target) 126 return 0; 127 128 if (nand->memorg.ntargets == 1) { 129 spinand->cur_target = target; 130 return 0; 131 } 132 133 ret = spinand->select_target(spinand, target); 134 if (ret) 135 return ret; 136 137 spinand->cur_target = target; 138 return 0; 139 } 140 141 static int spinand_read_cfg(struct spinand_device *spinand) 142 { 143 struct nand_device *nand = spinand_to_nand(spinand); 144 unsigned int target; 145 int ret; 146 147 for (target = 0; target < nand->memorg.ntargets; target++) { 148 ret = spinand_select_target(spinand, target); 149 if (ret) 150 return ret; 151 152 /* 153 * We use spinand_read_reg_op() instead of spinand_get_cfg() 154 * here to bypass the config cache. 155 */ 156 ret = spinand_read_reg_op(spinand, REG_CFG, 157 &spinand->cfg_cache[target]); 158 if (ret) 159 return ret; 160 } 161 162 return 0; 163 } 164 165 static int spinand_init_cfg_cache(struct spinand_device *spinand) 166 { 167 struct nand_device *nand = spinand_to_nand(spinand); 168 struct device *dev = &spinand->spimem->spi->dev; 169 170 spinand->cfg_cache = devm_kcalloc(dev, 171 nand->memorg.ntargets, 172 sizeof(*spinand->cfg_cache), 173 GFP_KERNEL); 174 if (!spinand->cfg_cache) 175 return -ENOMEM; 176 177 return 0; 178 } 179 180 static int spinand_init_quad_enable(struct spinand_device *spinand) 181 { 182 bool enable = false; 183 184 if (!(spinand->flags & SPINAND_HAS_QE_BIT)) 185 return 0; 186 187 if (spinand->op_templates.read_cache->data.buswidth == 4 || 188 spinand->op_templates.write_cache->data.buswidth == 4 || 189 spinand->op_templates.update_cache->data.buswidth == 4) 190 enable = true; 191 192 return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE, 193 enable ? CFG_QUAD_ENABLE : 0); 194 } 195 196 static int spinand_ecc_enable(struct spinand_device *spinand, 197 bool enable) 198 { 199 return spinand_upd_cfg(spinand, CFG_ECC_ENABLE, 200 enable ? CFG_ECC_ENABLE : 0); 201 } 202 203 static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status) 204 { 205 struct nand_device *nand = spinand_to_nand(spinand); 206 207 if (spinand->eccinfo.get_status) 208 return spinand->eccinfo.get_status(spinand, status); 209 210 switch (status & STATUS_ECC_MASK) { 211 case STATUS_ECC_NO_BITFLIPS: 212 return 0; 213 214 case STATUS_ECC_HAS_BITFLIPS: 215 /* 216 * We have no way to know exactly how many bitflips have been 217 * fixed, so let's return the maximum possible value so that 218 * wear-leveling layers move the data immediately. 219 */ 220 return nanddev_get_ecc_conf(nand)->strength; 221 222 case STATUS_ECC_UNCOR_ERROR: 223 return -EBADMSG; 224 225 default: 226 break; 227 } 228 229 return -EINVAL; 230 } 231 232 static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section, 233 struct mtd_oob_region *region) 234 { 235 return -ERANGE; 236 } 237 238 static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section, 239 struct mtd_oob_region *region) 240 { 241 if (section) 242 return -ERANGE; 243 244 /* Reserve 2 bytes for the BBM. */ 245 region->offset = 2; 246 region->length = 62; 247 248 return 0; 249 } 250 251 static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = { 252 .ecc = spinand_noecc_ooblayout_ecc, 253 .free = spinand_noecc_ooblayout_free, 254 }; 255 256 static int spinand_ondie_ecc_init_ctx(struct nand_device *nand) 257 { 258 struct spinand_device *spinand = nand_to_spinand(nand); 259 struct mtd_info *mtd = nanddev_to_mtd(nand); 260 struct spinand_ondie_ecc_conf *engine_conf; 261 262 nand->ecc.ctx.conf.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE; 263 nand->ecc.ctx.conf.step_size = nand->ecc.requirements.step_size; 264 nand->ecc.ctx.conf.strength = nand->ecc.requirements.strength; 265 266 engine_conf = kzalloc(sizeof(*engine_conf), GFP_KERNEL); 267 if (!engine_conf) 268 return -ENOMEM; 269 270 nand->ecc.ctx.priv = engine_conf; 271 272 if (spinand->eccinfo.ooblayout) 273 mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout); 274 else 275 mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout); 276 277 return 0; 278 } 279 280 static void spinand_ondie_ecc_cleanup_ctx(struct nand_device *nand) 281 { 282 kfree(nand->ecc.ctx.priv); 283 } 284 285 static int spinand_ondie_ecc_prepare_io_req(struct nand_device *nand, 286 struct nand_page_io_req *req) 287 { 288 struct spinand_device *spinand = nand_to_spinand(nand); 289 bool enable = (req->mode != MTD_OPS_RAW); 290 291 memset(spinand->oobbuf, 0xff, nanddev_per_page_oobsize(nand)); 292 293 /* Only enable or disable the engine */ 294 return spinand_ecc_enable(spinand, enable); 295 } 296 297 static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand, 298 struct nand_page_io_req *req) 299 { 300 struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv; 301 struct spinand_device *spinand = nand_to_spinand(nand); 302 struct mtd_info *mtd = spinand_to_mtd(spinand); 303 int ret; 304 305 if (req->mode == MTD_OPS_RAW) 306 return 0; 307 308 /* Nothing to do when finishing a page write */ 309 if (req->type == NAND_PAGE_WRITE) 310 return 0; 311 312 /* Finish a page read: check the status, report errors/bitflips */ 313 ret = spinand_check_ecc_status(spinand, engine_conf->status); 314 if (ret == -EBADMSG) 315 mtd->ecc_stats.failed++; 316 else if (ret > 0) 317 mtd->ecc_stats.corrected += ret; 318 319 return ret; 320 } 321 322 static struct nand_ecc_engine_ops spinand_ondie_ecc_engine_ops = { 323 .init_ctx = spinand_ondie_ecc_init_ctx, 324 .cleanup_ctx = spinand_ondie_ecc_cleanup_ctx, 325 .prepare_io_req = spinand_ondie_ecc_prepare_io_req, 326 .finish_io_req = spinand_ondie_ecc_finish_io_req, 327 }; 328 329 static struct nand_ecc_engine spinand_ondie_ecc_engine = { 330 .ops = &spinand_ondie_ecc_engine_ops, 331 }; 332 333 static void spinand_ondie_ecc_save_status(struct nand_device *nand, u8 status) 334 { 335 struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv; 336 337 if (nand->ecc.ctx.conf.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE && 338 engine_conf) 339 engine_conf->status = status; 340 } 341 342 static int spinand_write_enable_op(struct spinand_device *spinand) 343 { 344 struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true); 345 346 return spi_mem_exec_op(spinand->spimem, &op); 347 } 348 349 static int spinand_load_page_op(struct spinand_device *spinand, 350 const struct nand_page_io_req *req) 351 { 352 struct nand_device *nand = spinand_to_nand(spinand); 353 unsigned int row = nanddev_pos_to_row(nand, &req->pos); 354 struct spi_mem_op op = SPINAND_PAGE_READ_OP(row); 355 356 return spi_mem_exec_op(spinand->spimem, &op); 357 } 358 359 static int spinand_read_from_cache_op(struct spinand_device *spinand, 360 const struct nand_page_io_req *req) 361 { 362 struct nand_device *nand = spinand_to_nand(spinand); 363 struct mtd_info *mtd = spinand_to_mtd(spinand); 364 struct spi_mem_dirmap_desc *rdesc; 365 unsigned int nbytes = 0; 366 void *buf = NULL; 367 u16 column = 0; 368 ssize_t ret; 369 370 if (req->datalen) { 371 buf = spinand->databuf; 372 nbytes = nanddev_page_size(nand); 373 column = 0; 374 } 375 376 if (req->ooblen) { 377 nbytes += nanddev_per_page_oobsize(nand); 378 if (!buf) { 379 buf = spinand->oobbuf; 380 column = nanddev_page_size(nand); 381 } 382 } 383 384 if (req->mode == MTD_OPS_RAW) 385 rdesc = spinand->dirmaps[req->pos.plane].rdesc; 386 else 387 rdesc = spinand->dirmaps[req->pos.plane].rdesc_ecc; 388 389 while (nbytes) { 390 ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf); 391 if (ret < 0) 392 return ret; 393 394 if (!ret || ret > nbytes) 395 return -EIO; 396 397 nbytes -= ret; 398 column += ret; 399 buf += ret; 400 } 401 402 if (req->datalen) 403 memcpy(req->databuf.in, spinand->databuf + req->dataoffs, 404 req->datalen); 405 406 if (req->ooblen) { 407 if (req->mode == MTD_OPS_AUTO_OOB) 408 mtd_ooblayout_get_databytes(mtd, req->oobbuf.in, 409 spinand->oobbuf, 410 req->ooboffs, 411 req->ooblen); 412 else 413 memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs, 414 req->ooblen); 415 } 416 417 return 0; 418 } 419 420 static int spinand_write_to_cache_op(struct spinand_device *spinand, 421 const struct nand_page_io_req *req) 422 { 423 struct nand_device *nand = spinand_to_nand(spinand); 424 struct mtd_info *mtd = spinand_to_mtd(spinand); 425 struct spi_mem_dirmap_desc *wdesc; 426 unsigned int nbytes, column = 0; 427 void *buf = spinand->databuf; 428 ssize_t ret; 429 430 /* 431 * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset 432 * the cache content to 0xFF (depends on vendor implementation), so we 433 * must fill the page cache entirely even if we only want to program 434 * the data portion of the page, otherwise we might corrupt the BBM or 435 * user data previously programmed in OOB area. 436 * 437 * Only reset the data buffer manually, the OOB buffer is prepared by 438 * ECC engines ->prepare_io_req() callback. 439 */ 440 nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand); 441 memset(spinand->databuf, 0xff, nanddev_page_size(nand)); 442 443 if (req->datalen) 444 memcpy(spinand->databuf + req->dataoffs, req->databuf.out, 445 req->datalen); 446 447 if (req->ooblen) { 448 if (req->mode == MTD_OPS_AUTO_OOB) 449 mtd_ooblayout_set_databytes(mtd, req->oobbuf.out, 450 spinand->oobbuf, 451 req->ooboffs, 452 req->ooblen); 453 else 454 memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out, 455 req->ooblen); 456 } 457 458 if (req->mode == MTD_OPS_RAW) 459 wdesc = spinand->dirmaps[req->pos.plane].wdesc; 460 else 461 wdesc = spinand->dirmaps[req->pos.plane].wdesc_ecc; 462 463 while (nbytes) { 464 ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf); 465 if (ret < 0) 466 return ret; 467 468 if (!ret || ret > nbytes) 469 return -EIO; 470 471 nbytes -= ret; 472 column += ret; 473 buf += ret; 474 } 475 476 return 0; 477 } 478 479 static int spinand_program_op(struct spinand_device *spinand, 480 const struct nand_page_io_req *req) 481 { 482 struct nand_device *nand = spinand_to_nand(spinand); 483 unsigned int row = nanddev_pos_to_row(nand, &req->pos); 484 struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row); 485 486 return spi_mem_exec_op(spinand->spimem, &op); 487 } 488 489 static int spinand_erase_op(struct spinand_device *spinand, 490 const struct nand_pos *pos) 491 { 492 struct nand_device *nand = spinand_to_nand(spinand); 493 unsigned int row = nanddev_pos_to_row(nand, pos); 494 struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row); 495 496 return spi_mem_exec_op(spinand->spimem, &op); 497 } 498 499 static int spinand_wait(struct spinand_device *spinand, 500 unsigned long initial_delay_us, 501 unsigned long poll_delay_us, 502 u8 *s) 503 { 504 struct spi_mem_op op = SPINAND_GET_FEATURE_OP(REG_STATUS, 505 spinand->scratchbuf); 506 u8 status; 507 int ret; 508 509 ret = spi_mem_poll_status(spinand->spimem, &op, STATUS_BUSY, 0, 510 initial_delay_us, 511 poll_delay_us, 512 SPINAND_WAITRDY_TIMEOUT_MS); 513 if (ret) 514 return ret; 515 516 status = *spinand->scratchbuf; 517 if (!(status & STATUS_BUSY)) 518 goto out; 519 520 /* 521 * Extra read, just in case the STATUS_READY bit has changed 522 * since our last check 523 */ 524 ret = spinand_read_status(spinand, &status); 525 if (ret) 526 return ret; 527 528 out: 529 if (s) 530 *s = status; 531 532 return status & STATUS_BUSY ? -ETIMEDOUT : 0; 533 } 534 535 static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr, 536 u8 ndummy, u8 *buf) 537 { 538 struct spi_mem_op op = SPINAND_READID_OP( 539 naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN); 540 int ret; 541 542 ret = spi_mem_exec_op(spinand->spimem, &op); 543 if (!ret) 544 memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN); 545 546 return ret; 547 } 548 549 static int spinand_reset_op(struct spinand_device *spinand) 550 { 551 struct spi_mem_op op = SPINAND_RESET_OP; 552 int ret; 553 554 ret = spi_mem_exec_op(spinand->spimem, &op); 555 if (ret) 556 return ret; 557 558 return spinand_wait(spinand, 559 SPINAND_RESET_INITIAL_DELAY_US, 560 SPINAND_RESET_POLL_DELAY_US, 561 NULL); 562 } 563 564 static int spinand_lock_block(struct spinand_device *spinand, u8 lock) 565 { 566 return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock); 567 } 568 569 static int spinand_read_page(struct spinand_device *spinand, 570 const struct nand_page_io_req *req) 571 { 572 struct nand_device *nand = spinand_to_nand(spinand); 573 u8 status; 574 int ret; 575 576 ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req); 577 if (ret) 578 return ret; 579 580 ret = spinand_load_page_op(spinand, req); 581 if (ret) 582 return ret; 583 584 ret = spinand_wait(spinand, 585 SPINAND_READ_INITIAL_DELAY_US, 586 SPINAND_READ_POLL_DELAY_US, 587 &status); 588 if (ret < 0) 589 return ret; 590 591 spinand_ondie_ecc_save_status(nand, status); 592 593 ret = spinand_read_from_cache_op(spinand, req); 594 if (ret) 595 return ret; 596 597 return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req); 598 } 599 600 static int spinand_write_page(struct spinand_device *spinand, 601 const struct nand_page_io_req *req) 602 { 603 struct nand_device *nand = spinand_to_nand(spinand); 604 u8 status; 605 int ret; 606 607 ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req); 608 if (ret) 609 return ret; 610 611 ret = spinand_write_enable_op(spinand); 612 if (ret) 613 return ret; 614 615 ret = spinand_write_to_cache_op(spinand, req); 616 if (ret) 617 return ret; 618 619 ret = spinand_program_op(spinand, req); 620 if (ret) 621 return ret; 622 623 ret = spinand_wait(spinand, 624 SPINAND_WRITE_INITIAL_DELAY_US, 625 SPINAND_WRITE_POLL_DELAY_US, 626 &status); 627 if (!ret && (status & STATUS_PROG_FAILED)) 628 return -EIO; 629 630 return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req); 631 } 632 633 static int spinand_mtd_read(struct mtd_info *mtd, loff_t from, 634 struct mtd_oob_ops *ops) 635 { 636 struct spinand_device *spinand = mtd_to_spinand(mtd); 637 struct nand_device *nand = mtd_to_nanddev(mtd); 638 unsigned int max_bitflips = 0; 639 struct nand_io_iter iter; 640 bool disable_ecc = false; 641 bool ecc_failed = false; 642 int ret = 0; 643 644 if (ops->mode == MTD_OPS_RAW || !spinand->eccinfo.ooblayout) 645 disable_ecc = true; 646 647 mutex_lock(&spinand->lock); 648 649 nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) { 650 if (disable_ecc) 651 iter.req.mode = MTD_OPS_RAW; 652 653 ret = spinand_select_target(spinand, iter.req.pos.target); 654 if (ret) 655 break; 656 657 ret = spinand_read_page(spinand, &iter.req); 658 if (ret < 0 && ret != -EBADMSG) 659 break; 660 661 if (ret == -EBADMSG) 662 ecc_failed = true; 663 else 664 max_bitflips = max_t(unsigned int, max_bitflips, ret); 665 666 ret = 0; 667 ops->retlen += iter.req.datalen; 668 ops->oobretlen += iter.req.ooblen; 669 } 670 671 mutex_unlock(&spinand->lock); 672 673 if (ecc_failed && !ret) 674 ret = -EBADMSG; 675 676 return ret ? ret : max_bitflips; 677 } 678 679 static int spinand_mtd_write(struct mtd_info *mtd, loff_t to, 680 struct mtd_oob_ops *ops) 681 { 682 struct spinand_device *spinand = mtd_to_spinand(mtd); 683 struct nand_device *nand = mtd_to_nanddev(mtd); 684 struct nand_io_iter iter; 685 bool disable_ecc = false; 686 int ret = 0; 687 688 if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout) 689 disable_ecc = true; 690 691 mutex_lock(&spinand->lock); 692 693 nanddev_io_for_each_page(nand, NAND_PAGE_WRITE, to, ops, &iter) { 694 if (disable_ecc) 695 iter.req.mode = MTD_OPS_RAW; 696 697 ret = spinand_select_target(spinand, iter.req.pos.target); 698 if (ret) 699 break; 700 701 ret = spinand_write_page(spinand, &iter.req); 702 if (ret) 703 break; 704 705 ops->retlen += iter.req.datalen; 706 ops->oobretlen += iter.req.ooblen; 707 } 708 709 mutex_unlock(&spinand->lock); 710 711 return ret; 712 } 713 714 static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos) 715 { 716 struct spinand_device *spinand = nand_to_spinand(nand); 717 u8 marker[2] = { }; 718 struct nand_page_io_req req = { 719 .pos = *pos, 720 .ooblen = sizeof(marker), 721 .ooboffs = 0, 722 .oobbuf.in = marker, 723 .mode = MTD_OPS_RAW, 724 }; 725 726 spinand_select_target(spinand, pos->target); 727 spinand_read_page(spinand, &req); 728 if (marker[0] != 0xff || marker[1] != 0xff) 729 return true; 730 731 return false; 732 } 733 734 static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs) 735 { 736 struct nand_device *nand = mtd_to_nanddev(mtd); 737 struct spinand_device *spinand = nand_to_spinand(nand); 738 struct nand_pos pos; 739 int ret; 740 741 nanddev_offs_to_pos(nand, offs, &pos); 742 mutex_lock(&spinand->lock); 743 ret = nanddev_isbad(nand, &pos); 744 mutex_unlock(&spinand->lock); 745 746 return ret; 747 } 748 749 static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos) 750 { 751 struct spinand_device *spinand = nand_to_spinand(nand); 752 u8 marker[2] = { }; 753 struct nand_page_io_req req = { 754 .pos = *pos, 755 .ooboffs = 0, 756 .ooblen = sizeof(marker), 757 .oobbuf.out = marker, 758 .mode = MTD_OPS_RAW, 759 }; 760 int ret; 761 762 ret = spinand_select_target(spinand, pos->target); 763 if (ret) 764 return ret; 765 766 ret = spinand_write_enable_op(spinand); 767 if (ret) 768 return ret; 769 770 return spinand_write_page(spinand, &req); 771 } 772 773 static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs) 774 { 775 struct nand_device *nand = mtd_to_nanddev(mtd); 776 struct spinand_device *spinand = nand_to_spinand(nand); 777 struct nand_pos pos; 778 int ret; 779 780 nanddev_offs_to_pos(nand, offs, &pos); 781 mutex_lock(&spinand->lock); 782 ret = nanddev_markbad(nand, &pos); 783 mutex_unlock(&spinand->lock); 784 785 return ret; 786 } 787 788 static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos) 789 { 790 struct spinand_device *spinand = nand_to_spinand(nand); 791 u8 status; 792 int ret; 793 794 ret = spinand_select_target(spinand, pos->target); 795 if (ret) 796 return ret; 797 798 ret = spinand_write_enable_op(spinand); 799 if (ret) 800 return ret; 801 802 ret = spinand_erase_op(spinand, pos); 803 if (ret) 804 return ret; 805 806 ret = spinand_wait(spinand, 807 SPINAND_ERASE_INITIAL_DELAY_US, 808 SPINAND_ERASE_POLL_DELAY_US, 809 &status); 810 811 if (!ret && (status & STATUS_ERASE_FAILED)) 812 ret = -EIO; 813 814 return ret; 815 } 816 817 static int spinand_mtd_erase(struct mtd_info *mtd, 818 struct erase_info *einfo) 819 { 820 struct spinand_device *spinand = mtd_to_spinand(mtd); 821 int ret; 822 823 mutex_lock(&spinand->lock); 824 ret = nanddev_mtd_erase(mtd, einfo); 825 mutex_unlock(&spinand->lock); 826 827 return ret; 828 } 829 830 static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs) 831 { 832 struct spinand_device *spinand = mtd_to_spinand(mtd); 833 struct nand_device *nand = mtd_to_nanddev(mtd); 834 struct nand_pos pos; 835 int ret; 836 837 nanddev_offs_to_pos(nand, offs, &pos); 838 mutex_lock(&spinand->lock); 839 ret = nanddev_isreserved(nand, &pos); 840 mutex_unlock(&spinand->lock); 841 842 return ret; 843 } 844 845 static int spinand_create_dirmap(struct spinand_device *spinand, 846 unsigned int plane) 847 { 848 struct nand_device *nand = spinand_to_nand(spinand); 849 struct spi_mem_dirmap_info info = { 850 .length = nanddev_page_size(nand) + 851 nanddev_per_page_oobsize(nand), 852 }; 853 struct spi_mem_dirmap_desc *desc; 854 855 /* The plane number is passed in MSB just above the column address */ 856 info.offset = plane << fls(nand->memorg.pagesize); 857 858 info.op_tmpl = *spinand->op_templates.update_cache; 859 desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev, 860 spinand->spimem, &info); 861 if (IS_ERR(desc)) 862 return PTR_ERR(desc); 863 864 spinand->dirmaps[plane].wdesc = desc; 865 866 info.op_tmpl = *spinand->op_templates.read_cache; 867 desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev, 868 spinand->spimem, &info); 869 if (IS_ERR(desc)) 870 return PTR_ERR(desc); 871 872 spinand->dirmaps[plane].rdesc = desc; 873 874 if (nand->ecc.engine->integration != NAND_ECC_ENGINE_INTEGRATION_PIPELINED) { 875 spinand->dirmaps[plane].wdesc_ecc = spinand->dirmaps[plane].wdesc; 876 spinand->dirmaps[plane].rdesc_ecc = spinand->dirmaps[plane].rdesc; 877 878 return 0; 879 } 880 881 info.op_tmpl = *spinand->op_templates.update_cache; 882 info.op_tmpl.data.ecc = true; 883 desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev, 884 spinand->spimem, &info); 885 if (IS_ERR(desc)) 886 return PTR_ERR(desc); 887 888 spinand->dirmaps[plane].wdesc_ecc = desc; 889 890 info.op_tmpl = *spinand->op_templates.read_cache; 891 info.op_tmpl.data.ecc = true; 892 desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev, 893 spinand->spimem, &info); 894 if (IS_ERR(desc)) 895 return PTR_ERR(desc); 896 897 spinand->dirmaps[plane].rdesc_ecc = desc; 898 899 return 0; 900 } 901 902 static int spinand_create_dirmaps(struct spinand_device *spinand) 903 { 904 struct nand_device *nand = spinand_to_nand(spinand); 905 int i, ret; 906 907 spinand->dirmaps = devm_kzalloc(&spinand->spimem->spi->dev, 908 sizeof(*spinand->dirmaps) * 909 nand->memorg.planes_per_lun, 910 GFP_KERNEL); 911 if (!spinand->dirmaps) 912 return -ENOMEM; 913 914 for (i = 0; i < nand->memorg.planes_per_lun; i++) { 915 ret = spinand_create_dirmap(spinand, i); 916 if (ret) 917 return ret; 918 } 919 920 return 0; 921 } 922 923 static const struct nand_ops spinand_ops = { 924 .erase = spinand_erase, 925 .markbad = spinand_markbad, 926 .isbad = spinand_isbad, 927 }; 928 929 static const struct spinand_manufacturer *spinand_manufacturers[] = { 930 &gigadevice_spinand_manufacturer, 931 ¯onix_spinand_manufacturer, 932 µn_spinand_manufacturer, 933 ¶gon_spinand_manufacturer, 934 &toshiba_spinand_manufacturer, 935 &winbond_spinand_manufacturer, 936 }; 937 938 static int spinand_manufacturer_match(struct spinand_device *spinand, 939 enum spinand_readid_method rdid_method) 940 { 941 u8 *id = spinand->id.data; 942 unsigned int i; 943 int ret; 944 945 for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) { 946 const struct spinand_manufacturer *manufacturer = 947 spinand_manufacturers[i]; 948 949 if (id[0] != manufacturer->id) 950 continue; 951 952 ret = spinand_match_and_init(spinand, 953 manufacturer->chips, 954 manufacturer->nchips, 955 rdid_method); 956 if (ret < 0) 957 continue; 958 959 spinand->manufacturer = manufacturer; 960 return 0; 961 } 962 return -ENOTSUPP; 963 } 964 965 static int spinand_id_detect(struct spinand_device *spinand) 966 { 967 u8 *id = spinand->id.data; 968 int ret; 969 970 ret = spinand_read_id_op(spinand, 0, 0, id); 971 if (ret) 972 return ret; 973 ret = spinand_manufacturer_match(spinand, SPINAND_READID_METHOD_OPCODE); 974 if (!ret) 975 return 0; 976 977 ret = spinand_read_id_op(spinand, 1, 0, id); 978 if (ret) 979 return ret; 980 ret = spinand_manufacturer_match(spinand, 981 SPINAND_READID_METHOD_OPCODE_ADDR); 982 if (!ret) 983 return 0; 984 985 ret = spinand_read_id_op(spinand, 0, 1, id); 986 if (ret) 987 return ret; 988 ret = spinand_manufacturer_match(spinand, 989 SPINAND_READID_METHOD_OPCODE_DUMMY); 990 991 return ret; 992 } 993 994 static int spinand_manufacturer_init(struct spinand_device *spinand) 995 { 996 if (spinand->manufacturer->ops->init) 997 return spinand->manufacturer->ops->init(spinand); 998 999 return 0; 1000 } 1001 1002 static void spinand_manufacturer_cleanup(struct spinand_device *spinand) 1003 { 1004 /* Release manufacturer private data */ 1005 if (spinand->manufacturer->ops->cleanup) 1006 return spinand->manufacturer->ops->cleanup(spinand); 1007 } 1008 1009 static const struct spi_mem_op * 1010 spinand_select_op_variant(struct spinand_device *spinand, 1011 const struct spinand_op_variants *variants) 1012 { 1013 struct nand_device *nand = spinand_to_nand(spinand); 1014 unsigned int i; 1015 1016 for (i = 0; i < variants->nops; i++) { 1017 struct spi_mem_op op = variants->ops[i]; 1018 unsigned int nbytes; 1019 int ret; 1020 1021 nbytes = nanddev_per_page_oobsize(nand) + 1022 nanddev_page_size(nand); 1023 1024 while (nbytes) { 1025 op.data.nbytes = nbytes; 1026 ret = spi_mem_adjust_op_size(spinand->spimem, &op); 1027 if (ret) 1028 break; 1029 1030 if (!spi_mem_supports_op(spinand->spimem, &op)) 1031 break; 1032 1033 nbytes -= op.data.nbytes; 1034 } 1035 1036 if (!nbytes) 1037 return &variants->ops[i]; 1038 } 1039 1040 return NULL; 1041 } 1042 1043 /** 1044 * spinand_match_and_init() - Try to find a match between a device ID and an 1045 * entry in a spinand_info table 1046 * @spinand: SPI NAND object 1047 * @table: SPI NAND device description table 1048 * @table_size: size of the device description table 1049 * @rdid_method: read id method to match 1050 * 1051 * Match between a device ID retrieved through the READ_ID command and an 1052 * entry in the SPI NAND description table. If a match is found, the spinand 1053 * object will be initialized with information provided by the matching 1054 * spinand_info entry. 1055 * 1056 * Return: 0 on success, a negative error code otherwise. 1057 */ 1058 int spinand_match_and_init(struct spinand_device *spinand, 1059 const struct spinand_info *table, 1060 unsigned int table_size, 1061 enum spinand_readid_method rdid_method) 1062 { 1063 u8 *id = spinand->id.data; 1064 struct nand_device *nand = spinand_to_nand(spinand); 1065 unsigned int i; 1066 1067 for (i = 0; i < table_size; i++) { 1068 const struct spinand_info *info = &table[i]; 1069 const struct spi_mem_op *op; 1070 1071 if (rdid_method != info->devid.method) 1072 continue; 1073 1074 if (memcmp(id + 1, info->devid.id, info->devid.len)) 1075 continue; 1076 1077 nand->memorg = table[i].memorg; 1078 nanddev_set_ecc_requirements(nand, &table[i].eccreq); 1079 spinand->eccinfo = table[i].eccinfo; 1080 spinand->flags = table[i].flags; 1081 spinand->id.len = 1 + table[i].devid.len; 1082 spinand->select_target = table[i].select_target; 1083 1084 op = spinand_select_op_variant(spinand, 1085 info->op_variants.read_cache); 1086 if (!op) 1087 return -ENOTSUPP; 1088 1089 spinand->op_templates.read_cache = op; 1090 1091 op = spinand_select_op_variant(spinand, 1092 info->op_variants.write_cache); 1093 if (!op) 1094 return -ENOTSUPP; 1095 1096 spinand->op_templates.write_cache = op; 1097 1098 op = spinand_select_op_variant(spinand, 1099 info->op_variants.update_cache); 1100 spinand->op_templates.update_cache = op; 1101 1102 return 0; 1103 } 1104 1105 return -ENOTSUPP; 1106 } 1107 1108 static int spinand_detect(struct spinand_device *spinand) 1109 { 1110 struct device *dev = &spinand->spimem->spi->dev; 1111 struct nand_device *nand = spinand_to_nand(spinand); 1112 int ret; 1113 1114 ret = spinand_reset_op(spinand); 1115 if (ret) 1116 return ret; 1117 1118 ret = spinand_id_detect(spinand); 1119 if (ret) { 1120 dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN, 1121 spinand->id.data); 1122 return ret; 1123 } 1124 1125 if (nand->memorg.ntargets > 1 && !spinand->select_target) { 1126 dev_err(dev, 1127 "SPI NANDs with more than one die must implement ->select_target()\n"); 1128 return -EINVAL; 1129 } 1130 1131 dev_info(&spinand->spimem->spi->dev, 1132 "%s SPI NAND was found.\n", spinand->manufacturer->name); 1133 dev_info(&spinand->spimem->spi->dev, 1134 "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n", 1135 nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10, 1136 nanddev_page_size(nand), nanddev_per_page_oobsize(nand)); 1137 1138 return 0; 1139 } 1140 1141 static int spinand_init_flash(struct spinand_device *spinand) 1142 { 1143 struct device *dev = &spinand->spimem->spi->dev; 1144 struct nand_device *nand = spinand_to_nand(spinand); 1145 int ret, i; 1146 1147 ret = spinand_read_cfg(spinand); 1148 if (ret) 1149 return ret; 1150 1151 ret = spinand_init_quad_enable(spinand); 1152 if (ret) 1153 return ret; 1154 1155 ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0); 1156 if (ret) 1157 return ret; 1158 1159 ret = spinand_manufacturer_init(spinand); 1160 if (ret) { 1161 dev_err(dev, 1162 "Failed to initialize the SPI NAND chip (err = %d)\n", 1163 ret); 1164 return ret; 1165 } 1166 1167 /* After power up, all blocks are locked, so unlock them here. */ 1168 for (i = 0; i < nand->memorg.ntargets; i++) { 1169 ret = spinand_select_target(spinand, i); 1170 if (ret) 1171 break; 1172 1173 ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED); 1174 if (ret) 1175 break; 1176 } 1177 1178 if (ret) 1179 spinand_manufacturer_cleanup(spinand); 1180 1181 return ret; 1182 } 1183 1184 static void spinand_mtd_resume(struct mtd_info *mtd) 1185 { 1186 struct spinand_device *spinand = mtd_to_spinand(mtd); 1187 int ret; 1188 1189 ret = spinand_reset_op(spinand); 1190 if (ret) 1191 return; 1192 1193 ret = spinand_init_flash(spinand); 1194 if (ret) 1195 return; 1196 1197 spinand_ecc_enable(spinand, false); 1198 } 1199 1200 static int spinand_init(struct spinand_device *spinand) 1201 { 1202 struct device *dev = &spinand->spimem->spi->dev; 1203 struct mtd_info *mtd = spinand_to_mtd(spinand); 1204 struct nand_device *nand = mtd_to_nanddev(mtd); 1205 int ret; 1206 1207 /* 1208 * We need a scratch buffer because the spi_mem interface requires that 1209 * buf passed in spi_mem_op->data.buf be DMA-able. 1210 */ 1211 spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL); 1212 if (!spinand->scratchbuf) 1213 return -ENOMEM; 1214 1215 ret = spinand_detect(spinand); 1216 if (ret) 1217 goto err_free_bufs; 1218 1219 /* 1220 * Use kzalloc() instead of devm_kzalloc() here, because some drivers 1221 * may use this buffer for DMA access. 1222 * Memory allocated by devm_ does not guarantee DMA-safe alignment. 1223 */ 1224 spinand->databuf = kzalloc(nanddev_page_size(nand) + 1225 nanddev_per_page_oobsize(nand), 1226 GFP_KERNEL); 1227 if (!spinand->databuf) { 1228 ret = -ENOMEM; 1229 goto err_free_bufs; 1230 } 1231 1232 spinand->oobbuf = spinand->databuf + nanddev_page_size(nand); 1233 1234 ret = spinand_init_cfg_cache(spinand); 1235 if (ret) 1236 goto err_free_bufs; 1237 1238 ret = spinand_init_flash(spinand); 1239 if (ret) 1240 goto err_free_bufs; 1241 1242 ret = nanddev_init(nand, &spinand_ops, THIS_MODULE); 1243 if (ret) 1244 goto err_manuf_cleanup; 1245 1246 /* SPI-NAND default ECC engine is on-die */ 1247 nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE; 1248 nand->ecc.ondie_engine = &spinand_ondie_ecc_engine; 1249 1250 spinand_ecc_enable(spinand, false); 1251 ret = nanddev_ecc_engine_init(nand); 1252 if (ret) 1253 goto err_cleanup_nanddev; 1254 1255 mtd->_read_oob = spinand_mtd_read; 1256 mtd->_write_oob = spinand_mtd_write; 1257 mtd->_block_isbad = spinand_mtd_block_isbad; 1258 mtd->_block_markbad = spinand_mtd_block_markbad; 1259 mtd->_block_isreserved = spinand_mtd_block_isreserved; 1260 mtd->_erase = spinand_mtd_erase; 1261 mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks; 1262 mtd->_resume = spinand_mtd_resume; 1263 1264 if (nand->ecc.engine) { 1265 ret = mtd_ooblayout_count_freebytes(mtd); 1266 if (ret < 0) 1267 goto err_cleanup_ecc_engine; 1268 } 1269 1270 mtd->oobavail = ret; 1271 1272 /* Propagate ECC information to mtd_info */ 1273 mtd->ecc_strength = nanddev_get_ecc_conf(nand)->strength; 1274 mtd->ecc_step_size = nanddev_get_ecc_conf(nand)->step_size; 1275 1276 ret = spinand_create_dirmaps(spinand); 1277 if (ret) { 1278 dev_err(dev, 1279 "Failed to create direct mappings for read/write operations (err = %d)\n", 1280 ret); 1281 goto err_cleanup_ecc_engine; 1282 } 1283 1284 return 0; 1285 1286 err_cleanup_ecc_engine: 1287 nanddev_ecc_engine_cleanup(nand); 1288 1289 err_cleanup_nanddev: 1290 nanddev_cleanup(nand); 1291 1292 err_manuf_cleanup: 1293 spinand_manufacturer_cleanup(spinand); 1294 1295 err_free_bufs: 1296 kfree(spinand->databuf); 1297 kfree(spinand->scratchbuf); 1298 return ret; 1299 } 1300 1301 static void spinand_cleanup(struct spinand_device *spinand) 1302 { 1303 struct nand_device *nand = spinand_to_nand(spinand); 1304 1305 nanddev_cleanup(nand); 1306 spinand_manufacturer_cleanup(spinand); 1307 kfree(spinand->databuf); 1308 kfree(spinand->scratchbuf); 1309 } 1310 1311 static int spinand_probe(struct spi_mem *mem) 1312 { 1313 struct spinand_device *spinand; 1314 struct mtd_info *mtd; 1315 int ret; 1316 1317 spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand), 1318 GFP_KERNEL); 1319 if (!spinand) 1320 return -ENOMEM; 1321 1322 spinand->spimem = mem; 1323 spi_mem_set_drvdata(mem, spinand); 1324 spinand_set_of_node(spinand, mem->spi->dev.of_node); 1325 mutex_init(&spinand->lock); 1326 mtd = spinand_to_mtd(spinand); 1327 mtd->dev.parent = &mem->spi->dev; 1328 1329 ret = spinand_init(spinand); 1330 if (ret) 1331 return ret; 1332 1333 ret = mtd_device_register(mtd, NULL, 0); 1334 if (ret) 1335 goto err_spinand_cleanup; 1336 1337 return 0; 1338 1339 err_spinand_cleanup: 1340 spinand_cleanup(spinand); 1341 1342 return ret; 1343 } 1344 1345 static int spinand_remove(struct spi_mem *mem) 1346 { 1347 struct spinand_device *spinand; 1348 struct mtd_info *mtd; 1349 int ret; 1350 1351 spinand = spi_mem_get_drvdata(mem); 1352 mtd = spinand_to_mtd(spinand); 1353 1354 ret = mtd_device_unregister(mtd); 1355 if (ret) 1356 return ret; 1357 1358 spinand_cleanup(spinand); 1359 1360 return 0; 1361 } 1362 1363 static const struct spi_device_id spinand_ids[] = { 1364 { .name = "spi-nand" }, 1365 { /* sentinel */ }, 1366 }; 1367 MODULE_DEVICE_TABLE(spi, spinand_ids); 1368 1369 #ifdef CONFIG_OF 1370 static const struct of_device_id spinand_of_ids[] = { 1371 { .compatible = "spi-nand" }, 1372 { /* sentinel */ }, 1373 }; 1374 MODULE_DEVICE_TABLE(of, spinand_of_ids); 1375 #endif 1376 1377 static struct spi_mem_driver spinand_drv = { 1378 .spidrv = { 1379 .id_table = spinand_ids, 1380 .driver = { 1381 .name = "spi-nand", 1382 .of_match_table = of_match_ptr(spinand_of_ids), 1383 }, 1384 }, 1385 .probe = spinand_probe, 1386 .remove = spinand_remove, 1387 }; 1388 module_spi_mem_driver(spinand_drv); 1389 1390 MODULE_DESCRIPTION("SPI NAND framework"); 1391 MODULE_AUTHOR("Peter Pan<peterpandong@micron.com>"); 1392 MODULE_LICENSE("GPL v2"); 1393