// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016-2017 Micron Technology, Inc.
 *
 * Authors:
 *	Peter Pan <peterpandong@micron.com>
 *	Boris Brezillon <boris.brezillon@bootlin.com>
 */

#define pr_fmt(fmt) "spi-nand: " fmt

#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/spinand.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
						      spinand->scratchbuf);
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (ret)
		return ret;

	*val = *spinand->scratchbuf;
	return 0;
}

static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
{
	struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
						      spinand->scratchbuf);

	*spinand->scratchbuf = val;
	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_read_status(struct spinand_device *spinand, u8 *status)
{
	return spinand_read_reg_op(spinand, REG_STATUS, status);
}

static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	*cfg = spinand->cfg_cache[spinand->cur_target];
	return 0;
}

static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cfg_cache[spinand->cur_target] == cfg)
		return 0;

	ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
	if (ret)
		return ret;

	spinand->cfg_cache[spinand->cur_target] = cfg;
	return 0;
}

/**
 * spinand_upd_cfg() - Update the configuration register
 * @spinand: the spinand device
 * @mask: the mask encoding the bits to update in the config reg
 * @val: the new value to apply
 *
 * Update the configuration register.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
{
	int ret;
	u8 cfg;

	ret = spinand_get_cfg(spinand, &cfg);
	if (ret)
		return ret;

	cfg &= ~mask;
	cfg |= val;

	return spinand_set_cfg(spinand, cfg);
}

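/*
 * Illustrative use of spinand_upd_cfg(): callers pass a mask covering the
 * bits they want to change and the new value for those bits. The ECC and
 * quad-enable helpers below do, for instance,
 *
 *	spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
 *			enable ? CFG_ECC_ENABLE : 0);
 *
 * so that the read-modify-write preserves all other configuration bits.
 */
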
/**
 * spinand_select_target() - Select a specific NAND target/die
 * @spinand: the spinand device
 * @target: the target/die to select
 *
 * Select a new target/die. If the chip only has one die, this function is a
 * NOOP.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_select_target(struct spinand_device *spinand, unsigned int target)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cur_target == target)
		return 0;

	if (nand->memorg.ntargets == 1) {
		spinand->cur_target = target;
		return 0;
	}

	ret = spinand->select_target(spinand, target);
	if (ret)
		return ret;

	spinand->cur_target = target;
	return 0;
}

static int spinand_read_cfg(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int target;
	int ret;

	for (target = 0; target < nand->memorg.ntargets; target++) {
		ret = spinand_select_target(spinand, target);
		if (ret)
			return ret;

		/*
		 * We use spinand_read_reg_op() instead of spinand_get_cfg()
		 * here to bypass the config cache.
		 */
		ret = spinand_read_reg_op(spinand, REG_CFG,
					  &spinand->cfg_cache[target]);
		if (ret)
			return ret;
	}

	return 0;
}

static int spinand_init_cfg_cache(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct device *dev = &spinand->spimem->spi->dev;

	spinand->cfg_cache = devm_kcalloc(dev,
					  nand->memorg.ntargets,
					  sizeof(*spinand->cfg_cache),
					  GFP_KERNEL);
	if (!spinand->cfg_cache)
		return -ENOMEM;

	return 0;
}

static int spinand_init_quad_enable(struct spinand_device *spinand)
{
	bool enable = false;

	if (!(spinand->flags & SPINAND_HAS_QE_BIT))
		return 0;

	if (spinand->op_templates.read_cache->data.buswidth == 4 ||
	    spinand->op_templates.write_cache->data.buswidth == 4 ||
	    spinand->op_templates.update_cache->data.buswidth == 4)
		enable = true;

	return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
			       enable ? CFG_QUAD_ENABLE : 0);
}

static int spinand_ecc_enable(struct spinand_device *spinand,
			      bool enable)
{
	return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
			       enable ? CFG_ECC_ENABLE : 0);
}

static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (spinand->eccinfo.get_status)
		return spinand->eccinfo.get_status(spinand, status);

	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;

	case STATUS_ECC_HAS_BITFLIPS:
		/*
		 * We have no way to know exactly how many bitflips have been
		 * fixed, so let's return the maximum possible value so that
		 * wear-leveling layers move the data immediately.
		 */
		return nanddev_get_ecc_conf(nand)->strength;

	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	default:
		break;
	}

	return -EINVAL;
}

static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *region)
{
	return -ERANGE;
}

static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
					struct mtd_oob_region *region)
{
	if (section)
		return -ERANGE;

	/* Reserve 2 bytes for the BBM. */
	region->offset = 2;
	region->length = 62;

	return 0;
}

static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
	.ecc = spinand_noecc_ooblayout_ecc,
	.free = spinand_noecc_ooblayout_free,
};

static int spinand_ondie_ecc_init_ctx(struct nand_device *nand)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct spinand_ondie_ecc_conf *engine_conf;

	nand->ecc.ctx.conf.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
	nand->ecc.ctx.conf.step_size = nand->ecc.requirements.step_size;
	nand->ecc.ctx.conf.strength = nand->ecc.requirements.strength;

	engine_conf = kzalloc(sizeof(*engine_conf), GFP_KERNEL);
	if (!engine_conf)
		return -ENOMEM;

	nand->ecc.ctx.priv = engine_conf;

	if (spinand->eccinfo.ooblayout)
		mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
	else
		mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);

	return 0;
}

static void spinand_ondie_ecc_cleanup_ctx(struct nand_device *nand)
{
	kfree(nand->ecc.ctx.priv);
}

static int spinand_ondie_ecc_prepare_io_req(struct nand_device *nand,
					    struct nand_page_io_req *req)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	bool enable = (req->mode != MTD_OPS_RAW);

	/* Only enable or disable the engine */
	return spinand_ecc_enable(spinand, enable);
}

static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand,
					   struct nand_page_io_req *req)
{
	struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	int ret;

	if (req->mode == MTD_OPS_RAW)
		return 0;

	/* Nothing to do when finishing a page write */
	if (req->type == NAND_PAGE_WRITE)
		return 0;

	/* Finish a page read: check the status, report errors/bitflips */
	ret = spinand_check_ecc_status(spinand, engine_conf->status);
	if (ret == -EBADMSG)
		mtd->ecc_stats.failed++;
	else if (ret > 0)
		mtd->ecc_stats.corrected += ret;

	return ret;
}

static struct nand_ecc_engine_ops spinand_ondie_ecc_engine_ops = {
	.init_ctx = spinand_ondie_ecc_init_ctx,
	.cleanup_ctx = spinand_ondie_ecc_cleanup_ctx,
	.prepare_io_req = spinand_ondie_ecc_prepare_io_req,
	.finish_io_req = spinand_ondie_ecc_finish_io_req,
};

static struct nand_ecc_engine spinand_ondie_ecc_engine = {
	.ops = &spinand_ondie_ecc_engine_ops,
};

static void spinand_ondie_ecc_save_status(struct nand_device *nand, u8 status)
{
	struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;

	if (nand->ecc.ctx.conf.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE &&
	    engine_conf)
		engine_conf->status = status;
}

static int spinand_write_enable_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_load_page_op(struct spinand_device *spinand,
				const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}

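/*
 * Data travels between the host and the device's internal page cache through
 * the per-plane direct mappings set up in spinand_create_dirmaps(). The
 * column address passed to the dirmap accessors is the byte offset inside
 * that cache: 0 for the data area, nanddev_page_size() for the OOB area.
 */
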
static int spinand_read_from_cache_op(struct spinand_device *spinand,
				      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct spi_mem_dirmap_desc *rdesc;
	unsigned int nbytes = 0;
	void *buf = NULL;
	u16 column = 0;
	ssize_t ret;

	if (req->datalen) {
		buf = spinand->databuf;
		nbytes = nanddev_page_size(nand);
		column = 0;
	}

	if (req->ooblen) {
		nbytes += nanddev_per_page_oobsize(nand);
		if (!buf) {
			buf = spinand->oobbuf;
			column = nanddev_page_size(nand);
		}
	}

	rdesc = spinand->dirmaps[req->pos.plane].rdesc;

	while (nbytes) {
		ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
		if (ret < 0)
			return ret;

		if (!ret || ret > nbytes)
			return -EIO;

		nbytes -= ret;
		column += ret;
		buf += ret;
	}

	if (req->datalen)
		memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
		       req->datalen);

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
			       req->ooblen);
	}

	return 0;
}

static int spinand_write_to_cache_op(struct spinand_device *spinand,
				     const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct spi_mem_dirmap_desc *wdesc;
	unsigned int nbytes, column = 0;
	void *buf = spinand->databuf;
	ssize_t ret;

	/*
	 * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
	 * the cache content to 0xFF (depends on vendor implementation), so we
	 * must fill the page cache entirely even if we only want to program
	 * the data portion of the page, otherwise we might corrupt the BBM or
	 * user data previously programmed in OOB area.
	 *
	 * Only reset the data buffer manually, the OOB buffer is prepared by
	 * ECC engines ->prepare_io_req() callback.
	 */
	nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
	memset(spinand->databuf, 0xff, nanddev_page_size(nand));

	if (req->datalen)
		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
		       req->datalen);

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
			       req->ooblen);
	}

	wdesc = spinand->dirmaps[req->pos.plane].wdesc;

	while (nbytes) {
		ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
		if (ret < 0)
			return ret;

		if (!ret || ret > nbytes)
			return -EIO;

		nbytes -= ret;
		column += ret;
		buf += ret;
	}

	return 0;
}

static int spinand_program_op(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_erase_op(struct spinand_device *spinand,
			    const struct nand_pos *pos)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, pos);
	struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_wait(struct spinand_device *spinand,
			unsigned long initial_delay_us,
			unsigned long poll_delay_us,
			u8 *s)
{
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(REG_STATUS,
						      spinand->scratchbuf);
	u8 status;
	int ret;

	ret = spi_mem_poll_status(spinand->spimem, &op, STATUS_BUSY, 0,
				  initial_delay_us,
				  poll_delay_us,
				  SPINAND_WAITRDY_TIMEOUT_MS);
	if (ret)
		return ret;

	status = *spinand->scratchbuf;
	if (!(status & STATUS_BUSY))
		goto out;

	/*
	 * Extra read, just in case the STATUS_READY bit has changed
	 * since our last check
	 */
	ret = spinand_read_status(spinand, &status);
	if (ret)
		return ret;

out:
	if (s)
		*s = status;

	return status & STATUS_BUSY ? -ETIMEDOUT : 0;
}

static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
			      u8 ndummy, u8 *buf)
{
	struct spi_mem_op op = SPINAND_READID_OP(
		naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (!ret)
		memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);

	return ret;
}

static int spinand_reset_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_RESET_OP;
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (ret)
		return ret;

	return spinand_wait(spinand,
			    SPINAND_RESET_INITIAL_DELAY_US,
			    SPINAND_RESET_POLL_DELAY_US,
			    NULL);
}

static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
{
	return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
}

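/*
 * Page accesses follow the usual SPI NAND command sequences: a read issues
 * PAGE READ (load the array page into the cache), polls the status register
 * until the busy bit clears, then transfers the cache content to the host;
 * a write issues WRITE ENABLE, fills the cache, then PROGRAM EXECUTE and
 * polls for completion.
 */
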
static int spinand_read_page(struct spinand_device *spinand,
			     const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	u8 status;
	int ret;

	ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
	if (ret)
		return ret;

	ret = spinand_load_page_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand,
			   SPINAND_READ_INITIAL_DELAY_US,
			   SPINAND_READ_POLL_DELAY_US,
			   &status);
	if (ret < 0)
		return ret;

	spinand_ondie_ecc_save_status(nand, status);

	ret = spinand_read_from_cache_op(spinand, req);
	if (ret)
		return ret;

	return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}

static int spinand_write_page(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	u8 status;
	int ret;

	ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_write_to_cache_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_program_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand,
			   SPINAND_WRITE_INITIAL_DELAY_US,
			   SPINAND_WRITE_POLL_DELAY_US,
			   &status);
	if (!ret && (status & STATUS_PROG_FAILED))
		return -EIO;

	return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}

static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	unsigned int max_bitflips = 0;
	struct nand_io_iter iter;
	bool disable_ecc = false;
	bool ecc_failed = false;
	int ret = 0;

	if (ops->mode == MTD_OPS_RAW || !spinand->eccinfo.ooblayout)
		disable_ecc = true;

	mutex_lock(&spinand->lock);

	nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
		if (disable_ecc)
			iter.req.mode = MTD_OPS_RAW;

		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_read_page(spinand, &iter.req);
		if (ret < 0 && ret != -EBADMSG)
			break;

		if (ret == -EBADMSG)
			ecc_failed = true;
		else
			max_bitflips = max_t(unsigned int, max_bitflips, ret);

		ret = 0;
		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

	mutex_unlock(&spinand->lock);

	if (ecc_failed && !ret)
		ret = -EBADMSG;

	return ret ? ret : max_bitflips;
}

static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
	bool disable_ecc = false;
	int ret = 0;

	if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
		disable_ecc = true;

	mutex_lock(&spinand->lock);

	nanddev_io_for_each_page(nand, NAND_PAGE_WRITE, to, ops, &iter) {
		if (disable_ecc)
			iter.req.mode = MTD_OPS_RAW;

		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_write_page(spinand, &iter.req);
		if (ret)
			break;

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

	mutex_unlock(&spinand->lock);

	return ret;
}

static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 marker[2] = { };
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooblen = sizeof(marker),
		.ooboffs = 0,
		.oobbuf.in = marker,
		.mode = MTD_OPS_RAW,
	};

	spinand_select_target(spinand, pos->target);
	spinand_read_page(spinand, &req);
	if (marker[0] != 0xff || marker[1] != 0xff)
		return true;

	return false;
}

static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_isbad(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 marker[2] = { };
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooboffs = 0,
		.ooblen = sizeof(marker),
		.oobbuf.out = marker,
		.mode = MTD_OPS_RAW,
	};
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	return spinand_write_page(spinand, &req);
}

static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_markbad(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 status;
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_erase_op(spinand, pos);
	if (ret)
		return ret;

	ret = spinand_wait(spinand,
			   SPINAND_ERASE_INITIAL_DELAY_US,
			   SPINAND_ERASE_POLL_DELAY_US,
			   &status);

	if (!ret && (status & STATUS_ERASE_FAILED))
		ret = -EIO;

	return ret;
}

static int spinand_mtd_erase(struct mtd_info *mtd,
			     struct erase_info *einfo)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	int ret;

	mutex_lock(&spinand->lock);
	ret = nanddev_mtd_erase(mtd, einfo);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_isreserved(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_create_dirmap(struct spinand_device *spinand,
				 unsigned int plane)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct spi_mem_dirmap_info info = {
		.length = nanddev_page_size(nand) +
			  nanddev_per_page_oobsize(nand),
	};
	struct spi_mem_dirmap_desc *desc;

	/* The plane number is passed in MSB just above the column address */
	info.offset = plane << fls(nand->memorg.pagesize);

	info.op_tmpl = *spinand->op_templates.update_cache;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].wdesc = desc;

	info.op_tmpl = *spinand->op_templates.read_cache;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].rdesc = desc;

	return 0;
}

static int spinand_create_dirmaps(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int i, ret;

	spinand->dirmaps = devm_kzalloc(&spinand->spimem->spi->dev,
					sizeof(*spinand->dirmaps) *
					nand->memorg.planes_per_lun,
					GFP_KERNEL);
	if (!spinand->dirmaps)
		return -ENOMEM;

	for (i = 0; i < nand->memorg.planes_per_lun; i++) {
		ret = spinand_create_dirmap(spinand, i);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct nand_ops spinand_ops = {
	.erase = spinand_erase,
	.markbad = spinand_markbad,
	.isbad = spinand_isbad,
};

static const struct spinand_manufacturer *spinand_manufacturers[] = {
	&gigadevice_spinand_manufacturer,
	&macronix_spinand_manufacturer,
	&micron_spinand_manufacturer,
	&paragon_spinand_manufacturer,
	&toshiba_spinand_manufacturer,
	&winbond_spinand_manufacturer,
};

static int spinand_manufacturer_match(struct spinand_device *spinand,
				      enum spinand_readid_method rdid_method)
{
	u8 *id = spinand->id.data;
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
		const struct spinand_manufacturer *manufacturer =
			spinand_manufacturers[i];

		if (id[0] != manufacturer->id)
			continue;

		ret = spinand_match_and_init(spinand,
					     manufacturer->chips,
					     manufacturer->nchips,
					     rdid_method);
		if (ret < 0)
			continue;

		spinand->manufacturer = manufacturer;
		return 0;
	}
	return -ENOTSUPP;
}

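/*
 * SPI NAND chips are not consistent about how READ_ID must be issued, so
 * probing tries the three known variants in turn: opcode only, opcode
 * followed by an address byte, and opcode followed by a dummy byte, and
 * keeps the first one whose returned bytes match a known device.
 */
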
static int spinand_id_detect(struct spinand_device *spinand)
{
	u8 *id = spinand->id.data;
	int ret;

	ret = spinand_read_id_op(spinand, 0, 0, id);
	if (ret)
		return ret;
	ret = spinand_manufacturer_match(spinand, SPINAND_READID_METHOD_OPCODE);
	if (!ret)
		return 0;

	ret = spinand_read_id_op(spinand, 1, 0, id);
	if (ret)
		return ret;
	ret = spinand_manufacturer_match(spinand,
					 SPINAND_READID_METHOD_OPCODE_ADDR);
	if (!ret)
		return 0;

	ret = spinand_read_id_op(spinand, 0, 1, id);
	if (ret)
		return ret;
	ret = spinand_manufacturer_match(spinand,
					 SPINAND_READID_METHOD_OPCODE_DUMMY);

	return ret;
}

static int spinand_manufacturer_init(struct spinand_device *spinand)
{
	if (spinand->manufacturer->ops->init)
		return spinand->manufacturer->ops->init(spinand);

	return 0;
}

static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
{
	/* Release manufacturer private data */
	if (spinand->manufacturer->ops->cleanup)
		return spinand->manufacturer->ops->cleanup(spinand);
}

static const struct spi_mem_op *
spinand_select_op_variant(struct spinand_device *spinand,
			  const struct spinand_op_variants *variants)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < variants->nops; i++) {
		struct spi_mem_op op = variants->ops[i];
		unsigned int nbytes;
		int ret;

		nbytes = nanddev_per_page_oobsize(nand) +
			 nanddev_page_size(nand);

		while (nbytes) {
			op.data.nbytes = nbytes;
			ret = spi_mem_adjust_op_size(spinand->spimem, &op);
			if (ret)
				break;

			if (!spi_mem_supports_op(spinand->spimem, &op))
				break;

			nbytes -= op.data.nbytes;
		}

		if (!nbytes)
			return &variants->ops[i];
	}

	return NULL;
}

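/*
 * Illustrative only, not taken from this file: manufacturer drivers describe
 * their chips with SPINAND_INFO() entries built from the helpers declared in
 * <linux/mtd/spinand.h>, along the lines of
 *
 *	SPINAND_INFO("some-chip",
 *		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xXX),
 *		     NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
 *		     NAND_ECCREQ(8, 512),
 *		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
 *					      &write_cache_variants,
 *					      &update_cache_variants),
 *		     0,
 *		     SPINAND_ECCINFO(&some_ooblayout_ops, some_get_status)),
 *
 * where the chip name, geometry and callbacks above are made up for the
 * example. spinand_match_and_init() below copies the matching entry's memory
 * organization, ECC requirements and op templates into the spinand object.
 */
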
/**
 * spinand_match_and_init() - Try to find a match between a device ID and an
 *			      entry in a spinand_info table
 * @spinand: SPI NAND object
 * @table: SPI NAND device description table
 * @table_size: size of the device description table
 * @rdid_method: read id method to match
 *
 * Match between a device ID retrieved through the READ_ID command and an
 * entry in the SPI NAND description table. If a match is found, the spinand
 * object will be initialized with information provided by the matching
 * spinand_info entry.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_match_and_init(struct spinand_device *spinand,
			   const struct spinand_info *table,
			   unsigned int table_size,
			   enum spinand_readid_method rdid_method)
{
	u8 *id = spinand->id.data;
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < table_size; i++) {
		const struct spinand_info *info = &table[i];
		const struct spi_mem_op *op;

		if (rdid_method != info->devid.method)
			continue;

		if (memcmp(id + 1, info->devid.id, info->devid.len))
			continue;

		nand->memorg = table[i].memorg;
		nanddev_set_ecc_requirements(nand, &table[i].eccreq);
		spinand->eccinfo = table[i].eccinfo;
		spinand->flags = table[i].flags;
		spinand->id.len = 1 + table[i].devid.len;
		spinand->select_target = table[i].select_target;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.read_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.read_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.write_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.write_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.update_cache);
		spinand->op_templates.update_cache = op;

		return 0;
	}

	return -ENOTSUPP;
}

static int spinand_detect(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	ret = spinand_reset_op(spinand);
	if (ret)
		return ret;

	ret = spinand_id_detect(spinand);
	if (ret) {
		dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
			spinand->id.data);
		return ret;
	}

	if (nand->memorg.ntargets > 1 && !spinand->select_target) {
		dev_err(dev,
			"SPI NANDs with more than one die must implement ->select_target()\n");
		return -EINVAL;
	}

	dev_info(&spinand->spimem->spi->dev,
		 "%s SPI NAND was found.\n", spinand->manufacturer->name);
	dev_info(&spinand->spimem->spi->dev,
		 "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
		 nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
		 nanddev_page_size(nand), nanddev_per_page_oobsize(nand));

	return 0;
}

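/*
 * Bring the flash to a known state after detection: seed the per-target
 * configuration cache, set the quad-enable bit if the selected I/O variants
 * need it, leave OTP mode, run the manufacturer hook and unlock all blocks
 * on every die (they come out of power-up locked).
 */
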
static int spinand_init_flash(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret, i;

	ret = spinand_read_cfg(spinand);
	if (ret)
		return ret;

	ret = spinand_init_quad_enable(spinand);
	if (ret)
		return ret;

	ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
	if (ret)
		return ret;

	ret = spinand_manufacturer_init(spinand);
	if (ret) {
		dev_err(dev,
			"Failed to initialize the SPI NAND chip (err = %d)\n",
			ret);
		return ret;
	}

	/* After power up, all blocks are locked, so unlock them here. */
	for (i = 0; i < nand->memorg.ntargets; i++) {
		ret = spinand_select_target(spinand, i);
		if (ret)
			break;

		ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
		if (ret)
			break;
	}

	if (ret)
		spinand_manufacturer_cleanup(spinand);

	return ret;
}

static void spinand_mtd_resume(struct mtd_info *mtd)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	int ret;

	ret = spinand_reset_op(spinand);
	if (ret)
		return;

	ret = spinand_init_flash(spinand);
	if (ret)
		return;

	spinand_ecc_enable(spinand, false);
}

static int spinand_init(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	int ret;

	/*
	 * We need a scratch buffer because the spi_mem interface requires that
	 * buf passed in spi_mem_op->data.buf be DMA-able.
	 */
	spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
	if (!spinand->scratchbuf)
		return -ENOMEM;

	ret = spinand_detect(spinand);
	if (ret)
		goto err_free_bufs;

	/*
	 * Use kzalloc() instead of devm_kzalloc() here, because some drivers
	 * may use this buffer for DMA access.
	 * Memory allocated by devm_ does not guarantee DMA-safe alignment.
	 */
	spinand->databuf = kzalloc(nanddev_page_size(nand) +
				   nanddev_per_page_oobsize(nand),
				   GFP_KERNEL);
	if (!spinand->databuf) {
		ret = -ENOMEM;
		goto err_free_bufs;
	}

	spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);

	ret = spinand_init_cfg_cache(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_init_flash(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_create_dirmaps(spinand);
	if (ret) {
		dev_err(dev,
			"Failed to create direct mappings for read/write operations (err = %d)\n",
			ret);
		goto err_manuf_cleanup;
	}

	ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
	if (ret)
		goto err_manuf_cleanup;

	/* SPI-NAND default ECC engine is on-die */
	nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
	nand->ecc.ondie_engine = &spinand_ondie_ecc_engine;

	spinand_ecc_enable(spinand, false);
	ret = nanddev_ecc_engine_init(nand);
	if (ret)
		goto err_cleanup_nanddev;

	mtd->_read_oob = spinand_mtd_read;
	mtd->_write_oob = spinand_mtd_write;
	mtd->_block_isbad = spinand_mtd_block_isbad;
	mtd->_block_markbad = spinand_mtd_block_markbad;
	mtd->_block_isreserved = spinand_mtd_block_isreserved;
	mtd->_erase = spinand_mtd_erase;
	mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
	mtd->_resume = spinand_mtd_resume;

	if (nand->ecc.engine) {
		ret = mtd_ooblayout_count_freebytes(mtd);
		if (ret < 0)
			goto err_cleanup_ecc_engine;
	}

	mtd->oobavail = ret;

	/* Propagate ECC information to mtd_info */
	mtd->ecc_strength = nanddev_get_ecc_conf(nand)->strength;
	mtd->ecc_step_size = nanddev_get_ecc_conf(nand)->step_size;

	return 0;

err_cleanup_ecc_engine:
	nanddev_ecc_engine_cleanup(nand);

err_cleanup_nanddev:
	nanddev_cleanup(nand);

err_manuf_cleanup:
	spinand_manufacturer_cleanup(spinand);

err_free_bufs:
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
	return ret;
}

static void spinand_cleanup(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	nanddev_cleanup(nand);
	spinand_manufacturer_cleanup(spinand);
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
}

static int spinand_probe(struct spi_mem *mem)
{
	struct spinand_device *spinand;
	struct mtd_info *mtd;
	int ret;

	spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
			       GFP_KERNEL);
	if (!spinand)
		return -ENOMEM;

	spinand->spimem = mem;
	spi_mem_set_drvdata(mem, spinand);
	spinand_set_of_node(spinand, mem->spi->dev.of_node);
	mutex_init(&spinand->lock);
	mtd = spinand_to_mtd(spinand);
	mtd->dev.parent = &mem->spi->dev;

	ret = spinand_init(spinand);
	if (ret)
		return ret;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		goto err_spinand_cleanup;

	return 0;

err_spinand_cleanup:
	spinand_cleanup(spinand);

	return ret;
}

static int spinand_remove(struct spi_mem *mem)
{
	struct spinand_device *spinand;
	struct mtd_info *mtd;
	int ret;

	spinand = spi_mem_get_drvdata(mem);
	mtd = spinand_to_mtd(spinand);

	ret = mtd_device_unregister(mtd);
	if (ret)
		return ret;

	spinand_cleanup(spinand);

	return 0;
}

static const struct spi_device_id spinand_ids[] = {
	{ .name = "spi-nand" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(spi, spinand_ids);

#ifdef CONFIG_OF
static const struct of_device_id spinand_of_ids[] = {
	{ .compatible = "spi-nand" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, spinand_of_ids);
#endif

static struct spi_mem_driver spinand_drv = {
	.spidrv = {
		.id_table = spinand_ids,
		.driver = {
			.name = "spi-nand",
			.of_match_table = of_match_ptr(spinand_of_ids),
		},
	},
	.probe = spinand_probe,
	.remove = spinand_remove,
};
module_spi_mem_driver(spinand_drv);

MODULE_DESCRIPTION("SPI NAND framework");
MODULE_AUTHOR("Peter Pan <peterpandong@micron.com>");
MODULE_LICENSE("GPL v2");