// SPDX-License-Identifier: GPL-2.0
/*
 * SuperH FLCTL NAND controller
 *
 * Copyright (c) 2008 Renesas Solutions Corp.
 * Copyright (c) 2008 Atom Create Engineering Co., Ltd.
 *
 * Based on fsl_elbc_nand.c, Copyright (c) 2006-2007 Freescale Semiconductor
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/sh_flctl.h>

static int flctl_4secc_ooblayout_sp_ecc(struct mtd_info *mtd, int section,
					struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section)
		return -ERANGE;

	oobregion->offset = 0;
	oobregion->length = chip->ecc.bytes;

	return 0;
}

static int flctl_4secc_ooblayout_sp_free(struct mtd_info *mtd, int section,
					 struct mtd_oob_region *oobregion)
{
	if (section)
		return -ERANGE;

	oobregion->offset = 12;
	oobregion->length = 4;

	return 0;
}

static const struct mtd_ooblayout_ops flctl_4secc_oob_smallpage_ops = {
	.ecc = flctl_4secc_ooblayout_sp_ecc,
	.free = flctl_4secc_ooblayout_sp_free,
};

static int flctl_4secc_ooblayout_lp_ecc(struct mtd_info *mtd, int section,
					struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section >= chip->ecc.steps)
		return -ERANGE;

	oobregion->offset = (section * 16) + 6;
	oobregion->length = chip->ecc.bytes;

	return 0;
}

static int flctl_4secc_ooblayout_lp_free(struct mtd_info *mtd, int section,
					 struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section >= chip->ecc.steps)
		return -ERANGE;

	oobregion->offset = section * 16;
	oobregion->length = 6;

	if (!section) {
		oobregion->offset += 2;
		oobregion->length -= 2;
	}

	return 0;
}

static const struct mtd_ooblayout_ops flctl_4secc_oob_largepage_ops = {
	.ecc = flctl_4secc_ooblayout_lp_ecc,
	.free = flctl_4secc_ooblayout_lp_free,
};

static uint8_t scan_ff_pattern[] = { 0xff, 0xff };

static struct nand_bbt_descr flctl_4secc_smallpage = {
	.offs = 11,
	.len = 1,
	.pattern = scan_ff_pattern,
};

static struct nand_bbt_descr flctl_4secc_largepage = {
	.offs = 0,
	.len = 2,
	.pattern = scan_ff_pattern,
};

static void empty_fifo(struct sh_flctl *flctl)
{
	writel(flctl->flintdmacr_base | AC1CLR | AC0CLR, FLINTDMACR(flctl));
	writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
}

static void start_translation(struct sh_flctl *flctl)
{
	writeb(TRSTRT, FLTRCR(flctl));
}

static void timeout_error(struct sh_flctl *flctl, const char *str)
{
	dev_err(&flctl->pdev->dev, "Timeout occurred in %s\n", str);
}

static void wait_completion(struct sh_flctl *flctl)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;

	while (timeout--) {
		if (readb(FLTRCR(flctl)) & TREND) {
			writeb(0x0, FLTRCR(flctl));
			return;
		}
		udelay(1);
	}

	timeout_error(flctl, __func__);
	writeb(0x0, FLTRCR(flctl));
}
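/*
 * DMA handling: the FLCTL either uses DMA for FIFO0 in both directions or
 * not at all.  flctl_setup_dma() below requests both channels and tears
 * everything down on any failure, so the transfer paths only have to test
 * one channel pointer before attempting DMA and can quietly fall back to
 * PIO otherwise.
 */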
static void flctl_dma_complete(void *param)
{
	struct sh_flctl *flctl = param;

	complete(&flctl->dma_complete);
}

static void flctl_release_dma(struct sh_flctl *flctl)
{
	if (flctl->chan_fifo0_rx) {
		dma_release_channel(flctl->chan_fifo0_rx);
		flctl->chan_fifo0_rx = NULL;
	}
	if (flctl->chan_fifo0_tx) {
		dma_release_channel(flctl->chan_fifo0_tx);
		flctl->chan_fifo0_tx = NULL;
	}
}

static void flctl_setup_dma(struct sh_flctl *flctl)
{
	dma_cap_mask_t mask;
	struct dma_slave_config cfg;
	struct platform_device *pdev = flctl->pdev;
	struct sh_flctl_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int ret;

	if (!pdata)
		return;

	if (pdata->slave_id_fifo0_tx <= 0 || pdata->slave_id_fifo0_rx <= 0)
		return;

	/* We can only either use DMA for both Tx and Rx or not use it at all */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	flctl->chan_fifo0_tx = dma_request_channel(mask, shdma_chan_filter,
				(void *)(uintptr_t)pdata->slave_id_fifo0_tx);
	dev_dbg(&pdev->dev, "%s: TX: got channel %p\n", __func__,
		flctl->chan_fifo0_tx);

	if (!flctl->chan_fifo0_tx)
		return;

	memset(&cfg, 0, sizeof(cfg));
	cfg.direction = DMA_MEM_TO_DEV;
	cfg.dst_addr = flctl->fifo;
	cfg.src_addr = 0;
	ret = dmaengine_slave_config(flctl->chan_fifo0_tx, &cfg);
	if (ret < 0)
		goto err;

	flctl->chan_fifo0_rx = dma_request_channel(mask, shdma_chan_filter,
				(void *)(uintptr_t)pdata->slave_id_fifo0_rx);
	dev_dbg(&pdev->dev, "%s: RX: got channel %p\n", __func__,
		flctl->chan_fifo0_rx);

	if (!flctl->chan_fifo0_rx)
		goto err;

	cfg.direction = DMA_DEV_TO_MEM;
	cfg.dst_addr = 0;
	cfg.src_addr = flctl->fifo;
	ret = dmaengine_slave_config(flctl->chan_fifo0_rx, &cfg);
	if (ret < 0)
		goto err;

	init_completion(&flctl->dma_complete);

	return;

err:
	flctl_release_dma(flctl);
}

static void set_addr(struct mtd_info *mtd, int column, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint32_t addr = 0;

	if (column == -1) {
		addr = page_addr;	/* ERASE1 */
	} else if (page_addr != -1) {
		/* SEQIN, READ0, etc. */
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			column >>= 1;
		if (flctl->page_size) {
			addr = column & 0x0FFF;
			addr |= (page_addr & 0xff) << 16;
			addr |= ((page_addr >> 8) & 0xff) << 24;
			/* larger than 128MB */
			if (flctl->rw_ADRCNT == ADRCNT2_E) {
				uint32_t addr2;
				addr2 = (page_addr >> 16) & 0xff;
				writel(addr2, FLADR2(flctl));
			}
		} else {
			addr = column;
			addr |= (page_addr & 0xff) << 8;
			addr |= ((page_addr >> 8) & 0xff) << 16;
			addr |= ((page_addr >> 16) & 0xff) << 24;
		}
	}
	writel(addr, FLADR(flctl));
}
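/*
 * Worked example of the large-page FLADR packing above: column 0x040 in
 * page 0x12345 yields FLADR = 0x23450040 (page bits 7:0 go to FLADR bits
 * 23:16, page bits 15:8 to bits 31:24, the column to bits 11:0), and the
 * remaining page bits 23:16 (here 0x01) are written to FLADR2 when the
 * five-cycle ADRCNT2_E addressing is in use.
 */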
static void wait_rfifo_ready(struct sh_flctl *flctl)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;

	while (timeout--) {
		uint32_t val;
		/* check FIFO */
		val = readl(FLDTCNTR(flctl)) >> 16;
		if (val & 0xFF)
			return;
		udelay(1);
	}
	timeout_error(flctl, __func__);
}

static void wait_wfifo_ready(struct sh_flctl *flctl)
{
	uint32_t len, timeout = LOOP_TIMEOUT_MAX;

	while (timeout--) {
		/* check FIFO */
		len = (readl(FLDTCNTR(flctl)) >> 16) & 0xFF;
		if (len >= 4)
			return;
		udelay(1);
	}
	timeout_error(flctl, __func__);
}

static enum flctl_ecc_res_t wait_recfifo_ready
		(struct sh_flctl *flctl, int sector_number)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;
	void __iomem *ecc_reg[4];
	int i;
	int state = FL_SUCCESS;
	uint32_t data, size;

	/*
	 * First this loop checks in FLDTCNTR if we are ready to read out the
	 * oob data. This is the case if either all went fine without errors,
	 * or if the bottom part of the loop corrected the errors or marked
	 * them as uncorrectable and the controller is given time to push the
	 * data into the FIFO.
	 */
	while (timeout--) {
		/* check if all is ok and we can read out the OOB */
		size = readl(FLDTCNTR(flctl)) >> 24;
		if ((size & 0xFF) == 4)
			return state;

		/* check if a correction code has been calculated */
		if (!(readl(FL4ECCCR(flctl)) & _4ECCEND)) {
			/*
			 * either we wait for the fifo to be filled or a
			 * correction pattern is being generated
			 */
			udelay(1);
			continue;
		}

		/* check for an uncorrectable error */
		if (readl(FL4ECCCR(flctl)) & _4ECCFA) {
			/* check if we face a non-empty page */
			for (i = 0; i < 512; i++) {
				if (flctl->done_buff[i] != 0xff) {
					state = FL_ERROR; /* can't correct */
					break;
				}
			}

			if (state == FL_SUCCESS)
				dev_dbg(&flctl->pdev->dev,
					"reading empty sector %d, ecc error ignored\n",
					sector_number);

			writel(0, FL4ECCCR(flctl));
			continue;
		}

		/* start error correction */
		ecc_reg[0] = FL4ECCRESULT0(flctl);
		ecc_reg[1] = FL4ECCRESULT1(flctl);
		ecc_reg[2] = FL4ECCRESULT2(flctl);
		ecc_reg[3] = FL4ECCRESULT3(flctl);

		for (i = 0; i < 3; i++) {
			uint8_t org;
			unsigned int index;

			data = readl(ecc_reg[i]);

			if (flctl->page_size)
				index = (512 * sector_number) +
					(data >> 16);
			else
				index = data >> 16;

			org = flctl->done_buff[index];
			flctl->done_buff[index] = org ^ (data & 0xFF);
		}
		state = FL_REPAIRABLE;
		writel(0, FL4ECCCR(flctl));
	}

	timeout_error(flctl, __func__);
	return FL_TIMEOUT;	/* timeout */
}

static void wait_wecfifo_ready(struct sh_flctl *flctl)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;
	uint32_t len;

	while (timeout--) {
		/* check FLECFIFO */
		len = (readl(FLDTCNTR(flctl)) >> 24) & 0xFF;
		if (len >= 4)
			return;
		udelay(1);
	}
	timeout_error(flctl, __func__);
}
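/*
 * Single-shot DMA transfer to or from FIFO0.  The buffer is mapped for
 * streaming DMA, DREQ0EN gates the request line for the duration of the
 * transfer, and a 3 s completion timeout guards against a wedged channel.
 * Any failure returns non-zero so that the callers fall back to PIO.
 */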
static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
					int len, enum dma_data_direction dir)
{
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan;
	enum dma_transfer_direction tr_dir;
	dma_addr_t dma_addr;
	dma_cookie_t cookie;
	uint32_t reg;
	int ret = 0;
	unsigned long time_left;

	if (dir == DMA_FROM_DEVICE) {
		chan = flctl->chan_fifo0_rx;
		tr_dir = DMA_DEV_TO_MEM;
	} else {
		chan = flctl->chan_fifo0_tx;
		tr_dir = DMA_MEM_TO_DEV;
	}

	dma_addr = dma_map_single(chan->device->dev, buf, len, dir);

	if (!dma_mapping_error(chan->device->dev, dma_addr))
		desc = dmaengine_prep_slave_single(chan, dma_addr, len,
			tr_dir, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (desc) {
		reg = readl(FLINTDMACR(flctl));
		reg |= DREQ0EN;
		writel(reg, FLINTDMACR(flctl));

		desc->callback = flctl_dma_complete;
		desc->callback_param = flctl;
		cookie = dmaengine_submit(desc);
		if (dma_submit_error(cookie)) {
			ret = dma_submit_error(cookie);
			dev_warn(&flctl->pdev->dev,
				 "DMA submit failed, falling back to PIO\n");
			goto out;
		}

		dma_async_issue_pending(chan);
	} else {
		/* DMA failed, fall back to PIO */
		flctl_release_dma(flctl);
		dev_warn(&flctl->pdev->dev,
			 "DMA failed, falling back to PIO\n");
		ret = -EIO;
		goto out;
	}

	time_left =
		wait_for_completion_timeout(&flctl->dma_complete,
					    msecs_to_jiffies(3000));

	if (time_left == 0) {
		dmaengine_terminate_all(chan);
		dev_err(&flctl->pdev->dev, "wait_for_completion_timeout\n");
		ret = -ETIMEDOUT;
	}

out:
	reg = readl(FLINTDMACR(flctl));
	reg &= ~DREQ0EN;
	writel(reg, FLINTDMACR(flctl));

	dma_unmap_single(chan->device->dev, dma_addr, len, dir);

	/* ret == 0 is success */
	return ret;
}

static void read_datareg(struct sh_flctl *flctl, int offset)
{
	unsigned long data;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	wait_completion(flctl);

	data = readl(FLDATAR(flctl));
	*buf = le32_to_cpu(data);
}

static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
{
	int i, len_4align;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	len_4align = (rlen + 3) / 4;

	/* initiate DMA transfer */
	if (flctl->chan_fifo0_rx && rlen >= 32 &&
	    !flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_FROM_DEVICE))
		goto convert;	/* DMA success */

	/* do polling transfer */
	for (i = 0; i < len_4align; i++) {
		wait_rfifo_ready(flctl);
		buf[i] = readl(FLDTFIFO(flctl));
	}

convert:
	for (i = 0; i < len_4align; i++)
		buf[i] = be32_to_cpu(buf[i]);
}

static enum flctl_ecc_res_t read_ecfiforeg
		(struct sh_flctl *flctl, uint8_t *buff, int sector)
{
	int i;
	enum flctl_ecc_res_t res;
	unsigned long *ecc_buf = (unsigned long *)buff;

	res = wait_recfifo_ready(flctl, sector);

	if (res != FL_ERROR) {
		for (i = 0; i < 4; i++) {
			ecc_buf[i] = readl(FLECFIFO(flctl));
			ecc_buf[i] = be32_to_cpu(ecc_buf[i]);
		}
	}

	return res;
}
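/*
 * The data FIFOs present big-endian words, so both the PIO and DMA paths
 * byte-swap the shared done_buff in place.  DMA is only attempted for
 * transfers of at least 32 bytes, where it is presumably worth the setup
 * cost of mapping the buffer and submitting a descriptor.
 */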
static void write_fiforeg(struct sh_flctl *flctl, int rlen,
						unsigned int offset)
{
	int i, len_4align;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	len_4align = (rlen + 3) / 4;
	for (i = 0; i < len_4align; i++) {
		wait_wfifo_ready(flctl);
		writel(cpu_to_be32(buf[i]), FLDTFIFO(flctl));
	}
}

static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen,
						unsigned int offset)
{
	int i, len_4align;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	len_4align = (rlen + 3) / 4;

	for (i = 0; i < len_4align; i++)
		buf[i] = cpu_to_be32(buf[i]);

	/* initiate DMA transfer */
	if (flctl->chan_fifo0_tx && rlen >= 32 &&
	    !flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_TO_DEVICE))
		return;	/* DMA success */

	/* do polling transfer */
	for (i = 0; i < len_4align; i++) {
		wait_wecfifo_ready(flctl);
		writel(buf[i], FLECFIFO(flctl));
	}
}

static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint32_t flcmncr_val = flctl->flcmncr_base & ~SEL_16BIT;
	uint32_t flcmdcr_val, addr_len_bytes = 0;

	/* Set SNAND bit if page size is 2048 bytes */
	if (flctl->page_size)
		flcmncr_val |= SNAND_E;
	else
		flcmncr_val &= ~SNAND_E;

	/* default FLCMDCR val */
	flcmdcr_val = DOCMD1_E | DOADR_E;

	/* Set for FLCMDCR */
	switch (cmd) {
	case NAND_CMD_ERASE1:
		addr_len_bytes = flctl->erase_ADRCNT;
		flcmdcr_val |= DOCMD2_E;
		break;
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
	case NAND_CMD_RNDOUT:
		addr_len_bytes = flctl->rw_ADRCNT;
		flcmdcr_val |= CDSRC_E;
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			flcmncr_val |= SEL_16BIT;
		break;
	case NAND_CMD_SEQIN:
		/* here the command is READ0, READ1 or READ00 */
		flcmdcr_val &= ~DOADR_E;	/* ONLY execute 1st cmd */
		break;
	case NAND_CMD_PAGEPROG:
		addr_len_bytes = flctl->rw_ADRCNT;
		flcmdcr_val |= DOCMD2_E | CDSRC_E | SELRW;
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			flcmncr_val |= SEL_16BIT;
		break;
	case NAND_CMD_READID:
		flcmncr_val &= ~SNAND_E;
		flcmdcr_val |= CDSRC_E;
		addr_len_bytes = ADRCNT_1;
		break;
	case NAND_CMD_STATUS:
	case NAND_CMD_RESET:
		flcmncr_val &= ~SNAND_E;
		flcmdcr_val &= ~(DOADR_E | DOSR_E);
		break;
	default:
		break;
	}

	/* Set address bytes parameter */
	flcmdcr_val |= addr_len_bytes;

	/* Now actually write */
	writel(flcmncr_val, FLCMNCR(flctl));
	writel(flcmdcr_val, FLCMDCR(flctl));
	writel(flcmcdr_val, FLCMCDR(flctl));
}

static int flctl_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
				 int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	nand_read_page_op(chip, page, 0, buf, mtd->writesize);
	if (oob_required)
		chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
	return 0;
}

static int flctl_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
				  int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
	chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
	return nand_prog_page_end_op(chip);
}
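/*
 * With hardware ECC the real work happens in the execmd_* helpers below,
 * invoked from flctl_cmdfunc(); the ecc.read_page/write_page callbacks
 * above only move data between done_buff and the caller through the
 * legacy buffer ops.
 */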
static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int sector, page_sectors;
	enum flctl_ecc_res_t ecc_result;

	page_sectors = flctl->page_size ? 4 : 1;

	set_cmd_regs(mtd, NAND_CMD_READ0,
		(NAND_CMD_READSTART << 8) | NAND_CMD_READ0);

	writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT,
		 FLCMNCR(flctl));
	writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
	writel(page_addr << 2, FLADR(flctl));

	empty_fifo(flctl);
	start_translation(flctl);

	for (sector = 0; sector < page_sectors; sector++) {
		read_fiforeg(flctl, 512, 512 * sector);

		ecc_result = read_ecfiforeg(flctl,
			&flctl->done_buff[mtd->writesize + 16 * sector],
			sector);

		switch (ecc_result) {
		case FL_REPAIRABLE:
			dev_info(&flctl->pdev->dev,
				"applied ecc on page 0x%x\n", page_addr);
			mtd->ecc_stats.corrected++;
			break;
		case FL_ERROR:
			dev_warn(&flctl->pdev->dev,
				"page 0x%x contains corrupted data\n",
				page_addr);
			mtd->ecc_stats.failed++;
			break;
		default:
			;
		}
	}

	wait_completion(flctl);

	writel(readl(FLCMNCR(flctl)) & ~(ACM_SACCES_MODE | _4ECCCORRECT),
			FLCMNCR(flctl));
}

static void execmd_read_oob(struct mtd_info *mtd, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int page_sectors = flctl->page_size ? 4 : 1;
	int i;

	set_cmd_regs(mtd, NAND_CMD_READ0,
		(NAND_CMD_READSTART << 8) | NAND_CMD_READ0);

	empty_fifo(flctl);

	for (i = 0; i < page_sectors; i++) {
		set_addr(mtd, (512 + 16) * i + 512, page_addr);
		writel(16, FLDTCNTR(flctl));	/* set read size */

		start_translation(flctl);
		read_fiforeg(flctl, 16, 16 * i);
		wait_completion(flctl);
	}
}

static void execmd_write_page_sector(struct mtd_info *mtd)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int page_addr = flctl->seqin_page_addr;
	int sector, page_sectors;

	page_sectors = flctl->page_size ? 4 : 1;

	set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
			(NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);

	empty_fifo(flctl);
	writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE, FLCMNCR(flctl));
	writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
	writel(page_addr << 2, FLADR(flctl));
	start_translation(flctl);

	for (sector = 0; sector < page_sectors; sector++) {
		write_fiforeg(flctl, 512, 512 * sector);
		write_ec_fiforeg(flctl, 16, mtd->writesize + 16 * sector);
	}

	wait_completion(flctl);
	writel(readl(FLCMNCR(flctl)) & ~ACM_SACCES_MODE, FLCMNCR(flctl));
}
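/*
 * Writing with hardware ECC: execmd_write_page_sector() above streams 512
 * data bytes per sector through the data FIFO and the 16 OOB bytes through
 * the ECC FIFO, where the 4-symbol engine is expected to fill in the 10
 * ECC bytes per sector (see chip->ecc.bytes in flctl_chip_attach_chip()).
 * execmd_write_oob() below programs only the OOB area, sector by sector.
 */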
static void execmd_write_oob(struct mtd_info *mtd)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int page_addr = flctl->seqin_page_addr;
	int sector, page_sectors;

	page_sectors = flctl->page_size ? 4 : 1;

	set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
			(NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);

	for (sector = 0; sector < page_sectors; sector++) {
		empty_fifo(flctl);
		set_addr(mtd, sector * 528 + 512, page_addr);
		writel(16, FLDTCNTR(flctl));	/* set write size */

		start_translation(flctl);
		write_fiforeg(flctl, 16, 16 * sector);
		wait_completion(flctl);
	}
}
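/*
 * flctl_cmdfunc() implements the legacy command interface: the read-type
 * commands fill done_buff and fall through to read_normal_exit, SEQIN only
 * latches column/page for the following PAGEPROG, and every entry is
 * bracketed by a runtime PM get/put on the controller device.
 */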
static void flctl_cmdfunc(struct nand_chip *chip, unsigned int command,
			int column, int page_addr)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint32_t read_cmd = 0;

	pm_runtime_get_sync(&flctl->pdev->dev);

	flctl->read_bytes = 0;
	if (command != NAND_CMD_PAGEPROG)
		flctl->index = 0;

	switch (command) {
	case NAND_CMD_READ1:
	case NAND_CMD_READ0:
		if (flctl->hwecc) {
			/* read page with hwecc */
			execmd_read_page_sector(mtd, page_addr);
			break;
		}
		if (flctl->page_size)
			set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
				| command);
		else
			set_cmd_regs(mtd, command, command);

		set_addr(mtd, 0, page_addr);

		flctl->read_bytes = mtd->writesize + mtd->oobsize;
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			column >>= 1;
		flctl->index += column;
		goto read_normal_exit;

	case NAND_CMD_READOOB:
		if (flctl->hwecc) {
			/* read page with hwecc */
			execmd_read_oob(mtd, page_addr);
			break;
		}

		if (flctl->page_size) {
			set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
				| NAND_CMD_READ0);
			set_addr(mtd, mtd->writesize, page_addr);
		} else {
			set_cmd_regs(mtd, command, command);
			set_addr(mtd, 0, page_addr);
		}
		flctl->read_bytes = mtd->oobsize;
		goto read_normal_exit;

	case NAND_CMD_RNDOUT:
		if (flctl->hwecc)
			break;

		if (flctl->page_size)
			set_cmd_regs(mtd, command, (NAND_CMD_RNDOUTSTART << 8)
				| command);
		else
			set_cmd_regs(mtd, command, command);

		set_addr(mtd, column, 0);

		flctl->read_bytes = mtd->writesize + mtd->oobsize - column;
		goto read_normal_exit;

	case NAND_CMD_READID:
		set_cmd_regs(mtd, command, command);

		/* READID is always performed using an 8-bit bus */
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			column <<= 1;
		set_addr(mtd, column, 0);

		flctl->read_bytes = 8;
		writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
		empty_fifo(flctl);
		start_translation(flctl);
		read_fiforeg(flctl, flctl->read_bytes, 0);
		wait_completion(flctl);
		break;

	case NAND_CMD_ERASE1:
		flctl->erase1_page_addr = page_addr;
		break;

	case NAND_CMD_ERASE2:
		set_cmd_regs(mtd, NAND_CMD_ERASE1,
			(command << 8) | NAND_CMD_ERASE1);
		set_addr(mtd, -1, flctl->erase1_page_addr);
		start_translation(flctl);
		wait_completion(flctl);
		break;

	case NAND_CMD_SEQIN:
		if (!flctl->page_size) {
			/* output read command */
			if (column >= mtd->writesize) {
				column -= mtd->writesize;
				read_cmd = NAND_CMD_READOOB;
			} else if (column < 256) {
				read_cmd = NAND_CMD_READ0;
			} else {
				column -= 256;
				read_cmd = NAND_CMD_READ1;
			}
		}
		flctl->seqin_column = column;
		flctl->seqin_page_addr = page_addr;
		flctl->seqin_read_cmd = read_cmd;
		break;

	case NAND_CMD_PAGEPROG:
		empty_fifo(flctl);
		if (!flctl->page_size) {
			set_cmd_regs(mtd, NAND_CMD_SEQIN,
					flctl->seqin_read_cmd);
			set_addr(mtd, -1, -1);
			writel(0, FLDTCNTR(flctl));	/* set 0 size */
			start_translation(flctl);
			wait_completion(flctl);
		}
		if (flctl->hwecc) {
			/* write page with hwecc */
			if (flctl->seqin_column == mtd->writesize)
				execmd_write_oob(mtd);
			else if (!flctl->seqin_column)
				execmd_write_page_sector(mtd);
			else
				pr_err("Invalid address !?\n");
			break;
		}
		set_cmd_regs(mtd, command, (command << 8) | NAND_CMD_SEQIN);
		set_addr(mtd, flctl->seqin_column, flctl->seqin_page_addr);
		writel(flctl->index, FLDTCNTR(flctl));	/* set write size */
		start_translation(flctl);
		write_fiforeg(flctl, flctl->index, 0);
		wait_completion(flctl);
		break;

	case NAND_CMD_STATUS:
		set_cmd_regs(mtd, command, command);
		set_addr(mtd, -1, -1);

		flctl->read_bytes = 1;
		writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
		start_translation(flctl);
		read_datareg(flctl, 0); /* read and end */
		break;

	case NAND_CMD_RESET:
		set_cmd_regs(mtd, command, command);
		set_addr(mtd, -1, -1);

		writel(0, FLDTCNTR(flctl));	/* set 0 size */
		start_translation(flctl);
		wait_completion(flctl);
		break;

	default:
		break;
	}
	goto runtime_exit;

read_normal_exit:
	writel(flctl->read_bytes, FLDTCNTR(flctl));	/* set read size */
	empty_fifo(flctl);
	start_translation(flctl);
	read_fiforeg(flctl, flctl->read_bytes, 0);
	wait_completion(flctl);
runtime_exit:
	pm_runtime_put_sync(&flctl->pdev->dev);
}

static void flctl_select_chip(struct nand_chip *chip, int chipnr)
{
	struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
	int ret;

	switch (chipnr) {
	case -1:
		flctl->flcmncr_base &= ~CE0_ENABLE;

		pm_runtime_get_sync(&flctl->pdev->dev);
		writel(flctl->flcmncr_base, FLCMNCR(flctl));

		if (flctl->qos_request) {
			dev_pm_qos_remove_request(&flctl->pm_qos);
			flctl->qos_request = 0;
		}

		pm_runtime_put_sync(&flctl->pdev->dev);
		break;
	case 0:
		flctl->flcmncr_base |= CE0_ENABLE;

		if (!flctl->qos_request) {
			ret = dev_pm_qos_add_request(&flctl->pdev->dev,
							&flctl->pm_qos,
							DEV_PM_QOS_RESUME_LATENCY,
							100);
			if (ret < 0)
				dev_err(&flctl->pdev->dev,
					"PM QoS request failed: %d\n", ret);
			flctl->qos_request = 1;
		}

		if (flctl->holden) {
			pm_runtime_get_sync(&flctl->pdev->dev);
			writel(HOLDEN, FLHOLDCR(flctl));
			pm_runtime_put_sync(&flctl->pdev->dev);
		}
		break;
	default:
		BUG();
	}
}

static void flctl_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
{
	struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));

	memcpy(&flctl->done_buff[flctl->index], buf, len);
	flctl->index += len;
}

static uint8_t flctl_read_byte(struct nand_chip *chip)
{
	struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
	uint8_t data;

	data = flctl->done_buff[flctl->index];
	flctl->index++;
	return data;
}

static void flctl_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
	struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));

	memcpy(buf, &flctl->done_buff[flctl->index], len);
	flctl->index += len;
}
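/*
 * Address cycle configuration chosen by flctl_chip_attach_chip() below:
 *
 *	page size	chip size	rw_ADRCNT	erase_ADRCNT
 *	512 B		> 32 MiB	ADRCNT_4	ADRCNT_3
 *	512 B		> 128 KiB	ADRCNT_3	ADRCNT_2
 *	512 B		otherwise	ADRCNT_2	ADRCNT_1
 *	2 KiB		> 128 MiB	ADRCNT2_E	ADRCNT_3
 *	2 KiB		> 512 KiB	ADRCNT_4	ADRCNT_2
 *	2 KiB		otherwise	ADRCNT_3	ADRCNT_1
 */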
static int flctl_chip_attach_chip(struct nand_chip *chip)
{
	u64 targetsize = nanddev_target_size(&chip->base);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct sh_flctl *flctl = mtd_to_flctl(mtd);

	/*
	 * NAND_BUSWIDTH_16 may have been set by nand_scan_ident().
	 * Add the SEL_16BIT flag in flctl->flcmncr_base.
	 */
	if (chip->options & NAND_BUSWIDTH_16)
		flctl->flcmncr_base |= SEL_16BIT;

	if (mtd->writesize == 512) {
		flctl->page_size = 0;
		if (targetsize > (32 << 20)) {
			/* larger than 32MB */
			flctl->rw_ADRCNT = ADRCNT_4;
			flctl->erase_ADRCNT = ADRCNT_3;
		} else if (targetsize > (2 << 16)) {
			/* larger than 128KB */
			flctl->rw_ADRCNT = ADRCNT_3;
			flctl->erase_ADRCNT = ADRCNT_2;
		} else {
			flctl->rw_ADRCNT = ADRCNT_2;
			flctl->erase_ADRCNT = ADRCNT_1;
		}
	} else {
		flctl->page_size = 1;
		if (targetsize > (128 << 20)) {
			/* larger than 128MB */
			flctl->rw_ADRCNT = ADRCNT2_E;
			flctl->erase_ADRCNT = ADRCNT_3;
		} else if (targetsize > (8 << 16)) {
			/* larger than 512KB */
			flctl->rw_ADRCNT = ADRCNT_4;
			flctl->erase_ADRCNT = ADRCNT_2;
		} else {
			flctl->rw_ADRCNT = ADRCNT_3;
			flctl->erase_ADRCNT = ADRCNT_1;
		}
	}

	if (flctl->hwecc) {
		if (mtd->writesize == 512) {
			mtd_set_ooblayout(mtd, &flctl_4secc_oob_smallpage_ops);
			chip->badblock_pattern = &flctl_4secc_smallpage;
		} else {
			mtd_set_ooblayout(mtd, &flctl_4secc_oob_largepage_ops);
			chip->badblock_pattern = &flctl_4secc_largepage;
		}

		chip->ecc.size = 512;
		chip->ecc.bytes = 10;
		chip->ecc.strength = 4;
		chip->ecc.read_page = flctl_read_page_hwecc;
		chip->ecc.write_page = flctl_write_page_hwecc;
		chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;

		/* 4 symbols ECC enabled */
		flctl->flcmncr_base |= _4ECCEN;
	} else {
		chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
		chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
	}

	return 0;
}

static const struct nand_controller_ops flctl_nand_controller_ops = {
	.attach_chip = flctl_chip_attach_chip,
};

static irqreturn_t flctl_handle_flste(int irq, void *dev_id)
{
	struct sh_flctl *flctl = dev_id;

	dev_err(&flctl->pdev->dev, "flste irq: %x\n", readl(FLINTDMACR(flctl)));
	writel(flctl->flintdmacr_base, FLINTDMACR(flctl));

	return IRQ_HANDLED;
}

struct flctl_soc_config {
	unsigned long flcmncr_val;
	unsigned has_hwecc:1;
	unsigned use_holden:1;
};

static struct flctl_soc_config flctl_sh7372_config = {
	.flcmncr_val = CLK_16B_12L_4H | TYPESEL_SET | SHBUSSEL,
	.has_hwecc = 1,
	.use_holden = 1,
};

static const struct of_device_id of_flctl_match[] = {
	{ .compatible = "renesas,shmobile-flctl-sh7372",
	  .data = &flctl_sh7372_config },
	{},
};
MODULE_DEVICE_TABLE(of, of_flctl_match);

static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
{
	const struct flctl_soc_config *config;
	struct sh_flctl_platform_data *pdata;

	config = of_device_get_match_data(dev);
	if (!config) {
		dev_err(dev, "%s: no OF configuration attached\n", __func__);
		return NULL;
	}

	pdata = devm_kzalloc(dev, sizeof(struct sh_flctl_platform_data),
			     GFP_KERNEL);
	if (!pdata)
		return NULL;

	/* set SoC specific options */
	pdata->flcmncr_val = config->flcmncr_val;
	pdata->has_hwecc = config->has_hwecc;
	pdata->use_holden = config->use_holden;

	return pdata;
}
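/*
 * Illustrative (not normative) device tree node for this controller; the
 * unit address and interrupt specifier are board specific:
 *
 *	flctl@e6a30000 {
 *		compatible = "renesas,shmobile-flctl-sh7372";
 *		reg = <0xe6a30000 0x100>;
 *		interrupts = <...>;
 *	};
 */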
static int flctl_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct sh_flctl *flctl;
	struct mtd_info *flctl_mtd;
	struct nand_chip *nand;
	struct sh_flctl_platform_data *pdata;
	int ret;
	int irq;

	flctl = devm_kzalloc(&pdev->dev, sizeof(struct sh_flctl), GFP_KERNEL);
	if (!flctl)
		return -ENOMEM;

	flctl->reg = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(flctl->reg))
		return PTR_ERR(flctl->reg);
	flctl->fifo = res->start + 0x24; /* FLDTFIFO */

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, flctl_handle_flste, IRQF_SHARED,
			       "flste", flctl);
	if (ret) {
		dev_err(&pdev->dev, "request interrupt failed.\n");
		return ret;
	}

	if (pdev->dev.of_node)
		pdata = flctl_parse_dt(&pdev->dev);
	else
		pdata = dev_get_platdata(&pdev->dev);

	if (!pdata) {
		dev_err(&pdev->dev, "no setup data defined\n");
		return -EINVAL;
	}

	platform_set_drvdata(pdev, flctl);
	nand = &flctl->chip;
	flctl_mtd = nand_to_mtd(nand);
	nand_set_flash_node(nand, pdev->dev.of_node);
	flctl_mtd->dev.parent = &pdev->dev;
	flctl->pdev = pdev;
	flctl->hwecc = pdata->has_hwecc;
	flctl->holden = pdata->use_holden;
	flctl->flcmncr_base = pdata->flcmncr_val;
	flctl->flintdmacr_base = flctl->hwecc ? (STERINTE | ECERB) : STERINTE;

	/* 20 us command delay time */
	nand->legacy.chip_delay = 20;

	nand->legacy.read_byte = flctl_read_byte;
	nand->legacy.write_buf = flctl_write_buf;
	nand->legacy.read_buf = flctl_read_buf;
	nand->legacy.select_chip = flctl_select_chip;
	nand->legacy.cmdfunc = flctl_cmdfunc;
	nand->legacy.set_features = nand_get_set_features_notsupp;
	nand->legacy.get_features = nand_get_set_features_notsupp;

	if (pdata->flcmncr_val & SEL_16BIT)
		nand->options |= NAND_BUSWIDTH_16;

	nand->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_resume(&pdev->dev);

	flctl_setup_dma(flctl);

	nand->legacy.dummy_controller.ops = &flctl_nand_controller_ops;
	ret = nand_scan(nand, 1);
	if (ret)
		goto err_chip;

	ret = mtd_device_register(flctl_mtd, pdata->parts, pdata->nr_parts);
	if (ret)
		goto cleanup_nand;

	return 0;

cleanup_nand:
	nand_cleanup(nand);
err_chip:
	flctl_release_dma(flctl);
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static void flctl_remove(struct platform_device *pdev)
{
	struct sh_flctl *flctl = platform_get_drvdata(pdev);
	struct nand_chip *chip = &flctl->chip;
	int ret;

	flctl_release_dma(flctl);
	ret = mtd_device_unregister(nand_to_mtd(chip));
	WARN_ON(ret);
	nand_cleanup(chip);
	pm_runtime_disable(&pdev->dev);
}

static struct platform_driver flctl_driver = {
	.probe		= flctl_probe,
	.remove_new	= flctl_remove,
	.driver = {
		.name	= "sh_flctl",
		.of_match_table = of_flctl_match,
	},
};

module_platform_driver(flctl_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_DESCRIPTION("SuperH FLCTL driver");
MODULE_ALIAS("platform:sh_flctl");