// SPDX-License-Identifier: GPL-2.0
/*
 * SuperH FLCTL NAND controller
 *
 * Copyright (c) 2008 Renesas Solutions Corp.
 * Copyright (c) 2008 Atom Create Engineering Co., Ltd.
 *
 * Based on fsl_elbc_nand.c, Copyright (c) 2006-2007 Freescale Semiconductor
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/sh_flctl.h>

static int flctl_4secc_ooblayout_sp_ecc(struct mtd_info *mtd, int section,
					struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section)
		return -ERANGE;

	oobregion->offset = 0;
	oobregion->length = chip->ecc.bytes;

	return 0;
}

static int flctl_4secc_ooblayout_sp_free(struct mtd_info *mtd, int section,
					 struct mtd_oob_region *oobregion)
{
	if (section)
		return -ERANGE;

	oobregion->offset = 12;
	oobregion->length = 4;

	return 0;
}

static const struct mtd_ooblayout_ops flctl_4secc_oob_smallpage_ops = {
	.ecc = flctl_4secc_ooblayout_sp_ecc,
	.free = flctl_4secc_ooblayout_sp_free,
};

static int flctl_4secc_ooblayout_lp_ecc(struct mtd_info *mtd, int section,
					struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section >= chip->ecc.steps)
		return -ERANGE;

	oobregion->offset = (section * 16) + 6;
	oobregion->length = chip->ecc.bytes;

	return 0;
}

static int flctl_4secc_ooblayout_lp_free(struct mtd_info *mtd, int section,
					 struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section >= chip->ecc.steps)
		return -ERANGE;

	oobregion->offset = section * 16;
	oobregion->length = 6;

	if (!section) {
		oobregion->offset += 2;
		oobregion->length -= 2;
	}

	return 0;
}

static const struct mtd_ooblayout_ops flctl_4secc_oob_largepage_ops = {
	.ecc = flctl_4secc_ooblayout_lp_ecc,
	.free = flctl_4secc_ooblayout_lp_free,
};

static uint8_t scan_ff_pattern[] = { 0xff, 0xff };

static struct nand_bbt_descr flctl_4secc_smallpage = {
	.offs = 11,
	.len = 1,
	.pattern = scan_ff_pattern,
};

static struct nand_bbt_descr flctl_4secc_largepage = {
	.offs = 0,
	.len = 2,
	.pattern = scan_ff_pattern,
};

static void empty_fifo(struct sh_flctl *flctl)
{
	writel(flctl->flintdmacr_base | AC1CLR | AC0CLR, FLINTDMACR(flctl));
	writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
}

static void start_translation(struct sh_flctl *flctl)
{
	writeb(TRSTRT, FLTRCR(flctl));
}

static void timeout_error(struct sh_flctl *flctl, const char *str)
{
	dev_err(&flctl->pdev->dev, "Timeout occurred in %s\n", str);
}

static void wait_completion(struct sh_flctl *flctl)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;

	while (timeout--) {
		if (readb(FLTRCR(flctl)) & TREND) {
			writeb(0x0, FLTRCR(flctl));
			return;
		}
		udelay(1);
	}

	timeout_error(flctl, __func__);
	writeb(0x0, FLTRCR(flctl));
}
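
/*
 * Every FLCTL transaction follows the handshake seen above: program the
 * command/address registers, kick the translation with TRSTRT in FLTRCR,
 * then poll FLTRCR for TREND (wait_completion()). The DMA helpers below
 * are optional; whenever a channel is missing or a transfer fails, the
 * driver falls back to PIO through the FIFO wait helpers.
 */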

static void flctl_dma_complete(void *param)
{
	struct sh_flctl *flctl = param;

	complete(&flctl->dma_complete);
}

static void flctl_release_dma(struct sh_flctl *flctl)
{
	if (flctl->chan_fifo0_rx) {
		dma_release_channel(flctl->chan_fifo0_rx);
		flctl->chan_fifo0_rx = NULL;
	}
	if (flctl->chan_fifo0_tx) {
		dma_release_channel(flctl->chan_fifo0_tx);
		flctl->chan_fifo0_tx = NULL;
	}
}

static void flctl_setup_dma(struct sh_flctl *flctl)
{
	dma_cap_mask_t mask;
	struct dma_slave_config cfg;
	struct platform_device *pdev = flctl->pdev;
	struct sh_flctl_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int ret;

	if (!pdata)
		return;

	if (pdata->slave_id_fifo0_tx <= 0 || pdata->slave_id_fifo0_rx <= 0)
		return;

	/* We can only use DMA for both Tx and Rx, or not at all */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	flctl->chan_fifo0_tx = dma_request_channel(mask, shdma_chan_filter,
				(void *)(uintptr_t)pdata->slave_id_fifo0_tx);
	dev_dbg(&pdev->dev, "%s: TX: got channel %p\n", __func__,
		flctl->chan_fifo0_tx);

	if (!flctl->chan_fifo0_tx)
		return;

	memset(&cfg, 0, sizeof(cfg));
	cfg.direction = DMA_MEM_TO_DEV;
	cfg.dst_addr = flctl->fifo;
	cfg.src_addr = 0;
	ret = dmaengine_slave_config(flctl->chan_fifo0_tx, &cfg);
	if (ret < 0)
		goto err;

	flctl->chan_fifo0_rx = dma_request_channel(mask, shdma_chan_filter,
				(void *)(uintptr_t)pdata->slave_id_fifo0_rx);
	dev_dbg(&pdev->dev, "%s: RX: got channel %p\n", __func__,
		flctl->chan_fifo0_rx);

	if (!flctl->chan_fifo0_rx)
		goto err;

	cfg.direction = DMA_DEV_TO_MEM;
	cfg.dst_addr = 0;
	cfg.src_addr = flctl->fifo;
	ret = dmaengine_slave_config(flctl->chan_fifo0_rx, &cfg);
	if (ret < 0)
		goto err;

	init_completion(&flctl->dma_complete);

	return;

err:
	flctl_release_dma(flctl);
}

static void set_addr(struct mtd_info *mtd, int column, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint32_t addr = 0;

	if (column == -1) {
		addr = page_addr;	/* ERASE1 */
	} else if (page_addr != -1) {
		/* SEQIN, READ0, etc. */
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			column >>= 1;
		if (flctl->page_size) {
			addr = column & 0x0FFF;
			addr |= (page_addr & 0xff) << 16;
			addr |= ((page_addr >> 8) & 0xff) << 24;
			/* larger than 128MB */
			if (flctl->rw_ADRCNT == ADRCNT2_E) {
				uint32_t addr2;
				addr2 = (page_addr >> 16) & 0xff;
				writel(addr2, FLADR2(flctl));
			}
		} else {
			addr = column;
			addr |= (page_addr & 0xff) << 8;
			addr |= ((page_addr >> 8) & 0xff) << 16;
			addr |= ((page_addr >> 16) & 0xff) << 24;
		}
	}
	writel(addr, FLADR(flctl));
}
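
/*
 * FLADR layout as packed by set_addr() above for large-page devices
 * (flctl->page_size != 0):
 *
 *	bits  0..11	column within the page
 *	bits 16..23	page_addr[7:0]
 *	bits 24..31	page_addr[15:8]
 *
 * Devices needing a fifth address cycle (rw_ADRCNT == ADRCNT2_E, i.e.
 * larger than 128MB) get page_addr[23:16] through FLADR2 instead.
 */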

static void wait_rfifo_ready(struct sh_flctl *flctl)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;

	while (timeout--) {
		uint32_t val;
		/* check FIFO */
		val = readl(FLDTCNTR(flctl)) >> 16;
		if (val & 0xFF)
			return;
		udelay(1);
	}
	timeout_error(flctl, __func__);
}

static void wait_wfifo_ready(struct sh_flctl *flctl)
{
	uint32_t len, timeout = LOOP_TIMEOUT_MAX;

	while (timeout--) {
		/* check FIFO */
		len = (readl(FLDTCNTR(flctl)) >> 16) & 0xFF;
		if (len >= 4)
			return;
		udelay(1);
	}
	timeout_error(flctl, __func__);
}

static enum flctl_ecc_res_t wait_recfifo_ready
		(struct sh_flctl *flctl, int sector_number)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;
	void __iomem *ecc_reg[4];
	int i;
	int state = FL_SUCCESS;
	uint32_t data, size;

	/*
	 * First this loop checks in FLDTCNTR if we are ready to read out the
	 * oob data. This is the case if either all went fine without errors
	 * or if the bottom part of the loop corrected the errors or marked
	 * them as uncorrectable and the controller is given time to push the
	 * data into the FIFO.
	 */
	while (timeout--) {
		/* check if all is ok and we can read out the OOB */
		size = readl(FLDTCNTR(flctl)) >> 24;
		if ((size & 0xFF) == 4)
			return state;

		/* check if a correction code has been calculated */
		if (!(readl(FL4ECCCR(flctl)) & _4ECCEND)) {
			/*
			 * either we wait for the fifo to be filled or a
			 * correction pattern is being generated
			 */
			udelay(1);
			continue;
		}

		/* check for an uncorrectable error */
		if (readl(FL4ECCCR(flctl)) & _4ECCFA) {
			/* check if we face a non-empty page */
			for (i = 0; i < 512; i++) {
				if (flctl->done_buff[i] != 0xff) {
					state = FL_ERROR; /* can't correct */
					break;
				}
			}

			if (state == FL_SUCCESS)
				dev_dbg(&flctl->pdev->dev,
					"reading empty sector %d, ecc error ignored\n",
					sector_number);

			writel(0, FL4ECCCR(flctl));
			continue;
		}

		/* start error correction */
		ecc_reg[0] = FL4ECCRESULT0(flctl);
		ecc_reg[1] = FL4ECCRESULT1(flctl);
		ecc_reg[2] = FL4ECCRESULT2(flctl);
		ecc_reg[3] = FL4ECCRESULT3(flctl);

		for (i = 0; i < 3; i++) {
			uint8_t org;
			unsigned int index;

			data = readl(ecc_reg[i]);

			if (flctl->page_size)
				index = (512 * sector_number) +
					(data >> 16);
			else
				index = data >> 16;

			org = flctl->done_buff[index];
			flctl->done_buff[index] = org ^ (data & 0xFF);
		}
		state = FL_REPAIRABLE;
		writel(0, FL4ECCCR(flctl));
	}

	timeout_error(flctl, __func__);
	return FL_TIMEOUT;	/* timeout */
}

static void wait_wecfifo_ready(struct sh_flctl *flctl)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;
	uint32_t len;

	while (timeout--) {
		/* check FLECFIFO */
		len = (readl(FLDTCNTR(flctl)) >> 24) & 0xFF;
		if (len >= 4)
			return;
		udelay(1);
	}
	timeout_error(flctl, __func__);
}
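
/*
 * The wait helpers above all poll FLDTCNTR, which doubles as a FIFO
 * status register: bits 16..23 report the FLDTFIFO fill level and bits
 * 24..31 the FLECFIFO fill level. Each helper polls with a 1 us delay
 * for up to LOOP_TIMEOUT_MAX iterations before reporting a timeout.
 */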

static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
					int len, enum dma_data_direction dir)
{
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan;
	enum dma_transfer_direction tr_dir;
	dma_addr_t dma_addr;
	dma_cookie_t cookie;
	uint32_t reg;
	int ret;

	if (dir == DMA_FROM_DEVICE) {
		chan = flctl->chan_fifo0_rx;
		tr_dir = DMA_DEV_TO_MEM;
	} else {
		chan = flctl->chan_fifo0_tx;
		tr_dir = DMA_MEM_TO_DEV;
	}

	dma_addr = dma_map_single(chan->device->dev, buf, len, dir);

	if (!dma_mapping_error(chan->device->dev, dma_addr))
		desc = dmaengine_prep_slave_single(chan, dma_addr, len,
			tr_dir, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (desc) {
		reg = readl(FLINTDMACR(flctl));
		reg |= DREQ0EN;
		writel(reg, FLINTDMACR(flctl));

		desc->callback = flctl_dma_complete;
		desc->callback_param = flctl;
		cookie = dmaengine_submit(desc);
		if (dma_submit_error(cookie)) {
			ret = dma_submit_error(cookie);
			dev_warn(&flctl->pdev->dev,
				 "DMA submit failed, falling back to PIO\n");
			goto out;
		}

		dma_async_issue_pending(chan);
	} else {
		/* DMA failed, fall back to PIO */
		flctl_release_dma(flctl);
		dev_warn(&flctl->pdev->dev,
			 "DMA failed, falling back to PIO\n");
		ret = -EIO;
		goto out;
	}

	ret = wait_for_completion_timeout(&flctl->dma_complete,
					  msecs_to_jiffies(3000));

	if (ret <= 0) {
		dmaengine_terminate_all(chan);
		dev_err(&flctl->pdev->dev, "DMA transfer timed out\n");
	}

out:
	reg = readl(FLINTDMACR(flctl));
	reg &= ~DREQ0EN;
	writel(reg, FLINTDMACR(flctl));

	dma_unmap_single(chan->device->dev, dma_addr, len, dir);

	/* ret > 0 is success */
	return ret;
}

static void read_datareg(struct sh_flctl *flctl, int offset)
{
	unsigned long data;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	wait_completion(flctl);

	data = readl(FLDATAR(flctl));
	*buf = le32_to_cpu(data);
}

static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
{
	int i, len_4align;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	len_4align = (rlen + 3) / 4;

	/* initiate DMA transfer */
	if (flctl->chan_fifo0_rx && rlen >= 32 &&
	    flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_FROM_DEVICE) > 0)
		goto convert;	/* DMA success */

	/* do polling transfer */
	for (i = 0; i < len_4align; i++) {
		wait_rfifo_ready(flctl);
		buf[i] = readl(FLDTFIFO(flctl));
	}

convert:
	for (i = 0; i < len_4align; i++)
		buf[i] = be32_to_cpu(buf[i]);
}

static enum flctl_ecc_res_t read_ecfiforeg
		(struct sh_flctl *flctl, uint8_t *buff, int sector)
{
	int i;
	enum flctl_ecc_res_t res;
	unsigned long *ecc_buf = (unsigned long *)buff;

	res = wait_recfifo_ready(flctl, sector);

	if (res != FL_ERROR) {
		for (i = 0; i < 4; i++) {
			ecc_buf[i] = readl(FLECFIFO(flctl));
			ecc_buf[i] = be32_to_cpu(ecc_buf[i]);
		}
	}

	return res;
}

static void write_fiforeg(struct sh_flctl *flctl, int rlen,
					unsigned int offset)
{
	int i, len_4align;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	len_4align = (rlen + 3) / 4;
	for (i = 0; i < len_4align; i++) {
		wait_wfifo_ready(flctl);
		writel(cpu_to_be32(buf[i]), FLDTFIFO(flctl));
	}
}
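
/*
 * The FIFOs present data big-endian, so every PIO access above is wrapped
 * in cpu_to_be32()/be32_to_cpu(). write_ec_fiforeg() below byte-swaps the
 * buffer in place up front so that the DMA fast path and the PIO fallback
 * can share the same converted data.
 */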

static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen,
						unsigned int offset)
{
	int i, len_4align;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	len_4align = (rlen + 3) / 4;

	for (i = 0; i < len_4align; i++)
		buf[i] = cpu_to_be32(buf[i]);

	/* initiate DMA transfer */
	if (flctl->chan_fifo0_tx && rlen >= 32 &&
	    flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_TO_DEVICE) > 0)
		return;	/* DMA success */

	/* do polling transfer */
	for (i = 0; i < len_4align; i++) {
		wait_wecfifo_ready(flctl);
		writel(buf[i], FLECFIFO(flctl));
	}
}

static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint32_t flcmncr_val = flctl->flcmncr_base & ~SEL_16BIT;
	uint32_t flcmdcr_val, addr_len_bytes = 0;

	/* Set SNAND bit if page size is 2048 bytes */
	if (flctl->page_size)
		flcmncr_val |= SNAND_E;
	else
		flcmncr_val &= ~SNAND_E;

	/* default FLCMDCR val */
	flcmdcr_val = DOCMD1_E | DOADR_E;

	/* Set for FLCMDCR */
	switch (cmd) {
	case NAND_CMD_ERASE1:
		addr_len_bytes = flctl->erase_ADRCNT;
		flcmdcr_val |= DOCMD2_E;
		break;
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
	case NAND_CMD_RNDOUT:
		addr_len_bytes = flctl->rw_ADRCNT;
		flcmdcr_val |= CDSRC_E;
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			flcmncr_val |= SEL_16BIT;
		break;
	case NAND_CMD_SEQIN:
		/* here the FLCMCDR value is the read cmd: READ0, READ1 or READ00 */
		flcmdcr_val &= ~DOADR_E;	/* ONLY execute 1st cmd */
		break;
	case NAND_CMD_PAGEPROG:
		addr_len_bytes = flctl->rw_ADRCNT;
		flcmdcr_val |= DOCMD2_E | CDSRC_E | SELRW;
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			flcmncr_val |= SEL_16BIT;
		break;
	case NAND_CMD_READID:
		flcmncr_val &= ~SNAND_E;
		flcmdcr_val |= CDSRC_E;
		addr_len_bytes = ADRCNT_1;
		break;
	case NAND_CMD_STATUS:
	case NAND_CMD_RESET:
		flcmncr_val &= ~SNAND_E;
		flcmdcr_val &= ~(DOADR_E | DOSR_E);
		break;
	default:
		break;
	}

	/* Set address bytes parameter */
	flcmdcr_val |= addr_len_bytes;

	/* Now actually write */
	writel(flcmncr_val, FLCMNCR(flctl));
	writel(flcmdcr_val, FLCMDCR(flctl));
	writel(flcmcdr_val, FLCMCDR(flctl));
}

static int flctl_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
				 int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	nand_read_page_op(chip, page, 0, buf, mtd->writesize);
	if (oob_required)
		chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
	return 0;
}

static int flctl_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
				  int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
	chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
	return nand_prog_page_end_op(chip);
}
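
/*
 * With hardware ECC the controller operates on 512-byte sectors, each
 * paired with 16 OOB bytes: four sectors for a 2048-byte page, one for
 * a 512-byte page. The execmd_* helpers below move sector data through
 * FLDTFIFO and the OOB/ECC bytes through FLECFIFO, with the 4-symbol
 * ECC unit patching correctable errors directly in done_buff.
 */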

static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int sector, page_sectors;
	enum flctl_ecc_res_t ecc_result;

	page_sectors = flctl->page_size ? 4 : 1;

	set_cmd_regs(mtd, NAND_CMD_READ0,
		(NAND_CMD_READSTART << 8) | NAND_CMD_READ0);

	writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT,
		 FLCMNCR(flctl));
	writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
	writel(page_addr << 2, FLADR(flctl));

	empty_fifo(flctl);
	start_translation(flctl);

	for (sector = 0; sector < page_sectors; sector++) {
		read_fiforeg(flctl, 512, 512 * sector);

		ecc_result = read_ecfiforeg(flctl,
			&flctl->done_buff[mtd->writesize + 16 * sector],
			sector);

		switch (ecc_result) {
		case FL_REPAIRABLE:
			dev_info(&flctl->pdev->dev,
				"applied ecc on page 0x%x\n", page_addr);
			mtd->ecc_stats.corrected++;
			break;
		case FL_ERROR:
			dev_warn(&flctl->pdev->dev,
				"page 0x%x contains corrupted data\n",
				page_addr);
			mtd->ecc_stats.failed++;
			break;
		default:
			break;
		}
	}

	wait_completion(flctl);

	writel(readl(FLCMNCR(flctl)) & ~(ACM_SACCES_MODE | _4ECCCORRECT),
			FLCMNCR(flctl));
}

static void execmd_read_oob(struct mtd_info *mtd, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int page_sectors = flctl->page_size ? 4 : 1;
	int i;

	set_cmd_regs(mtd, NAND_CMD_READ0,
		(NAND_CMD_READSTART << 8) | NAND_CMD_READ0);

	empty_fifo(flctl);

	for (i = 0; i < page_sectors; i++) {
		set_addr(mtd, (512 + 16) * i + 512, page_addr);
		writel(16, FLDTCNTR(flctl));

		start_translation(flctl);
		read_fiforeg(flctl, 16, 16 * i);
		wait_completion(flctl);
	}
}

static void execmd_write_page_sector(struct mtd_info *mtd)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int page_addr = flctl->seqin_page_addr;
	int sector, page_sectors;

	page_sectors = flctl->page_size ? 4 : 1;

	set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
			(NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);

	empty_fifo(flctl);
	writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE, FLCMNCR(flctl));
	writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
	writel(page_addr << 2, FLADR(flctl));
	start_translation(flctl);

	for (sector = 0; sector < page_sectors; sector++) {
		write_fiforeg(flctl, 512, 512 * sector);
		write_ec_fiforeg(flctl, 16, mtd->writesize + 16 * sector);
	}

	wait_completion(flctl);
	writel(readl(FLCMNCR(flctl)) & ~ACM_SACCES_MODE, FLCMNCR(flctl));
}
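
/*
 * OOB-only accesses address the spare area of each 528-byte frame
 * directly: the 16 spare bytes of sector N start at column N * 528 + 512,
 * the offset computed in execmd_read_oob() above and execmd_write_oob()
 * below.
 */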

static void execmd_write_oob(struct mtd_info *mtd)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int page_addr = flctl->seqin_page_addr;
	int sector, page_sectors;

	page_sectors = flctl->page_size ? 4 : 1;

	set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
			(NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);

	for (sector = 0; sector < page_sectors; sector++) {
		empty_fifo(flctl);
		set_addr(mtd, sector * 528 + 512, page_addr);
		writel(16, FLDTCNTR(flctl));	/* set transfer size */

		start_translation(flctl);
		write_fiforeg(flctl, 16, 16 * sector);
		wait_completion(flctl);
	}
}
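
/*
 * flctl_cmdfunc() implements the legacy ->cmdfunc() hook: it maps the raw
 * NAND opcodes issued by the core onto FLCTL register sequences and stages
 * all data in done_buff, from which the read_byte()/read_buf() callbacks
 * further below are later served.
 */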

static void flctl_cmdfunc(struct nand_chip *chip, unsigned int command,
			int column, int page_addr)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint32_t read_cmd = 0;

	pm_runtime_get_sync(&flctl->pdev->dev);

	flctl->read_bytes = 0;
	if (command != NAND_CMD_PAGEPROG)
		flctl->index = 0;

	switch (command) {
	case NAND_CMD_READ1:
	case NAND_CMD_READ0:
		if (flctl->hwecc) {
			/* read page with hwecc */
			execmd_read_page_sector(mtd, page_addr);
			break;
		}
		if (flctl->page_size)
			set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
				| command);
		else
			set_cmd_regs(mtd, command, command);

		set_addr(mtd, 0, page_addr);

		flctl->read_bytes = mtd->writesize + mtd->oobsize;
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			column >>= 1;
		flctl->index += column;
		goto read_normal_exit;

	case NAND_CMD_READOOB:
		if (flctl->hwecc) {
			/* read page with hwecc */
			execmd_read_oob(mtd, page_addr);
			break;
		}

		if (flctl->page_size) {
			set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
				| NAND_CMD_READ0);
			set_addr(mtd, mtd->writesize, page_addr);
		} else {
			set_cmd_regs(mtd, command, command);
			set_addr(mtd, 0, page_addr);
		}
		flctl->read_bytes = mtd->oobsize;
		goto read_normal_exit;

	case NAND_CMD_RNDOUT:
		if (flctl->hwecc)
			break;

		if (flctl->page_size)
			set_cmd_regs(mtd, command, (NAND_CMD_RNDOUTSTART << 8)
				| command);
		else
			set_cmd_regs(mtd, command, command);

		set_addr(mtd, column, 0);

		flctl->read_bytes = mtd->writesize + mtd->oobsize - column;
		goto read_normal_exit;

	case NAND_CMD_READID:
		set_cmd_regs(mtd, command, command);

		/* READID is always performed using an 8-bit bus */
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			column <<= 1;
		set_addr(mtd, column, 0);

		flctl->read_bytes = 8;
		writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
		empty_fifo(flctl);
		start_translation(flctl);
		read_fiforeg(flctl, flctl->read_bytes, 0);
		wait_completion(flctl);
		break;

	case NAND_CMD_ERASE1:
		flctl->erase1_page_addr = page_addr;
		break;

	case NAND_CMD_ERASE2:
		set_cmd_regs(mtd, NAND_CMD_ERASE1,
			(command << 8) | NAND_CMD_ERASE1);
		set_addr(mtd, -1, flctl->erase1_page_addr);
		start_translation(flctl);
		wait_completion(flctl);
		break;

	case NAND_CMD_SEQIN:
		if (!flctl->page_size) {
			/* output read command */
			if (column >= mtd->writesize) {
				column -= mtd->writesize;
				read_cmd = NAND_CMD_READOOB;
			} else if (column < 256) {
				read_cmd = NAND_CMD_READ0;
			} else {
				column -= 256;
				read_cmd = NAND_CMD_READ1;
			}
		}
		flctl->seqin_column = column;
		flctl->seqin_page_addr = page_addr;
		flctl->seqin_read_cmd = read_cmd;
		break;

	case NAND_CMD_PAGEPROG:
		empty_fifo(flctl);
		if (!flctl->page_size) {
			set_cmd_regs(mtd, NAND_CMD_SEQIN,
					flctl->seqin_read_cmd);
			set_addr(mtd, -1, -1);
			writel(0, FLDTCNTR(flctl));	/* set 0 size */
			start_translation(flctl);
			wait_completion(flctl);
		}
		if (flctl->hwecc) {
			/* write page with hwecc */
			if (flctl->seqin_column == mtd->writesize)
				execmd_write_oob(mtd);
			else if (!flctl->seqin_column)
				execmd_write_page_sector(mtd);
			else
				pr_err("Invalid address!?\n");
			break;
		}
		set_cmd_regs(mtd, command, (command << 8) | NAND_CMD_SEQIN);
		set_addr(mtd, flctl->seqin_column, flctl->seqin_page_addr);
		writel(flctl->index, FLDTCNTR(flctl));	/* set write size */
		start_translation(flctl);
		write_fiforeg(flctl, flctl->index, 0);
		wait_completion(flctl);
		break;

	case NAND_CMD_STATUS:
		set_cmd_regs(mtd, command, command);
		set_addr(mtd, -1, -1);

		flctl->read_bytes = 1;
		writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
		start_translation(flctl);
		read_datareg(flctl, 0); /* read and end */
		break;

	case NAND_CMD_RESET:
		set_cmd_regs(mtd, command, command);
		set_addr(mtd, -1, -1);

		writel(0, FLDTCNTR(flctl));	/* set 0 size */
		start_translation(flctl);
		wait_completion(flctl);
		break;

	default:
		break;
	}
	goto runtime_exit;

read_normal_exit:
	writel(flctl->read_bytes, FLDTCNTR(flctl));	/* set read size */
	empty_fifo(flctl);
	start_translation(flctl);
	read_fiforeg(flctl, flctl->read_bytes, 0);
	wait_completion(flctl);
runtime_exit:
	pm_runtime_put_sync(&flctl->pdev->dev);
}

static void flctl_select_chip(struct nand_chip *chip, int chipnr)
{
	struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
	int ret;

	switch (chipnr) {
	case -1:
		flctl->flcmncr_base &= ~CE0_ENABLE;

		pm_runtime_get_sync(&flctl->pdev->dev);
		writel(flctl->flcmncr_base, FLCMNCR(flctl));

		if (flctl->qos_request) {
			dev_pm_qos_remove_request(&flctl->pm_qos);
			flctl->qos_request = 0;
		}

		pm_runtime_put_sync(&flctl->pdev->dev);
		break;
	case 0:
		flctl->flcmncr_base |= CE0_ENABLE;

		if (!flctl->qos_request) {
			ret = dev_pm_qos_add_request(&flctl->pdev->dev,
							&flctl->pm_qos,
							DEV_PM_QOS_RESUME_LATENCY,
							100);
			if (ret < 0)
				dev_err(&flctl->pdev->dev,
					"PM QoS request failed: %d\n", ret);
			flctl->qos_request = 1;
		}

		if (flctl->holden) {
			pm_runtime_get_sync(&flctl->pdev->dev);
			writel(HOLDEN, FLHOLDCR(flctl));
			pm_runtime_put_sync(&flctl->pdev->dev);
		}
		break;
	default:
		BUG();
	}
}

static void flctl_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
{
	struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));

	memcpy(&flctl->done_buff[flctl->index], buf, len);
	flctl->index += len;
}

static uint8_t flctl_read_byte(struct nand_chip *chip)
{
	struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
	uint8_t data;

	data = flctl->done_buff[flctl->index];
	flctl->index++;
	return data;
}

static void flctl_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
	struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));

	memcpy(buf, &flctl->done_buff[flctl->index], len);
	flctl->index += len;
}
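
/*
 * attach_chip() below selects the number of address cycles from the
 * detected geometry (read-write / erase cycles, programmed through
 * rw_ADRCNT and erase_ADRCNT):
 *
 *	512-byte pages:		<= 128KB: 2/1,	<= 32MB: 3/2,	else 4/3
 *	2048-byte pages:	<= 512KB: 3/1,	<= 128MB: 4/2,	else 5/3
 *
 * where "5" means ADRCNT2_E, i.e. the extra address byte in FLADR2.
 */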

static int flctl_chip_attach_chip(struct nand_chip *chip)
{
	u64 targetsize = nanddev_target_size(&chip->base);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct sh_flctl *flctl = mtd_to_flctl(mtd);

	/*
	 * NAND_BUSWIDTH_16 may have been set by nand_scan_ident().
	 * Add the SEL_16BIT flag in flctl->flcmncr_base.
	 */
	if (chip->options & NAND_BUSWIDTH_16)
		flctl->flcmncr_base |= SEL_16BIT;

	if (mtd->writesize == 512) {
		flctl->page_size = 0;
		if (targetsize > (32 << 20)) {
			/* larger than 32MB */
			flctl->rw_ADRCNT = ADRCNT_4;
			flctl->erase_ADRCNT = ADRCNT_3;
		} else if (targetsize > (2 << 16)) {
			/* larger than 128KB */
			flctl->rw_ADRCNT = ADRCNT_3;
			flctl->erase_ADRCNT = ADRCNT_2;
		} else {
			flctl->rw_ADRCNT = ADRCNT_2;
			flctl->erase_ADRCNT = ADRCNT_1;
		}
	} else {
		flctl->page_size = 1;
		if (targetsize > (128 << 20)) {
			/* larger than 128MB */
			flctl->rw_ADRCNT = ADRCNT2_E;
			flctl->erase_ADRCNT = ADRCNT_3;
		} else if (targetsize > (8 << 16)) {
			/* larger than 512KB */
			flctl->rw_ADRCNT = ADRCNT_4;
			flctl->erase_ADRCNT = ADRCNT_2;
		} else {
			flctl->rw_ADRCNT = ADRCNT_3;
			flctl->erase_ADRCNT = ADRCNT_1;
		}
	}

	if (flctl->hwecc) {
		if (mtd->writesize == 512) {
			mtd_set_ooblayout(mtd, &flctl_4secc_oob_smallpage_ops);
			chip->badblock_pattern = &flctl_4secc_smallpage;
		} else {
			mtd_set_ooblayout(mtd, &flctl_4secc_oob_largepage_ops);
			chip->badblock_pattern = &flctl_4secc_largepage;
		}

		chip->ecc.size = 512;
		chip->ecc.bytes = 10;
		chip->ecc.strength = 4;
		chip->ecc.read_page = flctl_read_page_hwecc;
		chip->ecc.write_page = flctl_write_page_hwecc;
		chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;

		/* 4 symbols ECC enabled */
		flctl->flcmncr_base |= _4ECCEN;
	} else {
		chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
		chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
	}

	return 0;
}

static const struct nand_controller_ops flctl_nand_controller_ops = {
	.attach_chip = flctl_chip_attach_chip,
};

static irqreturn_t flctl_handle_flste(int irq, void *dev_id)
{
	struct sh_flctl *flctl = dev_id;

	dev_err(&flctl->pdev->dev, "flste irq: %x\n", readl(FLINTDMACR(flctl)));
	writel(flctl->flintdmacr_base, FLINTDMACR(flctl));

	return IRQ_HANDLED;
}

struct flctl_soc_config {
	unsigned long flcmncr_val;
	unsigned has_hwecc:1;
	unsigned use_holden:1;
};

static struct flctl_soc_config flctl_sh7372_config = {
	.flcmncr_val = CLK_16B_12L_4H | TYPESEL_SET | SHBUSSEL,
	.has_hwecc = 1,
	.use_holden = 1,
};

static const struct of_device_id of_flctl_match[] = {
	{ .compatible = "renesas,shmobile-flctl-sh7372",
	  .data = &flctl_sh7372_config },
	{},
};
MODULE_DEVICE_TABLE(of, of_flctl_match);

static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
{
	const struct flctl_soc_config *config;
	struct sh_flctl_platform_data *pdata;

	config = of_device_get_match_data(dev);
	if (!config) {
		dev_err(dev, "%s: no OF configuration attached\n", __func__);
		return NULL;
	}

	pdata = devm_kzalloc(dev, sizeof(struct sh_flctl_platform_data),
			     GFP_KERNEL);
	if (!pdata)
		return NULL;

	/* set SoC specific options */
	pdata->flcmncr_val = config->flcmncr_val;
	pdata->has_hwecc = config->has_hwecc;
	pdata->use_holden = config->use_holden;

	return pdata;
}
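
/*
 * A minimal device tree node matching of_flctl_match[] might look like
 * the sketch below (illustrative only; the address, size and any further
 * required properties depend on the SoC and its binding document):
 *
 *	flctl: flash-controller@... {
 *		compatible = "renesas,shmobile-flctl-sh7372";
 *		reg = <...>;
 *	};
 */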

static int flctl_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct sh_flctl *flctl;
	struct mtd_info *flctl_mtd;
	struct nand_chip *nand;
	struct sh_flctl_platform_data *pdata;
	int ret;
	int irq;

	flctl = devm_kzalloc(&pdev->dev, sizeof(struct sh_flctl), GFP_KERNEL);
	if (!flctl)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	flctl->reg = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(flctl->reg))
		return PTR_ERR(flctl->reg);
	flctl->fifo = res->start + 0x24; /* FLDTFIFO */

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, flctl_handle_flste, IRQF_SHARED,
			       "flste", flctl);
	if (ret) {
		dev_err(&pdev->dev, "request interrupt failed.\n");
		return ret;
	}

	if (pdev->dev.of_node)
		pdata = flctl_parse_dt(&pdev->dev);
	else
		pdata = dev_get_platdata(&pdev->dev);

	if (!pdata) {
		dev_err(&pdev->dev, "no setup data defined\n");
		return -EINVAL;
	}

	platform_set_drvdata(pdev, flctl);
	nand = &flctl->chip;
	flctl_mtd = nand_to_mtd(nand);
	nand_set_flash_node(nand, pdev->dev.of_node);
	flctl_mtd->dev.parent = &pdev->dev;
	flctl->pdev = pdev;
	flctl->hwecc = pdata->has_hwecc;
	flctl->holden = pdata->use_holden;
	flctl->flcmncr_base = pdata->flcmncr_val;
	flctl->flintdmacr_base = flctl->hwecc ? (STERINTE | ECERB) : STERINTE;

	/* Set address of hardware control function */
	/* 20 us command delay time */
	nand->legacy.chip_delay = 20;

	nand->legacy.read_byte = flctl_read_byte;
	nand->legacy.write_buf = flctl_write_buf;
	nand->legacy.read_buf = flctl_read_buf;
	nand->legacy.select_chip = flctl_select_chip;
	nand->legacy.cmdfunc = flctl_cmdfunc;
	nand->legacy.set_features = nand_get_set_features_notsupp;
	nand->legacy.get_features = nand_get_set_features_notsupp;

	if (pdata->flcmncr_val & SEL_16BIT)
		nand->options |= NAND_BUSWIDTH_16;

	nand->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_resume(&pdev->dev);

	flctl_setup_dma(flctl);

	nand->legacy.dummy_controller.ops = &flctl_nand_controller_ops;
	ret = nand_scan(nand, 1);
	if (ret)
		goto err_chip;

	ret = mtd_device_register(flctl_mtd, pdata->parts, pdata->nr_parts);
	if (ret)
		goto cleanup_nand;

	return 0;

cleanup_nand:
	nand_cleanup(nand);
err_chip:
	flctl_release_dma(flctl);
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static int flctl_remove(struct platform_device *pdev)
{
	struct sh_flctl *flctl = platform_get_drvdata(pdev);
	struct nand_chip *chip = &flctl->chip;
	int ret;

	flctl_release_dma(flctl);
	ret = mtd_device_unregister(nand_to_mtd(chip));
	WARN_ON(ret);
	nand_cleanup(chip);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

static struct platform_driver flctl_driver = {
	.remove = flctl_remove,
	.driver = {
		.name = "sh_flctl",
		.of_match_table = of_match_ptr(of_flctl_match),
	},
};

module_platform_driver_probe(flctl_driver, flctl_probe);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_DESCRIPTION("SuperH FLCTL driver");
MODULE_ALIAS("platform:sh_flctl");