1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * NAND Flash Controller Device Driver 4 * Copyright © 2009-2010, Intel Corporation and its suppliers. 5 * 6 * Copyright (c) 2017 Socionext Inc. 7 * Reworked by Masahiro Yamada <yamada.masahiro@socionext.com> 8 */ 9 10 #include <linux/bitfield.h> 11 #include <linux/completion.h> 12 #include <linux/dma-mapping.h> 13 #include <linux/interrupt.h> 14 #include <linux/io.h> 15 #include <linux/module.h> 16 #include <linux/mtd/mtd.h> 17 #include <linux/mtd/rawnand.h> 18 #include <linux/slab.h> 19 #include <linux/spinlock.h> 20 21 #include "denali.h" 22 23 #define DENALI_NAND_NAME "denali-nand" 24 #define DENALI_DEFAULT_OOB_SKIP_BYTES 8 25 26 /* for Indexed Addressing */ 27 #define DENALI_INDEXED_CTRL 0x00 28 #define DENALI_INDEXED_DATA 0x10 29 30 #define DENALI_MAP00 (0 << 26) /* direct access to buffer */ 31 #define DENALI_MAP01 (1 << 26) /* read/write pages in PIO */ 32 #define DENALI_MAP10 (2 << 26) /* high-level control plane */ 33 #define DENALI_MAP11 (3 << 26) /* direct controller access */ 34 35 /* MAP11 access cycle type */ 36 #define DENALI_MAP11_CMD ((DENALI_MAP11) | 0) /* command cycle */ 37 #define DENALI_MAP11_ADDR ((DENALI_MAP11) | 1) /* address cycle */ 38 #define DENALI_MAP11_DATA ((DENALI_MAP11) | 2) /* data cycle */ 39 40 #define DENALI_BANK(denali) ((denali)->active_bank << 24) 41 42 #define DENALI_INVALID_BANK -1 43 #define DENALI_NR_BANKS 4 44 45 static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd) 46 { 47 return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand); 48 } 49 50 /* 51 * Direct Addressing - the slave address forms the control information (command 52 * type, bank, block, and page address). The slave data is the actual data to 53 * be transferred. This mode requires 28 bits of address region allocated. 54 */ 55 static u32 denali_direct_read(struct denali_nand_info *denali, u32 addr) 56 { 57 return ioread32(denali->host + addr); 58 } 59 60 static void denali_direct_write(struct denali_nand_info *denali, u32 addr, 61 u32 data) 62 { 63 iowrite32(data, denali->host + addr); 64 } 65 66 /* 67 * Indexed Addressing - address translation module intervenes in passing the 68 * control information. This mode reduces the required address range. The 69 * control information and transferred data are latched by the registers in 70 * the translation module. 71 */ 72 static u32 denali_indexed_read(struct denali_nand_info *denali, u32 addr) 73 { 74 iowrite32(addr, denali->host + DENALI_INDEXED_CTRL); 75 return ioread32(denali->host + DENALI_INDEXED_DATA); 76 } 77 78 static void denali_indexed_write(struct denali_nand_info *denali, u32 addr, 79 u32 data) 80 { 81 iowrite32(addr, denali->host + DENALI_INDEXED_CTRL); 82 iowrite32(data, denali->host + DENALI_INDEXED_DATA); 83 } 84 85 /* 86 * Use the configuration feature register to determine the maximum number of 87 * banks that the hardware supports. 88 */ 89 static void denali_detect_max_banks(struct denali_nand_info *denali) 90 { 91 uint32_t features = ioread32(denali->reg + FEATURES); 92 93 denali->max_banks = 1 << FIELD_GET(FEATURES__N_BANKS, features); 94 95 /* the encoding changed from rev 5.0 to 5.1 */ 96 if (denali->revision < 0x0501) 97 denali->max_banks <<= 1; 98 } 99 100 static void denali_enable_irq(struct denali_nand_info *denali) 101 { 102 int i; 103 104 for (i = 0; i < DENALI_NR_BANKS; i++) 105 iowrite32(U32_MAX, denali->reg + INTR_EN(i)); 106 iowrite32(GLOBAL_INT_EN_FLAG, denali->reg + GLOBAL_INT_ENABLE); 107 } 108 109 static void denali_disable_irq(struct denali_nand_info *denali) 110 { 111 int i; 112 113 for (i = 0; i < DENALI_NR_BANKS; i++) 114 iowrite32(0, denali->reg + INTR_EN(i)); 115 iowrite32(0, denali->reg + GLOBAL_INT_ENABLE); 116 } 117 118 static void denali_clear_irq(struct denali_nand_info *denali, 119 int bank, uint32_t irq_status) 120 { 121 /* write one to clear bits */ 122 iowrite32(irq_status, denali->reg + INTR_STATUS(bank)); 123 } 124 125 static void denali_clear_irq_all(struct denali_nand_info *denali) 126 { 127 int i; 128 129 for (i = 0; i < DENALI_NR_BANKS; i++) 130 denali_clear_irq(denali, i, U32_MAX); 131 } 132 133 static irqreturn_t denali_isr(int irq, void *dev_id) 134 { 135 struct denali_nand_info *denali = dev_id; 136 irqreturn_t ret = IRQ_NONE; 137 uint32_t irq_status; 138 int i; 139 140 spin_lock(&denali->irq_lock); 141 142 for (i = 0; i < DENALI_NR_BANKS; i++) { 143 irq_status = ioread32(denali->reg + INTR_STATUS(i)); 144 if (irq_status) 145 ret = IRQ_HANDLED; 146 147 denali_clear_irq(denali, i, irq_status); 148 149 if (i != denali->active_bank) 150 continue; 151 152 denali->irq_status |= irq_status; 153 154 if (denali->irq_status & denali->irq_mask) 155 complete(&denali->complete); 156 } 157 158 spin_unlock(&denali->irq_lock); 159 160 return ret; 161 } 162 163 static void denali_reset_irq(struct denali_nand_info *denali) 164 { 165 unsigned long flags; 166 167 spin_lock_irqsave(&denali->irq_lock, flags); 168 denali->irq_status = 0; 169 denali->irq_mask = 0; 170 spin_unlock_irqrestore(&denali->irq_lock, flags); 171 } 172 173 static uint32_t denali_wait_for_irq(struct denali_nand_info *denali, 174 uint32_t irq_mask) 175 { 176 unsigned long time_left, flags; 177 uint32_t irq_status; 178 179 spin_lock_irqsave(&denali->irq_lock, flags); 180 181 irq_status = denali->irq_status; 182 183 if (irq_mask & irq_status) { 184 /* return immediately if the IRQ has already happened. */ 185 spin_unlock_irqrestore(&denali->irq_lock, flags); 186 return irq_status; 187 } 188 189 denali->irq_mask = irq_mask; 190 reinit_completion(&denali->complete); 191 spin_unlock_irqrestore(&denali->irq_lock, flags); 192 193 time_left = wait_for_completion_timeout(&denali->complete, 194 msecs_to_jiffies(1000)); 195 if (!time_left) { 196 dev_err(denali->dev, "timeout while waiting for irq 0x%x\n", 197 irq_mask); 198 return 0; 199 } 200 201 return denali->irq_status; 202 } 203 204 static void denali_read_buf(struct nand_chip *chip, uint8_t *buf, int len) 205 { 206 struct mtd_info *mtd = nand_to_mtd(chip); 207 struct denali_nand_info *denali = mtd_to_denali(mtd); 208 u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); 209 int i; 210 211 for (i = 0; i < len; i++) 212 buf[i] = denali->host_read(denali, addr); 213 } 214 215 static void denali_write_buf(struct nand_chip *chip, const uint8_t *buf, 216 int len) 217 { 218 struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip)); 219 u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); 220 int i; 221 222 for (i = 0; i < len; i++) 223 denali->host_write(denali, addr, buf[i]); 224 } 225 226 static void denali_read_buf16(struct nand_chip *chip, uint8_t *buf, int len) 227 { 228 struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip)); 229 u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); 230 uint16_t *buf16 = (uint16_t *)buf; 231 int i; 232 233 for (i = 0; i < len / 2; i++) 234 buf16[i] = denali->host_read(denali, addr); 235 } 236 237 static void denali_write_buf16(struct nand_chip *chip, const uint8_t *buf, 238 int len) 239 { 240 struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip)); 241 u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); 242 const uint16_t *buf16 = (const uint16_t *)buf; 243 int i; 244 245 for (i = 0; i < len / 2; i++) 246 denali->host_write(denali, addr, buf16[i]); 247 } 248 249 static uint8_t denali_read_byte(struct nand_chip *chip) 250 { 251 uint8_t byte; 252 253 denali_read_buf(chip, &byte, 1); 254 255 return byte; 256 } 257 258 static void denali_write_byte(struct nand_chip *chip, uint8_t byte) 259 { 260 denali_write_buf(chip, &byte, 1); 261 } 262 263 static void denali_cmd_ctrl(struct nand_chip *chip, int dat, unsigned int ctrl) 264 { 265 struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip)); 266 uint32_t type; 267 268 if (ctrl & NAND_CLE) 269 type = DENALI_MAP11_CMD; 270 else if (ctrl & NAND_ALE) 271 type = DENALI_MAP11_ADDR; 272 else 273 return; 274 275 /* 276 * Some commands are followed by chip->legacy.waitfunc. 277 * irq_status must be cleared here to catch the R/B# interrupt later. 278 */ 279 if (ctrl & NAND_CTRL_CHANGE) 280 denali_reset_irq(denali); 281 282 denali->host_write(denali, DENALI_BANK(denali) | type, dat); 283 } 284 285 static int denali_check_erased_page(struct mtd_info *mtd, 286 struct nand_chip *chip, uint8_t *buf, 287 unsigned long uncor_ecc_flags, 288 unsigned int max_bitflips) 289 { 290 struct denali_nand_info *denali = mtd_to_denali(mtd); 291 uint8_t *ecc_code = chip->oob_poi + denali->oob_skip_bytes; 292 int ecc_steps = chip->ecc.steps; 293 int ecc_size = chip->ecc.size; 294 int ecc_bytes = chip->ecc.bytes; 295 int i, stat; 296 297 for (i = 0; i < ecc_steps; i++) { 298 if (!(uncor_ecc_flags & BIT(i))) 299 continue; 300 301 stat = nand_check_erased_ecc_chunk(buf, ecc_size, 302 ecc_code, ecc_bytes, 303 NULL, 0, 304 chip->ecc.strength); 305 if (stat < 0) { 306 mtd->ecc_stats.failed++; 307 } else { 308 mtd->ecc_stats.corrected += stat; 309 max_bitflips = max_t(unsigned int, max_bitflips, stat); 310 } 311 312 buf += ecc_size; 313 ecc_code += ecc_bytes; 314 } 315 316 return max_bitflips; 317 } 318 319 static int denali_hw_ecc_fixup(struct mtd_info *mtd, 320 struct denali_nand_info *denali, 321 unsigned long *uncor_ecc_flags) 322 { 323 struct nand_chip *chip = mtd_to_nand(mtd); 324 int bank = denali->active_bank; 325 uint32_t ecc_cor; 326 unsigned int max_bitflips; 327 328 ecc_cor = ioread32(denali->reg + ECC_COR_INFO(bank)); 329 ecc_cor >>= ECC_COR_INFO__SHIFT(bank); 330 331 if (ecc_cor & ECC_COR_INFO__UNCOR_ERR) { 332 /* 333 * This flag is set when uncorrectable error occurs at least in 334 * one ECC sector. We can not know "how many sectors", or 335 * "which sector(s)". We need erase-page check for all sectors. 336 */ 337 *uncor_ecc_flags = GENMASK(chip->ecc.steps - 1, 0); 338 return 0; 339 } 340 341 max_bitflips = FIELD_GET(ECC_COR_INFO__MAX_ERRORS, ecc_cor); 342 343 /* 344 * The register holds the maximum of per-sector corrected bitflips. 345 * This is suitable for the return value of the ->read_page() callback. 346 * Unfortunately, we can not know the total number of corrected bits in 347 * the page. Increase the stats by max_bitflips. (compromised solution) 348 */ 349 mtd->ecc_stats.corrected += max_bitflips; 350 351 return max_bitflips; 352 } 353 354 static int denali_sw_ecc_fixup(struct mtd_info *mtd, 355 struct denali_nand_info *denali, 356 unsigned long *uncor_ecc_flags, uint8_t *buf) 357 { 358 unsigned int ecc_size = denali->nand.ecc.size; 359 unsigned int bitflips = 0; 360 unsigned int max_bitflips = 0; 361 uint32_t err_addr, err_cor_info; 362 unsigned int err_byte, err_sector, err_device; 363 uint8_t err_cor_value; 364 unsigned int prev_sector = 0; 365 uint32_t irq_status; 366 367 denali_reset_irq(denali); 368 369 do { 370 err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS); 371 err_sector = FIELD_GET(ECC_ERROR_ADDRESS__SECTOR, err_addr); 372 err_byte = FIELD_GET(ECC_ERROR_ADDRESS__OFFSET, err_addr); 373 374 err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO); 375 err_cor_value = FIELD_GET(ERR_CORRECTION_INFO__BYTE, 376 err_cor_info); 377 err_device = FIELD_GET(ERR_CORRECTION_INFO__DEVICE, 378 err_cor_info); 379 380 /* reset the bitflip counter when crossing ECC sector */ 381 if (err_sector != prev_sector) 382 bitflips = 0; 383 384 if (err_cor_info & ERR_CORRECTION_INFO__UNCOR) { 385 /* 386 * Check later if this is a real ECC error, or 387 * an erased sector. 388 */ 389 *uncor_ecc_flags |= BIT(err_sector); 390 } else if (err_byte < ecc_size) { 391 /* 392 * If err_byte is larger than ecc_size, means error 393 * happened in OOB, so we ignore it. It's no need for 394 * us to correct it err_device is represented the NAND 395 * error bits are happened in if there are more than 396 * one NAND connected. 397 */ 398 int offset; 399 unsigned int flips_in_byte; 400 401 offset = (err_sector * ecc_size + err_byte) * 402 denali->devs_per_cs + err_device; 403 404 /* correct the ECC error */ 405 flips_in_byte = hweight8(buf[offset] ^ err_cor_value); 406 buf[offset] ^= err_cor_value; 407 mtd->ecc_stats.corrected += flips_in_byte; 408 bitflips += flips_in_byte; 409 410 max_bitflips = max(max_bitflips, bitflips); 411 } 412 413 prev_sector = err_sector; 414 } while (!(err_cor_info & ERR_CORRECTION_INFO__LAST_ERR)); 415 416 /* 417 * Once handle all ECC errors, controller will trigger an 418 * ECC_TRANSACTION_DONE interrupt. 419 */ 420 irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE); 421 if (!(irq_status & INTR__ECC_TRANSACTION_DONE)) 422 return -EIO; 423 424 return max_bitflips; 425 } 426 427 static void denali_setup_dma64(struct denali_nand_info *denali, 428 dma_addr_t dma_addr, int page, int write) 429 { 430 uint32_t mode; 431 const int page_count = 1; 432 433 mode = DENALI_MAP10 | DENALI_BANK(denali) | page; 434 435 /* DMA is a three step process */ 436 437 /* 438 * 1. setup transfer type, interrupt when complete, 439 * burst len = 64 bytes, the number of pages 440 */ 441 denali->host_write(denali, mode, 442 0x01002000 | (64 << 16) | (write << 8) | page_count); 443 444 /* 2. set memory low address */ 445 denali->host_write(denali, mode, lower_32_bits(dma_addr)); 446 447 /* 3. set memory high address */ 448 denali->host_write(denali, mode, upper_32_bits(dma_addr)); 449 } 450 451 static void denali_setup_dma32(struct denali_nand_info *denali, 452 dma_addr_t dma_addr, int page, int write) 453 { 454 uint32_t mode; 455 const int page_count = 1; 456 457 mode = DENALI_MAP10 | DENALI_BANK(denali); 458 459 /* DMA is a four step process */ 460 461 /* 1. setup transfer type and # of pages */ 462 denali->host_write(denali, mode | page, 463 0x2000 | (write << 8) | page_count); 464 465 /* 2. set memory high address bits 23:8 */ 466 denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200); 467 468 /* 3. set memory low address bits 23:8 */ 469 denali->host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300); 470 471 /* 4. interrupt when complete, burst len = 64 bytes */ 472 denali->host_write(denali, mode | 0x14000, 0x2400); 473 } 474 475 static int denali_pio_read(struct denali_nand_info *denali, void *buf, 476 size_t size, int page) 477 { 478 u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page; 479 uint32_t *buf32 = (uint32_t *)buf; 480 uint32_t irq_status, ecc_err_mask; 481 int i; 482 483 if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) 484 ecc_err_mask = INTR__ECC_UNCOR_ERR; 485 else 486 ecc_err_mask = INTR__ECC_ERR; 487 488 denali_reset_irq(denali); 489 490 for (i = 0; i < size / 4; i++) 491 *buf32++ = denali->host_read(denali, addr); 492 493 irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC); 494 if (!(irq_status & INTR__PAGE_XFER_INC)) 495 return -EIO; 496 497 if (irq_status & INTR__ERASED_PAGE) 498 memset(buf, 0xff, size); 499 500 return irq_status & ecc_err_mask ? -EBADMSG : 0; 501 } 502 503 static int denali_pio_write(struct denali_nand_info *denali, 504 const void *buf, size_t size, int page) 505 { 506 u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page; 507 const uint32_t *buf32 = (uint32_t *)buf; 508 uint32_t irq_status; 509 int i; 510 511 denali_reset_irq(denali); 512 513 for (i = 0; i < size / 4; i++) 514 denali->host_write(denali, addr, *buf32++); 515 516 irq_status = denali_wait_for_irq(denali, 517 INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL); 518 if (!(irq_status & INTR__PROGRAM_COMP)) 519 return -EIO; 520 521 return 0; 522 } 523 524 static int denali_pio_xfer(struct denali_nand_info *denali, void *buf, 525 size_t size, int page, int write) 526 { 527 if (write) 528 return denali_pio_write(denali, buf, size, page); 529 else 530 return denali_pio_read(denali, buf, size, page); 531 } 532 533 static int denali_dma_xfer(struct denali_nand_info *denali, void *buf, 534 size_t size, int page, int write) 535 { 536 dma_addr_t dma_addr; 537 uint32_t irq_mask, irq_status, ecc_err_mask; 538 enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE; 539 int ret = 0; 540 541 dma_addr = dma_map_single(denali->dev, buf, size, dir); 542 if (dma_mapping_error(denali->dev, dma_addr)) { 543 dev_dbg(denali->dev, "Failed to DMA-map buffer. Trying PIO.\n"); 544 return denali_pio_xfer(denali, buf, size, page, write); 545 } 546 547 if (write) { 548 /* 549 * INTR__PROGRAM_COMP is never asserted for the DMA transfer. 550 * We can use INTR__DMA_CMD_COMP instead. This flag is asserted 551 * when the page program is completed. 552 */ 553 irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL; 554 ecc_err_mask = 0; 555 } else if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) { 556 irq_mask = INTR__DMA_CMD_COMP; 557 ecc_err_mask = INTR__ECC_UNCOR_ERR; 558 } else { 559 irq_mask = INTR__DMA_CMD_COMP; 560 ecc_err_mask = INTR__ECC_ERR; 561 } 562 563 iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE); 564 /* 565 * The ->setup_dma() hook kicks DMA by using the data/command 566 * interface, which belongs to a different AXI port from the 567 * register interface. Read back the register to avoid a race. 568 */ 569 ioread32(denali->reg + DMA_ENABLE); 570 571 denali_reset_irq(denali); 572 denali->setup_dma(denali, dma_addr, page, write); 573 574 irq_status = denali_wait_for_irq(denali, irq_mask); 575 if (!(irq_status & INTR__DMA_CMD_COMP)) 576 ret = -EIO; 577 else if (irq_status & ecc_err_mask) 578 ret = -EBADMSG; 579 580 iowrite32(0, denali->reg + DMA_ENABLE); 581 582 dma_unmap_single(denali->dev, dma_addr, size, dir); 583 584 if (irq_status & INTR__ERASED_PAGE) 585 memset(buf, 0xff, size); 586 587 return ret; 588 } 589 590 static int denali_data_xfer(struct denali_nand_info *denali, void *buf, 591 size_t size, int page, int raw, int write) 592 { 593 iowrite32(raw ? 0 : ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE); 594 iowrite32(raw ? TRANSFER_SPARE_REG__FLAG : 0, 595 denali->reg + TRANSFER_SPARE_REG); 596 597 if (denali->dma_avail) 598 return denali_dma_xfer(denali, buf, size, page, write); 599 else 600 return denali_pio_xfer(denali, buf, size, page, write); 601 } 602 603 static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip, 604 int page, int write) 605 { 606 struct denali_nand_info *denali = mtd_to_denali(mtd); 607 int writesize = mtd->writesize; 608 int oobsize = mtd->oobsize; 609 uint8_t *bufpoi = chip->oob_poi; 610 int ecc_steps = chip->ecc.steps; 611 int ecc_size = chip->ecc.size; 612 int ecc_bytes = chip->ecc.bytes; 613 int oob_skip = denali->oob_skip_bytes; 614 size_t size = writesize + oobsize; 615 int i, pos, len; 616 617 /* BBM at the beginning of the OOB area */ 618 if (write) 619 nand_prog_page_begin_op(chip, page, writesize, bufpoi, 620 oob_skip); 621 else 622 nand_read_page_op(chip, page, writesize, bufpoi, oob_skip); 623 bufpoi += oob_skip; 624 625 /* OOB ECC */ 626 for (i = 0; i < ecc_steps; i++) { 627 pos = ecc_size + i * (ecc_size + ecc_bytes); 628 len = ecc_bytes; 629 630 if (pos >= writesize) 631 pos += oob_skip; 632 else if (pos + len > writesize) 633 len = writesize - pos; 634 635 if (write) 636 nand_change_write_column_op(chip, pos, bufpoi, len, 637 false); 638 else 639 nand_change_read_column_op(chip, pos, bufpoi, len, 640 false); 641 bufpoi += len; 642 if (len < ecc_bytes) { 643 len = ecc_bytes - len; 644 if (write) 645 nand_change_write_column_op(chip, writesize + 646 oob_skip, bufpoi, 647 len, false); 648 else 649 nand_change_read_column_op(chip, writesize + 650 oob_skip, bufpoi, 651 len, false); 652 bufpoi += len; 653 } 654 } 655 656 /* OOB free */ 657 len = oobsize - (bufpoi - chip->oob_poi); 658 if (write) 659 nand_change_write_column_op(chip, size - len, bufpoi, len, 660 false); 661 else 662 nand_change_read_column_op(chip, size - len, bufpoi, len, 663 false); 664 } 665 666 static int denali_read_page_raw(struct nand_chip *chip, uint8_t *buf, 667 int oob_required, int page) 668 { 669 struct mtd_info *mtd = nand_to_mtd(chip); 670 struct denali_nand_info *denali = mtd_to_denali(mtd); 671 int writesize = mtd->writesize; 672 int oobsize = mtd->oobsize; 673 int ecc_steps = chip->ecc.steps; 674 int ecc_size = chip->ecc.size; 675 int ecc_bytes = chip->ecc.bytes; 676 void *tmp_buf = denali->buf; 677 int oob_skip = denali->oob_skip_bytes; 678 size_t size = writesize + oobsize; 679 int ret, i, pos, len; 680 681 ret = denali_data_xfer(denali, tmp_buf, size, page, 1, 0); 682 if (ret) 683 return ret; 684 685 /* Arrange the buffer for syndrome payload/ecc layout */ 686 if (buf) { 687 for (i = 0; i < ecc_steps; i++) { 688 pos = i * (ecc_size + ecc_bytes); 689 len = ecc_size; 690 691 if (pos >= writesize) 692 pos += oob_skip; 693 else if (pos + len > writesize) 694 len = writesize - pos; 695 696 memcpy(buf, tmp_buf + pos, len); 697 buf += len; 698 if (len < ecc_size) { 699 len = ecc_size - len; 700 memcpy(buf, tmp_buf + writesize + oob_skip, 701 len); 702 buf += len; 703 } 704 } 705 } 706 707 if (oob_required) { 708 uint8_t *oob = chip->oob_poi; 709 710 /* BBM at the beginning of the OOB area */ 711 memcpy(oob, tmp_buf + writesize, oob_skip); 712 oob += oob_skip; 713 714 /* OOB ECC */ 715 for (i = 0; i < ecc_steps; i++) { 716 pos = ecc_size + i * (ecc_size + ecc_bytes); 717 len = ecc_bytes; 718 719 if (pos >= writesize) 720 pos += oob_skip; 721 else if (pos + len > writesize) 722 len = writesize - pos; 723 724 memcpy(oob, tmp_buf + pos, len); 725 oob += len; 726 if (len < ecc_bytes) { 727 len = ecc_bytes - len; 728 memcpy(oob, tmp_buf + writesize + oob_skip, 729 len); 730 oob += len; 731 } 732 } 733 734 /* OOB free */ 735 len = oobsize - (oob - chip->oob_poi); 736 memcpy(oob, tmp_buf + size - len, len); 737 } 738 739 return 0; 740 } 741 742 static int denali_read_oob(struct nand_chip *chip, int page) 743 { 744 struct mtd_info *mtd = nand_to_mtd(chip); 745 746 denali_oob_xfer(mtd, chip, page, 0); 747 748 return 0; 749 } 750 751 static int denali_write_oob(struct nand_chip *chip, int page) 752 { 753 struct mtd_info *mtd = nand_to_mtd(chip); 754 755 denali_oob_xfer(mtd, chip, page, 1); 756 757 return nand_prog_page_end_op(chip); 758 } 759 760 static int denali_read_page(struct nand_chip *chip, uint8_t *buf, 761 int oob_required, int page) 762 { 763 struct mtd_info *mtd = nand_to_mtd(chip); 764 struct denali_nand_info *denali = mtd_to_denali(mtd); 765 unsigned long uncor_ecc_flags = 0; 766 int stat = 0; 767 int ret; 768 769 ret = denali_data_xfer(denali, buf, mtd->writesize, page, 0, 0); 770 if (ret && ret != -EBADMSG) 771 return ret; 772 773 if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) 774 stat = denali_hw_ecc_fixup(mtd, denali, &uncor_ecc_flags); 775 else if (ret == -EBADMSG) 776 stat = denali_sw_ecc_fixup(mtd, denali, &uncor_ecc_flags, buf); 777 778 if (stat < 0) 779 return stat; 780 781 if (uncor_ecc_flags) { 782 ret = denali_read_oob(chip, page); 783 if (ret) 784 return ret; 785 786 stat = denali_check_erased_page(mtd, chip, buf, 787 uncor_ecc_flags, stat); 788 } 789 790 return stat; 791 } 792 793 static int denali_write_page_raw(struct nand_chip *chip, const uint8_t *buf, 794 int oob_required, int page) 795 { 796 struct mtd_info *mtd = nand_to_mtd(chip); 797 struct denali_nand_info *denali = mtd_to_denali(mtd); 798 int writesize = mtd->writesize; 799 int oobsize = mtd->oobsize; 800 int ecc_steps = chip->ecc.steps; 801 int ecc_size = chip->ecc.size; 802 int ecc_bytes = chip->ecc.bytes; 803 void *tmp_buf = denali->buf; 804 int oob_skip = denali->oob_skip_bytes; 805 size_t size = writesize + oobsize; 806 int i, pos, len; 807 808 /* 809 * Fill the buffer with 0xff first except the full page transfer. 810 * This simplifies the logic. 811 */ 812 if (!buf || !oob_required) 813 memset(tmp_buf, 0xff, size); 814 815 /* Arrange the buffer for syndrome payload/ecc layout */ 816 if (buf) { 817 for (i = 0; i < ecc_steps; i++) { 818 pos = i * (ecc_size + ecc_bytes); 819 len = ecc_size; 820 821 if (pos >= writesize) 822 pos += oob_skip; 823 else if (pos + len > writesize) 824 len = writesize - pos; 825 826 memcpy(tmp_buf + pos, buf, len); 827 buf += len; 828 if (len < ecc_size) { 829 len = ecc_size - len; 830 memcpy(tmp_buf + writesize + oob_skip, buf, 831 len); 832 buf += len; 833 } 834 } 835 } 836 837 if (oob_required) { 838 const uint8_t *oob = chip->oob_poi; 839 840 /* BBM at the beginning of the OOB area */ 841 memcpy(tmp_buf + writesize, oob, oob_skip); 842 oob += oob_skip; 843 844 /* OOB ECC */ 845 for (i = 0; i < ecc_steps; i++) { 846 pos = ecc_size + i * (ecc_size + ecc_bytes); 847 len = ecc_bytes; 848 849 if (pos >= writesize) 850 pos += oob_skip; 851 else if (pos + len > writesize) 852 len = writesize - pos; 853 854 memcpy(tmp_buf + pos, oob, len); 855 oob += len; 856 if (len < ecc_bytes) { 857 len = ecc_bytes - len; 858 memcpy(tmp_buf + writesize + oob_skip, oob, 859 len); 860 oob += len; 861 } 862 } 863 864 /* OOB free */ 865 len = oobsize - (oob - chip->oob_poi); 866 memcpy(tmp_buf + size - len, oob, len); 867 } 868 869 return denali_data_xfer(denali, tmp_buf, size, page, 1, 1); 870 } 871 872 static int denali_write_page(struct nand_chip *chip, const uint8_t *buf, 873 int oob_required, int page) 874 { 875 struct mtd_info *mtd = nand_to_mtd(chip); 876 struct denali_nand_info *denali = mtd_to_denali(mtd); 877 878 return denali_data_xfer(denali, (void *)buf, mtd->writesize, 879 page, 0, 1); 880 } 881 882 static void denali_select_chip(struct nand_chip *chip, int cs) 883 { 884 struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip)); 885 886 denali->active_bank = cs; 887 } 888 889 static int denali_waitfunc(struct nand_chip *chip) 890 { 891 struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip)); 892 uint32_t irq_status; 893 894 /* R/B# pin transitioned from low to high? */ 895 irq_status = denali_wait_for_irq(denali, INTR__INT_ACT); 896 897 return irq_status & INTR__INT_ACT ? 0 : NAND_STATUS_FAIL; 898 } 899 900 static int denali_setup_data_interface(struct nand_chip *chip, int chipnr, 901 const struct nand_data_interface *conf) 902 { 903 struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip)); 904 const struct nand_sdr_timings *timings; 905 unsigned long t_x, mult_x; 906 int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data; 907 int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup; 908 int addr_2_data_mask; 909 uint32_t tmp; 910 911 timings = nand_get_sdr_timings(conf); 912 if (IS_ERR(timings)) 913 return PTR_ERR(timings); 914 915 /* clk_x period in picoseconds */ 916 t_x = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate); 917 if (!t_x) 918 return -EINVAL; 919 920 /* 921 * The bus interface clock, clk_x, is phase aligned with the core clock. 922 * The clk_x is an integral multiple N of the core clk. The value N is 923 * configured at IP delivery time, and its available value is 4, 5, 6. 924 */ 925 mult_x = DIV_ROUND_CLOSEST_ULL(denali->clk_x_rate, denali->clk_rate); 926 if (mult_x < 4 || mult_x > 6) 927 return -EINVAL; 928 929 if (chipnr == NAND_DATA_IFACE_CHECK_ONLY) 930 return 0; 931 932 /* tREA -> ACC_CLKS */ 933 acc_clks = DIV_ROUND_UP(timings->tREA_max, t_x); 934 acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE); 935 936 tmp = ioread32(denali->reg + ACC_CLKS); 937 tmp &= ~ACC_CLKS__VALUE; 938 tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks); 939 iowrite32(tmp, denali->reg + ACC_CLKS); 940 941 /* tRWH -> RE_2_WE */ 942 re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_x); 943 re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE); 944 945 tmp = ioread32(denali->reg + RE_2_WE); 946 tmp &= ~RE_2_WE__VALUE; 947 tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we); 948 iowrite32(tmp, denali->reg + RE_2_WE); 949 950 /* tRHZ -> RE_2_RE */ 951 re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_x); 952 re_2_re = min_t(int, re_2_re, RE_2_RE__VALUE); 953 954 tmp = ioread32(denali->reg + RE_2_RE); 955 tmp &= ~RE_2_RE__VALUE; 956 tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re); 957 iowrite32(tmp, denali->reg + RE_2_RE); 958 959 /* 960 * tCCS, tWHR -> WE_2_RE 961 * 962 * With WE_2_RE properly set, the Denali controller automatically takes 963 * care of the delay; the driver need not set NAND_WAIT_TCCS. 964 */ 965 we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min), t_x); 966 we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE); 967 968 tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE); 969 tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE; 970 tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re); 971 iowrite32(tmp, denali->reg + TWHR2_AND_WE_2_RE); 972 973 /* tADL -> ADDR_2_DATA */ 974 975 /* for older versions, ADDR_2_DATA is only 6 bit wide */ 976 addr_2_data_mask = TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA; 977 if (denali->revision < 0x0501) 978 addr_2_data_mask >>= 1; 979 980 addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_x); 981 addr_2_data = min_t(int, addr_2_data, addr_2_data_mask); 982 983 tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA); 984 tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA; 985 tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data); 986 iowrite32(tmp, denali->reg + TCWAW_AND_ADDR_2_DATA); 987 988 /* tREH, tWH -> RDWR_EN_HI_CNT */ 989 rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min), 990 t_x); 991 rdwr_en_hi = min_t(int, rdwr_en_hi, RDWR_EN_HI_CNT__VALUE); 992 993 tmp = ioread32(denali->reg + RDWR_EN_HI_CNT); 994 tmp &= ~RDWR_EN_HI_CNT__VALUE; 995 tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi); 996 iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT); 997 998 /* tRP, tWP -> RDWR_EN_LO_CNT */ 999 rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min), t_x); 1000 rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min), 1001 t_x); 1002 rdwr_en_lo_hi = max_t(int, rdwr_en_lo_hi, mult_x); 1003 rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi); 1004 rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE); 1005 1006 tmp = ioread32(denali->reg + RDWR_EN_LO_CNT); 1007 tmp &= ~RDWR_EN_LO_CNT__VALUE; 1008 tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo); 1009 iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT); 1010 1011 /* tCS, tCEA -> CS_SETUP_CNT */ 1012 cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_x) - rdwr_en_lo, 1013 (int)DIV_ROUND_UP(timings->tCEA_max, t_x) - acc_clks, 1014 0); 1015 cs_setup = min_t(int, cs_setup, CS_SETUP_CNT__VALUE); 1016 1017 tmp = ioread32(denali->reg + CS_SETUP_CNT); 1018 tmp &= ~CS_SETUP_CNT__VALUE; 1019 tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup); 1020 iowrite32(tmp, denali->reg + CS_SETUP_CNT); 1021 1022 return 0; 1023 } 1024 1025 static void denali_hw_init(struct denali_nand_info *denali) 1026 { 1027 /* 1028 * The REVISION register may not be reliable. Platforms are allowed to 1029 * override it. 1030 */ 1031 if (!denali->revision) 1032 denali->revision = swab16(ioread32(denali->reg + REVISION)); 1033 1034 /* 1035 * Set how many bytes should be skipped before writing data in OOB. 1036 * If a non-zero value has already been set (by firmware or something), 1037 * just use it. Otherwise, set the driver default. 1038 */ 1039 denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES); 1040 if (!denali->oob_skip_bytes) { 1041 denali->oob_skip_bytes = DENALI_DEFAULT_OOB_SKIP_BYTES; 1042 iowrite32(denali->oob_skip_bytes, 1043 denali->reg + SPARE_AREA_SKIP_BYTES); 1044 } 1045 1046 denali_detect_max_banks(denali); 1047 iowrite32(0x0F, denali->reg + RB_PIN_ENABLED); 1048 iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE); 1049 1050 iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER); 1051 } 1052 1053 int denali_calc_ecc_bytes(int step_size, int strength) 1054 { 1055 /* BCH code. Denali requires ecc.bytes to be multiple of 2 */ 1056 return DIV_ROUND_UP(strength * fls(step_size * 8), 16) * 2; 1057 } 1058 EXPORT_SYMBOL(denali_calc_ecc_bytes); 1059 1060 static int denali_ooblayout_ecc(struct mtd_info *mtd, int section, 1061 struct mtd_oob_region *oobregion) 1062 { 1063 struct denali_nand_info *denali = mtd_to_denali(mtd); 1064 struct nand_chip *chip = mtd_to_nand(mtd); 1065 1066 if (section) 1067 return -ERANGE; 1068 1069 oobregion->offset = denali->oob_skip_bytes; 1070 oobregion->length = chip->ecc.total; 1071 1072 return 0; 1073 } 1074 1075 static int denali_ooblayout_free(struct mtd_info *mtd, int section, 1076 struct mtd_oob_region *oobregion) 1077 { 1078 struct denali_nand_info *denali = mtd_to_denali(mtd); 1079 struct nand_chip *chip = mtd_to_nand(mtd); 1080 1081 if (section) 1082 return -ERANGE; 1083 1084 oobregion->offset = chip->ecc.total + denali->oob_skip_bytes; 1085 oobregion->length = mtd->oobsize - oobregion->offset; 1086 1087 return 0; 1088 } 1089 1090 static const struct mtd_ooblayout_ops denali_ooblayout_ops = { 1091 .ecc = denali_ooblayout_ecc, 1092 .free = denali_ooblayout_free, 1093 }; 1094 1095 static int denali_multidev_fixup(struct denali_nand_info *denali) 1096 { 1097 struct nand_chip *chip = &denali->nand; 1098 struct mtd_info *mtd = nand_to_mtd(chip); 1099 1100 /* 1101 * Support for multi device: 1102 * When the IP configuration is x16 capable and two x8 chips are 1103 * connected in parallel, DEVICES_CONNECTED should be set to 2. 1104 * In this case, the core framework knows nothing about this fact, 1105 * so we should tell it the _logical_ pagesize and anything necessary. 1106 */ 1107 denali->devs_per_cs = ioread32(denali->reg + DEVICES_CONNECTED); 1108 1109 /* 1110 * On some SoCs, DEVICES_CONNECTED is not auto-detected. 1111 * For those, DEVICES_CONNECTED is left to 0. Set 1 if it is the case. 1112 */ 1113 if (denali->devs_per_cs == 0) { 1114 denali->devs_per_cs = 1; 1115 iowrite32(1, denali->reg + DEVICES_CONNECTED); 1116 } 1117 1118 if (denali->devs_per_cs == 1) 1119 return 0; 1120 1121 if (denali->devs_per_cs != 2) { 1122 dev_err(denali->dev, "unsupported number of devices %d\n", 1123 denali->devs_per_cs); 1124 return -EINVAL; 1125 } 1126 1127 /* 2 chips in parallel */ 1128 mtd->size <<= 1; 1129 mtd->erasesize <<= 1; 1130 mtd->writesize <<= 1; 1131 mtd->oobsize <<= 1; 1132 chip->chipsize <<= 1; 1133 chip->page_shift += 1; 1134 chip->phys_erase_shift += 1; 1135 chip->bbt_erase_shift += 1; 1136 chip->chip_shift += 1; 1137 chip->pagemask <<= 1; 1138 chip->ecc.size <<= 1; 1139 chip->ecc.bytes <<= 1; 1140 chip->ecc.strength <<= 1; 1141 denali->oob_skip_bytes <<= 1; 1142 1143 return 0; 1144 } 1145 1146 static int denali_attach_chip(struct nand_chip *chip) 1147 { 1148 struct mtd_info *mtd = nand_to_mtd(chip); 1149 struct denali_nand_info *denali = mtd_to_denali(mtd); 1150 int ret; 1151 1152 if (ioread32(denali->reg + FEATURES) & FEATURES__DMA) 1153 denali->dma_avail = 1; 1154 1155 if (denali->dma_avail) { 1156 int dma_bit = denali->caps & DENALI_CAP_DMA_64BIT ? 64 : 32; 1157 1158 ret = dma_set_mask(denali->dev, DMA_BIT_MASK(dma_bit)); 1159 if (ret) { 1160 dev_info(denali->dev, 1161 "Failed to set DMA mask. Disabling DMA.\n"); 1162 denali->dma_avail = 0; 1163 } 1164 } 1165 1166 if (denali->dma_avail) { 1167 chip->options |= NAND_USE_BOUNCE_BUFFER; 1168 chip->buf_align = 16; 1169 if (denali->caps & DENALI_CAP_DMA_64BIT) 1170 denali->setup_dma = denali_setup_dma64; 1171 else 1172 denali->setup_dma = denali_setup_dma32; 1173 } 1174 1175 chip->bbt_options |= NAND_BBT_USE_FLASH; 1176 chip->bbt_options |= NAND_BBT_NO_OOB; 1177 chip->ecc.mode = NAND_ECC_HW_SYNDROME; 1178 chip->options |= NAND_NO_SUBPAGE_WRITE; 1179 1180 ret = nand_ecc_choose_conf(chip, denali->ecc_caps, 1181 mtd->oobsize - denali->oob_skip_bytes); 1182 if (ret) { 1183 dev_err(denali->dev, "Failed to setup ECC settings.\n"); 1184 return ret; 1185 } 1186 1187 dev_dbg(denali->dev, 1188 "chosen ECC settings: step=%d, strength=%d, bytes=%d\n", 1189 chip->ecc.size, chip->ecc.strength, chip->ecc.bytes); 1190 1191 iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) | 1192 FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength), 1193 denali->reg + ECC_CORRECTION); 1194 iowrite32(mtd->erasesize / mtd->writesize, 1195 denali->reg + PAGES_PER_BLOCK); 1196 iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0, 1197 denali->reg + DEVICE_WIDTH); 1198 iowrite32(chip->options & NAND_ROW_ADDR_3 ? 0 : TWO_ROW_ADDR_CYCLES__FLAG, 1199 denali->reg + TWO_ROW_ADDR_CYCLES); 1200 iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE); 1201 iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE); 1202 1203 iowrite32(chip->ecc.size, denali->reg + CFG_DATA_BLOCK_SIZE); 1204 iowrite32(chip->ecc.size, denali->reg + CFG_LAST_DATA_BLOCK_SIZE); 1205 /* chip->ecc.steps is set by nand_scan_tail(); not available here */ 1206 iowrite32(mtd->writesize / chip->ecc.size, 1207 denali->reg + CFG_NUM_DATA_BLOCKS); 1208 1209 mtd_set_ooblayout(mtd, &denali_ooblayout_ops); 1210 1211 if (chip->options & NAND_BUSWIDTH_16) { 1212 chip->legacy.read_buf = denali_read_buf16; 1213 chip->legacy.write_buf = denali_write_buf16; 1214 } else { 1215 chip->legacy.read_buf = denali_read_buf; 1216 chip->legacy.write_buf = denali_write_buf; 1217 } 1218 chip->ecc.read_page = denali_read_page; 1219 chip->ecc.read_page_raw = denali_read_page_raw; 1220 chip->ecc.write_page = denali_write_page; 1221 chip->ecc.write_page_raw = denali_write_page_raw; 1222 chip->ecc.read_oob = denali_read_oob; 1223 chip->ecc.write_oob = denali_write_oob; 1224 1225 ret = denali_multidev_fixup(denali); 1226 if (ret) 1227 return ret; 1228 1229 /* 1230 * This buffer is DMA-mapped by denali_{read,write}_page_raw. Do not 1231 * use devm_kmalloc() because the memory allocated by devm_ does not 1232 * guarantee DMA-safe alignment. 1233 */ 1234 denali->buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL); 1235 if (!denali->buf) 1236 return -ENOMEM; 1237 1238 return 0; 1239 } 1240 1241 static void denali_detach_chip(struct nand_chip *chip) 1242 { 1243 struct mtd_info *mtd = nand_to_mtd(chip); 1244 struct denali_nand_info *denali = mtd_to_denali(mtd); 1245 1246 kfree(denali->buf); 1247 } 1248 1249 static const struct nand_controller_ops denali_controller_ops = { 1250 .attach_chip = denali_attach_chip, 1251 .detach_chip = denali_detach_chip, 1252 .setup_data_interface = denali_setup_data_interface, 1253 }; 1254 1255 int denali_init(struct denali_nand_info *denali) 1256 { 1257 struct nand_chip *chip = &denali->nand; 1258 struct mtd_info *mtd = nand_to_mtd(chip); 1259 u32 features = ioread32(denali->reg + FEATURES); 1260 int ret; 1261 1262 mtd->dev.parent = denali->dev; 1263 denali_hw_init(denali); 1264 1265 init_completion(&denali->complete); 1266 spin_lock_init(&denali->irq_lock); 1267 1268 denali_clear_irq_all(denali); 1269 1270 ret = devm_request_irq(denali->dev, denali->irq, denali_isr, 1271 IRQF_SHARED, DENALI_NAND_NAME, denali); 1272 if (ret) { 1273 dev_err(denali->dev, "Unable to request IRQ\n"); 1274 return ret; 1275 } 1276 1277 denali_enable_irq(denali); 1278 1279 denali->active_bank = DENALI_INVALID_BANK; 1280 1281 nand_set_flash_node(chip, denali->dev->of_node); 1282 /* Fallback to the default name if DT did not give "label" property */ 1283 if (!mtd->name) 1284 mtd->name = "denali-nand"; 1285 1286 chip->legacy.select_chip = denali_select_chip; 1287 chip->legacy.read_byte = denali_read_byte; 1288 chip->legacy.write_byte = denali_write_byte; 1289 chip->legacy.cmd_ctrl = denali_cmd_ctrl; 1290 chip->legacy.waitfunc = denali_waitfunc; 1291 1292 if (features & FEATURES__INDEX_ADDR) { 1293 denali->host_read = denali_indexed_read; 1294 denali->host_write = denali_indexed_write; 1295 } else { 1296 denali->host_read = denali_direct_read; 1297 denali->host_write = denali_direct_write; 1298 } 1299 1300 /* clk rate info is needed for setup_data_interface */ 1301 if (!denali->clk_rate || !denali->clk_x_rate) 1302 chip->options |= NAND_KEEP_TIMINGS; 1303 1304 chip->legacy.dummy_controller.ops = &denali_controller_ops; 1305 ret = nand_scan(chip, denali->max_banks); 1306 if (ret) 1307 goto disable_irq; 1308 1309 ret = mtd_device_register(mtd, NULL, 0); 1310 if (ret) { 1311 dev_err(denali->dev, "Failed to register MTD: %d\n", ret); 1312 goto cleanup_nand; 1313 } 1314 1315 return 0; 1316 1317 cleanup_nand: 1318 nand_cleanup(chip); 1319 disable_irq: 1320 denali_disable_irq(denali); 1321 1322 return ret; 1323 } 1324 EXPORT_SYMBOL(denali_init); 1325 1326 void denali_remove(struct denali_nand_info *denali) 1327 { 1328 nand_release(&denali->nand); 1329 denali_disable_irq(denali); 1330 } 1331 EXPORT_SYMBOL(denali_remove); 1332 1333 MODULE_DESCRIPTION("Driver core for Denali NAND controller"); 1334 MODULE_AUTHOR("Intel Corporation and its suppliers"); 1335 MODULE_LICENSE("GPL v2"); 1336