// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Stefan Agner <stefan@agner.ch>
 * Copyright (C) 2014-2015 Lucas Stach <dev@lynxeye.de>
 * Copyright (C) 2012 Avionic Design GmbH
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/rawnand.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include <soc/tegra/common.h>

#define COMMAND					0x00
#define COMMAND_GO				BIT(31)
#define COMMAND_CLE				BIT(30)
#define COMMAND_ALE				BIT(29)
#define COMMAND_PIO				BIT(28)
#define COMMAND_TX				BIT(27)
#define COMMAND_RX				BIT(26)
#define COMMAND_SEC_CMD				BIT(25)
#define COMMAND_AFT_DAT				BIT(24)
#define COMMAND_TRANS_SIZE(size)		((((size) - 1) & 0xf) << 20)
#define COMMAND_A_VALID				BIT(19)
#define COMMAND_B_VALID				BIT(18)
#define COMMAND_RD_STATUS_CHK			BIT(17)
#define COMMAND_RBSY_CHK			BIT(16)
#define COMMAND_CE(x)				BIT(8 + ((x) & 0x7))
#define COMMAND_CLE_SIZE(size)			((((size) - 1) & 0x3) << 4)
#define COMMAND_ALE_SIZE(size)			((((size) - 1) & 0xf) << 0)

#define STATUS					0x04

#define ISR					0x08
#define ISR_CORRFAIL_ERR			BIT(24)
#define ISR_UND					BIT(7)
#define ISR_OVR					BIT(6)
#define ISR_CMD_DONE				BIT(5)
#define ISR_ECC_ERR				BIT(4)

#define IER					0x0c
#define IER_ERR_TRIG_VAL(x)			(((x) & 0xf) << 16)
#define IER_UND					BIT(7)
#define IER_OVR					BIT(6)
#define IER_CMD_DONE				BIT(5)
#define IER_ECC_ERR				BIT(4)
#define IER_GIE					BIT(0)

#define CONFIG					0x10
#define CONFIG_HW_ECC				BIT(31)
#define CONFIG_ECC_SEL				BIT(30)
#define CONFIG_ERR_COR				BIT(29)
#define CONFIG_PIPE_EN				BIT(28)
#define CONFIG_TVAL_4				(0 << 24)
#define CONFIG_TVAL_6				(1 << 24)
#define CONFIG_TVAL_8				(2 << 24)
#define CONFIG_SKIP_SPARE			BIT(23)
#define CONFIG_BUS_WIDTH_16			BIT(21)
#define CONFIG_COM_BSY				BIT(20)
#define CONFIG_PS_256				(0 << 16)
#define CONFIG_PS_512				(1 << 16)
#define CONFIG_PS_1024				(2 << 16)
#define CONFIG_PS_2048				(3 << 16)
#define CONFIG_PS_4096				(4 << 16)
#define CONFIG_SKIP_SPARE_SIZE_4		(0 << 14)
#define CONFIG_SKIP_SPARE_SIZE_8		(1 << 14)
#define CONFIG_SKIP_SPARE_SIZE_12		(2 << 14)
#define CONFIG_SKIP_SPARE_SIZE_16		(3 << 14)
#define CONFIG_TAG_BYTE_SIZE(x)			((x) & 0xff)

#define TIMING_1				0x14
#define TIMING_TRP_RESP(x)			(((x) & 0xf) << 28)
#define TIMING_TWB(x)				(((x) & 0xf) << 24)
#define TIMING_TCR_TAR_TRR(x)			(((x) & 0xf) << 20)
#define TIMING_TWHR(x)				(((x) & 0xf) << 16)
#define TIMING_TCS(x)				(((x) & 0x3) << 14)
#define TIMING_TWH(x)				(((x) & 0x3) << 12)
#define TIMING_TWP(x)				(((x) & 0xf) << 8)
#define TIMING_TRH(x)				(((x) & 0x3) << 4)
#define TIMING_TRP(x)				(((x) & 0xf) << 0)

#define RESP					0x18

#define TIMING_2				0x1c
#define TIMING_TADL(x)				((x) & 0xf)

#define CMD_REG1				0x20
#define CMD_REG2				0x24
#define ADDR_REG1				0x28
#define ADDR_REG2				0x2c

#define DMA_MST_CTRL				0x30
#define DMA_MST_CTRL_GO				BIT(31)
#define DMA_MST_CTRL_IN				(0 << 30)
#define DMA_MST_CTRL_OUT			BIT(30)
#define DMA_MST_CTRL_PERF_EN			BIT(29)
#define DMA_MST_CTRL_IE_DONE			BIT(28)
#define DMA_MST_CTRL_REUSE			BIT(27)
#define DMA_MST_CTRL_BURST_1			(2 << 24)
#define DMA_MST_CTRL_BURST_4			(3 << 24)
#define DMA_MST_CTRL_BURST_8			(4 << 24)
#define DMA_MST_CTRL_BURST_16			(5 << 24)
#define DMA_MST_CTRL_IS_DONE			BIT(20)
#define DMA_MST_CTRL_EN_A			BIT(2)
#define DMA_MST_CTRL_EN_B			BIT(1)

#define DMA_CFG_A				0x34
#define DMA_CFG_B				0x38

#define FIFO_CTRL				0x3c
#define FIFO_CTRL_CLR_ALL			BIT(3)

#define DATA_PTR				0x40
#define TAG_PTR					0x44
#define ECC_PTR					0x48

#define DEC_STATUS				0x4c
#define DEC_STATUS_A_ECC_FAIL			BIT(1)
#define DEC_STATUS_ERR_COUNT_MASK		0x00ff0000
#define DEC_STATUS_ERR_COUNT_SHIFT		16

#define HWSTATUS_CMD				0x50
#define HWSTATUS_MASK				0x54
#define HWSTATUS_RDSTATUS_MASK(x)		(((x) & 0xff) << 24)
#define HWSTATUS_RDSTATUS_VALUE(x)		(((x) & 0xff) << 16)
#define HWSTATUS_RBSY_MASK(x)			(((x) & 0xff) << 8)
#define HWSTATUS_RBSY_VALUE(x)			(((x) & 0xff) << 0)

#define BCH_CONFIG				0xcc
#define BCH_ENABLE				BIT(0)
#define BCH_TVAL_4				(0 << 4)
#define BCH_TVAL_8				(1 << 4)
#define BCH_TVAL_14				(2 << 4)
#define BCH_TVAL_16				(3 << 4)

#define DEC_STAT_RESULT				0xd0
#define DEC_STAT_BUF				0xd4
#define DEC_STAT_BUF_FAIL_SEC_FLAG_MASK		0xff000000
#define DEC_STAT_BUF_FAIL_SEC_FLAG_SHIFT	24
#define DEC_STAT_BUF_CORR_SEC_FLAG_MASK		0x00ff0000
#define DEC_STAT_BUF_CORR_SEC_FLAG_SHIFT	16
#define DEC_STAT_BUF_MAX_CORR_CNT_MASK		0x00001f00
#define DEC_STAT_BUF_MAX_CORR_CNT_SHIFT		8

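/*
 * OFFSET() is a subtraction clamped at zero. The timing fields programmed
 * in tegra_nand_setup_timing() encode "cycles minus a base value"; the
 * clamp keeps a field at its assumed hardware minimum whenever the
 * computed cycle count is smaller than that base.
 */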
#define OFFSET(val, off)	((val) < (off) ? 0 : (val) - (off))

#define SKIP_SPARE_BYTES	4
#define BITS_PER_STEP_RS	18
#define BITS_PER_STEP_BCH	13

#define INT_MASK		(IER_UND | IER_OVR | IER_CMD_DONE | IER_GIE)
#define HWSTATUS_CMD_DEFAULT	NAND_STATUS_READY
#define HWSTATUS_MASK_DEFAULT	(HWSTATUS_RDSTATUS_MASK(1) | \
				 HWSTATUS_RDSTATUS_VALUE(0) | \
				 HWSTATUS_RBSY_MASK(NAND_STATUS_READY) | \
				 HWSTATUS_RBSY_VALUE(NAND_STATUS_READY))

struct tegra_nand_controller {
	struct nand_controller controller;
	struct device *dev;
	void __iomem *regs;
	int irq;
	struct clk *clk;
	struct completion command_complete;
	struct completion dma_complete;
	bool last_read_error;
	int cur_cs;
	struct nand_chip *chip;
};

struct tegra_nand_chip {
	struct nand_chip chip;
	struct gpio_desc *wp_gpio;
	struct mtd_oob_region ecc;
	u32 config;
	u32 config_ecc;
	u32 bch_config;
	int cs[1];
};

static inline struct tegra_nand_controller *
to_tegra_ctrl(struct nand_controller *hw_ctrl)
{
	return container_of(hw_ctrl, struct tegra_nand_controller, controller);
}

static inline struct tegra_nand_chip *to_tegra_chip(struct nand_chip *chip)
{
	return container_of(chip, struct tegra_nand_chip, chip);
}

static int tegra_nand_ooblayout_rs_ecc(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int bytes_per_step = DIV_ROUND_UP(BITS_PER_STEP_RS * chip->ecc.strength,
					  BITS_PER_BYTE);

	if (section > 0)
		return -ERANGE;

	oobregion->offset = SKIP_SPARE_BYTES;
	oobregion->length = round_up(bytes_per_step * chip->ecc.steps, 4);

	return 0;
}

static int tegra_nand_ooblayout_no_free(struct mtd_info *mtd, int section,
					struct mtd_oob_region *oobregion)
{
	return -ERANGE;
}

static const struct mtd_ooblayout_ops tegra_nand_oob_rs_ops = {
	.ecc = tegra_nand_ooblayout_rs_ecc,
	.free = tegra_nand_ooblayout_no_free,
};

static int tegra_nand_ooblayout_bch_ecc(struct mtd_info *mtd, int section,
					struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int bytes_per_step = DIV_ROUND_UP(BITS_PER_STEP_BCH * chip->ecc.strength,
					  BITS_PER_BYTE);

	if (section > 0)
		return -ERANGE;

	oobregion->offset = SKIP_SPARE_BYTES;
	oobregion->length = round_up(bytes_per_step * chip->ecc.steps, 4);

	return 0;
}

static const struct mtd_ooblayout_ops tegra_nand_oob_bch_ops = {
	.ecc = tegra_nand_ooblayout_bch_ecc,
	.free = tegra_nand_ooblayout_no_free,
};

static irqreturn_t tegra_nand_irq(int irq, void *data)
{
	struct tegra_nand_controller *ctrl = data;
	u32 isr, dma;

	isr = readl_relaxed(ctrl->regs + ISR);
	dma = readl_relaxed(ctrl->regs + DMA_MST_CTRL);
	dev_dbg(ctrl->dev, "isr %08x\n", isr);

	if (!isr && !(dma & DMA_MST_CTRL_IS_DONE))
		return IRQ_NONE;

	/*
	 * The bit name is somewhat misleading: this is also set when
	 * HW ECC was successful. The data sheet states:
	 * Correctable OR Un-correctable errors occurred in the DMA transfer...
	 */
	if (isr & ISR_CORRFAIL_ERR)
		ctrl->last_read_error = true;

	if (isr & ISR_CMD_DONE)
		complete(&ctrl->command_complete);

	if (isr & ISR_UND)
		dev_err(ctrl->dev, "FIFO underrun\n");

	if (isr & ISR_OVR)
		dev_err(ctrl->dev, "FIFO overrun\n");

	/* handle DMA interrupts */
	if (dma & DMA_MST_CTRL_IS_DONE) {
		writel_relaxed(dma, ctrl->regs + DMA_MST_CTRL);
		complete(&ctrl->dma_complete);
	}

	/* clear interrupts */
	writel_relaxed(isr, ctrl->regs + ISR);

	return IRQ_HANDLED;
}

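/*
 * Register names for the debug dump below, indexed by register offset / 4.
 * The NULL entry at index 6 corresponds to RESP (0x18), which the dump
 * loop skips, presumably since it holds PIO data rather than status.
 */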
static const char * const tegra_nand_reg_names[] = {
	"COMMAND",
	"STATUS",
	"ISR",
	"IER",
	"CONFIG",
	"TIMING",
	NULL,
	"TIMING2",
	"CMD_REG1",
	"CMD_REG2",
	"ADDR_REG1",
	"ADDR_REG2",
	"DMA_MST_CTRL",
	"DMA_CFG_A",
	"DMA_CFG_B",
	"FIFO_CTRL",
};

static void tegra_nand_dump_reg(struct tegra_nand_controller *ctrl)
{
	u32 reg;
	int i;

	dev_err(ctrl->dev, "Tegra NAND controller register dump\n");
	for (i = 0; i < ARRAY_SIZE(tegra_nand_reg_names); i++) {
		const char *reg_name = tegra_nand_reg_names[i];

		if (!reg_name)
			continue;

		reg = readl_relaxed(ctrl->regs + (i * 4));
		dev_err(ctrl->dev, "%s: 0x%08x\n", reg_name, reg);
	}
}

static void tegra_nand_controller_abort(struct tegra_nand_controller *ctrl)
{
	u32 isr, dma;

	disable_irq(ctrl->irq);

	/* Abort current command/DMA operation */
	writel_relaxed(0, ctrl->regs + DMA_MST_CTRL);
	writel_relaxed(0, ctrl->regs + COMMAND);

	/* clear interrupts */
	isr = readl_relaxed(ctrl->regs + ISR);
	writel_relaxed(isr, ctrl->regs + ISR);
	dma = readl_relaxed(ctrl->regs + DMA_MST_CTRL);
	writel_relaxed(dma, ctrl->regs + DMA_MST_CTRL);

	reinit_completion(&ctrl->command_complete);
	reinit_completion(&ctrl->dma_complete);

	enable_irq(ctrl->irq);
}

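/*
 * Execute a single parsed sub-operation: command opcodes go to
 * CMD_REG1/CMD_REG2, up to eight address cycles are packed little-endian
 * into ADDR_REG1 and ADDR_REG2, and PIO data passes through the 32-bit
 * RESP register, which limits the data phases handled here to four bytes.
 */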
static int tegra_nand_cmd(struct nand_chip *chip,
			  const struct nand_subop *subop)
{
	const struct nand_op_instr *instr;
	const struct nand_op_instr *instr_data_in = NULL;
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
	unsigned int op_id, size = 0, offset = 0;
	bool first_cmd = true;
	u32 reg, cmd = 0;
	int ret;

	for (op_id = 0; op_id < subop->ninstrs; op_id++) {
		unsigned int naddrs, i;
		const u8 *addrs;
		u32 addr1 = 0, addr2 = 0;

		instr = &subop->instrs[op_id];

		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			if (first_cmd) {
				cmd |= COMMAND_CLE;
				writel_relaxed(instr->ctx.cmd.opcode,
					       ctrl->regs + CMD_REG1);
			} else {
				cmd |= COMMAND_SEC_CMD;
				writel_relaxed(instr->ctx.cmd.opcode,
					       ctrl->regs + CMD_REG2);
			}
			first_cmd = false;
			break;

		case NAND_OP_ADDR_INSTR:
			offset = nand_subop_get_addr_start_off(subop, op_id);
			naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
			addrs = &instr->ctx.addr.addrs[offset];

			cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(naddrs);
			for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
				addr1 |= *addrs++ << (BITS_PER_BYTE * i);
			naddrs -= i;
			for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
				addr2 |= *addrs++ << (BITS_PER_BYTE * i);

			writel_relaxed(addr1, ctrl->regs + ADDR_REG1);
			writel_relaxed(addr2, ctrl->regs + ADDR_REG2);
			break;

		case NAND_OP_DATA_IN_INSTR:
			size = nand_subop_get_data_len(subop, op_id);
			offset = nand_subop_get_data_start_off(subop, op_id);

			cmd |= COMMAND_TRANS_SIZE(size) | COMMAND_PIO |
			       COMMAND_RX | COMMAND_A_VALID;

			instr_data_in = instr;
			break;

		case NAND_OP_DATA_OUT_INSTR:
			size = nand_subop_get_data_len(subop, op_id);
			offset = nand_subop_get_data_start_off(subop, op_id);

			cmd |= COMMAND_TRANS_SIZE(size) | COMMAND_PIO |
			       COMMAND_TX | COMMAND_A_VALID;
			memcpy(&reg, instr->ctx.data.buf.out + offset, size);

			writel_relaxed(reg, ctrl->regs + RESP);
			break;

		case NAND_OP_WAITRDY_INSTR:
			cmd |= COMMAND_RBSY_CHK;
			break;
		}
	}

	cmd |= COMMAND_GO | COMMAND_CE(ctrl->cur_cs);
	writel_relaxed(cmd, ctrl->regs + COMMAND);
	ret = wait_for_completion_timeout(&ctrl->command_complete,
					  msecs_to_jiffies(500));
	if (!ret) {
		dev_err(ctrl->dev, "COMMAND timeout\n");
		tegra_nand_dump_reg(ctrl);
		tegra_nand_controller_abort(ctrl);
		return -ETIMEDOUT;
	}

	if (instr_data_in) {
		reg = readl_relaxed(ctrl->regs + RESP);
		memcpy(instr_data_in->ctx.data.buf.in + offset, &reg, size);
	}

	return 0;
}

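/*
 * The parser patterns mirror the tegra_nand_cmd() limits above: at most
 * eight address cycles and four bytes of PIO data per pattern. Full-page
 * transfers do not go through this path; they use the DMA-based
 * tegra_nand_page_xfer() below.
 */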
static const struct nand_op_parser tegra_nand_op_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
			       NAND_OP_PARSER_PAT_CMD_ELEM(true),
			       NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
			       NAND_OP_PARSER_PAT_CMD_ELEM(true),
			       NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
	NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
			       NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 4)),
	NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
			       NAND_OP_PARSER_PAT_CMD_ELEM(true),
			       NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
			       NAND_OP_PARSER_PAT_CMD_ELEM(true),
			       NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
			       NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 4)),
	);

static void tegra_nand_select_target(struct nand_chip *chip,
				     unsigned int die_nr)
{
	struct tegra_nand_chip *nand = to_tegra_chip(chip);
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);

	ctrl->cur_cs = nand->cs[die_nr];
}

static int tegra_nand_exec_op(struct nand_chip *chip,
			      const struct nand_operation *op,
			      bool check_only)
{
	if (!check_only)
		tegra_nand_select_target(chip, op->cs);

	return nand_op_parser_exec_op(chip, &tegra_nand_op_parser, op,
				      check_only);
}

static void tegra_nand_hw_ecc(struct tegra_nand_controller *ctrl,
			      struct nand_chip *chip, bool enable)
{
	struct tegra_nand_chip *nand = to_tegra_chip(chip);

	if (chip->ecc.algo == NAND_ECC_ALGO_BCH && enable)
		writel_relaxed(nand->bch_config, ctrl->regs + BCH_CONFIG);
	else
		writel_relaxed(0, ctrl->regs + BCH_CONFIG);

	if (enable)
		writel_relaxed(nand->config_ecc, ctrl->regs + CONFIG);
	else
		writel_relaxed(nand->config, ctrl->regs + CONFIG);
}

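/*
 * Full-page transfer using the controller's two DMA buffers: buffer A
 * (DATA_PTR/DMA_CFG_A) carries the main data area, buffer B
 * (TAG_PTR/DMA_CFG_B) the tag/OOB area. When buf is NULL the column
 * address is advanced past the data area so that only the OOB area is
 * accessed.
 */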
static int tegra_nand_page_xfer(struct mtd_info *mtd, struct nand_chip *chip,
				void *buf, void *oob_buf, int oob_len, int page,
				bool read)
{
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
	enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	dma_addr_t dma_addr = 0, dma_addr_oob = 0;
	u32 addr1, cmd, dma_ctrl;
	int ret;

	tegra_nand_select_target(chip, chip->cur_cs);

	if (read) {
		writel_relaxed(NAND_CMD_READ0, ctrl->regs + CMD_REG1);
		writel_relaxed(NAND_CMD_READSTART, ctrl->regs + CMD_REG2);
	} else {
		writel_relaxed(NAND_CMD_SEQIN, ctrl->regs + CMD_REG1);
		writel_relaxed(NAND_CMD_PAGEPROG, ctrl->regs + CMD_REG2);
	}
	cmd = COMMAND_CLE | COMMAND_SEC_CMD;

	/* Lower 16-bits are column, by default 0 */
	addr1 = page << 16;

	if (!buf)
		addr1 |= mtd->writesize;
	writel_relaxed(addr1, ctrl->regs + ADDR_REG1);

	if (chip->options & NAND_ROW_ADDR_3) {
		writel_relaxed(page >> 16, ctrl->regs + ADDR_REG2);
		cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(5);
	} else {
		cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(4);
	}

	if (buf) {
		dma_addr = dma_map_single(ctrl->dev, buf, mtd->writesize, dir);
		ret = dma_mapping_error(ctrl->dev, dma_addr);
		if (ret) {
			dev_err(ctrl->dev, "dma mapping error\n");
			return -EINVAL;
		}

		writel_relaxed(mtd->writesize - 1, ctrl->regs + DMA_CFG_A);
		writel_relaxed(dma_addr, ctrl->regs + DATA_PTR);
	}

	if (oob_buf) {
		dma_addr_oob = dma_map_single(ctrl->dev, oob_buf, mtd->oobsize,
					      dir);
		ret = dma_mapping_error(ctrl->dev, dma_addr_oob);
		if (ret) {
			dev_err(ctrl->dev, "dma mapping error\n");
			ret = -EINVAL;
			goto err_unmap_dma_page;
		}

		writel_relaxed(oob_len - 1, ctrl->regs + DMA_CFG_B);
		writel_relaxed(dma_addr_oob, ctrl->regs + TAG_PTR);
	}

	dma_ctrl = DMA_MST_CTRL_GO | DMA_MST_CTRL_PERF_EN |
		   DMA_MST_CTRL_IE_DONE | DMA_MST_CTRL_IS_DONE |
		   DMA_MST_CTRL_BURST_16;

	if (buf)
		dma_ctrl |= DMA_MST_CTRL_EN_A;
	if (oob_buf)
		dma_ctrl |= DMA_MST_CTRL_EN_B;

	if (read)
		dma_ctrl |= DMA_MST_CTRL_IN | DMA_MST_CTRL_REUSE;
	else
		dma_ctrl |= DMA_MST_CTRL_OUT;

	writel_relaxed(dma_ctrl, ctrl->regs + DMA_MST_CTRL);

	cmd |= COMMAND_GO | COMMAND_RBSY_CHK | COMMAND_TRANS_SIZE(9) |
	       COMMAND_CE(ctrl->cur_cs);

	if (buf)
		cmd |= COMMAND_A_VALID;
	if (oob_buf)
		cmd |= COMMAND_B_VALID;

	if (read)
		cmd |= COMMAND_RX;
	else
		cmd |= COMMAND_TX | COMMAND_AFT_DAT;

	writel_relaxed(cmd, ctrl->regs + COMMAND);

	ret = wait_for_completion_timeout(&ctrl->command_complete,
					  msecs_to_jiffies(500));
	if (!ret) {
		dev_err(ctrl->dev, "COMMAND timeout\n");
		tegra_nand_dump_reg(ctrl);
		tegra_nand_controller_abort(ctrl);
		ret = -ETIMEDOUT;
		goto err_unmap_dma;
	}

	ret = wait_for_completion_timeout(&ctrl->dma_complete,
					  msecs_to_jiffies(500));
	if (!ret) {
		dev_err(ctrl->dev, "DMA timeout\n");
		tegra_nand_dump_reg(ctrl);
		tegra_nand_controller_abort(ctrl);
		ret = -ETIMEDOUT;
		goto err_unmap_dma;
	}
	ret = 0;

err_unmap_dma:
	if (oob_buf)
		dma_unmap_single(ctrl->dev, dma_addr_oob, mtd->oobsize, dir);
err_unmap_dma_page:
	if (buf)
		dma_unmap_single(ctrl->dev, dma_addr, mtd->writesize, dir);

	return ret;
}

static int tegra_nand_read_page_raw(struct nand_chip *chip, u8 *buf,
				    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	void *oob_buf = oob_required ? chip->oob_poi : NULL;

	return tegra_nand_page_xfer(mtd, chip, buf, oob_buf,
				    mtd->oobsize, page, true);
}

static int tegra_nand_write_page_raw(struct nand_chip *chip, const u8 *buf,
				     int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	void *oob_buf = oob_required ? chip->oob_poi : NULL;

	return tegra_nand_page_xfer(mtd, chip, (void *)buf, oob_buf,
				    mtd->oobsize, page, false);
}

static int tegra_nand_read_oob(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return tegra_nand_page_xfer(mtd, chip, NULL, chip->oob_poi,
				    mtd->oobsize, page, true);
}

static int tegra_nand_write_oob(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return tegra_nand_page_xfer(mtd, chip, NULL, chip->oob_poi,
				    mtd->oobsize, page, false);
}

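/*
 * Page read with HW ECC enabled. When the IRQ handler saw CORRFAIL_ERR,
 * DEC_STAT_BUF is decoded to tell apart sectors that were corrected from
 * sectors that failed, and a page where every sector failed is re-checked
 * for being simply erased.
 */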
static int tegra_nand_read_page_hwecc(struct nand_chip *chip, u8 *buf,
				      int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
	struct tegra_nand_chip *nand = to_tegra_chip(chip);
	void *oob_buf = oob_required ? chip->oob_poi : NULL;
	u32 dec_stat, max_corr_cnt;
	unsigned long fail_sec_flag;
	int ret;

	tegra_nand_hw_ecc(ctrl, chip, true);
	ret = tegra_nand_page_xfer(mtd, chip, buf, oob_buf, 0, page, true);
	tegra_nand_hw_ecc(ctrl, chip, false);
	if (ret)
		return ret;

	/* No correctable or un-correctable errors, page must have 0 bitflips */
	if (!ctrl->last_read_error)
		return 0;

	/*
	 * Correctable or un-correctable errors occurred. Use DEC_STAT_BUF
	 * which contains information for all ECC selections.
	 *
	 * Note that since we do not use Command Queues DEC_RESULT does not
	 * state the number of pages we can read from the DEC_STAT_BUF. But
	 * since CORRFAIL_ERR did occur during page read we do have a valid
	 * result in DEC_STAT_BUF.
	 */
	ctrl->last_read_error = false;
	dec_stat = readl_relaxed(ctrl->regs + DEC_STAT_BUF);

	fail_sec_flag = (dec_stat & DEC_STAT_BUF_FAIL_SEC_FLAG_MASK) >>
			DEC_STAT_BUF_FAIL_SEC_FLAG_SHIFT;

	max_corr_cnt = (dec_stat & DEC_STAT_BUF_MAX_CORR_CNT_MASK) >>
		       DEC_STAT_BUF_MAX_CORR_CNT_SHIFT;

	if (fail_sec_flag) {
		int bit, max_bitflips = 0;

		/*
		 * Since we do not support subpage writes, a complete page
		 * is either written or not. We can take a shortcut here by
		 * checking whether any of the sectors has been read
		 * successfully. If at least one sector has been read
		 * successfully, the page must have been written previously.
		 * It cannot be an erased page.
		 *
		 * E.g. the controller might return fail_sec_flag with 0x4,
		 * which would mean only the third sector failed to correct.
		 * The page must have been written and the third sector is
		 * really not correctable anymore.
		 */
		if (fail_sec_flag ^ GENMASK(chip->ecc.steps - 1, 0)) {
			mtd->ecc_stats.failed += hweight8(fail_sec_flag);
			return max_corr_cnt;
		}

		/*
		 * All sectors failed to correct, but the ECC isn't smart
		 * enough to figure out if a page is really just erased.
		 * Read OOB data and check whether data/OOB is completely
		 * erased or if error correction just failed for all sub-
		 * pages.
		 */
		ret = tegra_nand_read_oob(chip, page);
		if (ret < 0)
			return ret;

		for_each_set_bit(bit, &fail_sec_flag, chip->ecc.steps) {
			u8 *data = buf + (chip->ecc.size * bit);
			u8 *oob = chip->oob_poi + nand->ecc.offset +
				  (chip->ecc.bytes * bit);

			ret = nand_check_erased_ecc_chunk(data, chip->ecc.size,
							  oob, chip->ecc.bytes,
							  NULL, 0,
							  chip->ecc.strength);
			if (ret < 0) {
				mtd->ecc_stats.failed++;
			} else {
				mtd->ecc_stats.corrected += ret;
				max_bitflips = max(ret, max_bitflips);
			}
		}

		return max_t(unsigned int, max_corr_cnt, max_bitflips);
	} else {
		int corr_sec_flag;

		corr_sec_flag = (dec_stat & DEC_STAT_BUF_CORR_SEC_FLAG_MASK) >>
				DEC_STAT_BUF_CORR_SEC_FLAG_SHIFT;

		/*
		 * The value returned in the register is the maximum of
		 * bitflips encountered in any of the ECC regions. As there is
		 * no way to get the number of bitflips in a specific region,
		 * we are not able to deliver correct stats but instead
		 * overestimate the number of corrected bitflips by assuming
		 * that all regions where errors have been corrected
		 * encountered the maximum number of bitflips.
		 */
		mtd->ecc_stats.corrected += max_corr_cnt * hweight8(corr_sec_flag);

		return max_corr_cnt;
	}
}

static int tegra_nand_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
				       int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
	void *oob_buf = oob_required ? chip->oob_poi : NULL;
	int ret;

	tegra_nand_hw_ecc(ctrl, chip, true);
	ret = tegra_nand_page_xfer(mtd, chip, (void *)buf, oob_buf,
				   0, page, false);
	tegra_nand_hw_ecc(ctrl, chip, false);

	return ret;
}

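/*
 * Worked example (assumed clock rate): at 150 MHz the period is
 * DIV_ROUND_UP(1000000, 150) = 6667 ps, so e.g. tWB_max = 100000 ps yields
 * DIV_ROUND_UP(100000, 6667) = 15 cycles before the OFFSET(val, 1)
 * adjustment is applied.
 */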
static void tegra_nand_setup_timing(struct tegra_nand_controller *ctrl,
				    const struct nand_sdr_timings *timings)
{
	/*
	 * The period (and all other timings in this function) is in ps,
	 * so we need to take care here to avoid integer overflows.
	 */
	unsigned int rate = clk_get_rate(ctrl->clk) / 1000000;
	unsigned int period = DIV_ROUND_UP(1000000, rate);
	u32 val, reg = 0;

	val = DIV_ROUND_UP(max3(timings->tAR_min, timings->tRR_min,
				timings->tRC_min), period);
	reg |= TIMING_TCR_TAR_TRR(OFFSET(val, 3));

	val = DIV_ROUND_UP(max(max(timings->tCS_min, timings->tCH_min),
			       max(timings->tALS_min, timings->tALH_min)),
			   period);
	reg |= TIMING_TCS(OFFSET(val, 2));

	val = DIV_ROUND_UP(max(timings->tRP_min, timings->tREA_max) + 6000,
			   period);
	reg |= TIMING_TRP(OFFSET(val, 1)) | TIMING_TRP_RESP(OFFSET(val, 1));

	reg |= TIMING_TWB(OFFSET(DIV_ROUND_UP(timings->tWB_max, period), 1));
	reg |= TIMING_TWHR(OFFSET(DIV_ROUND_UP(timings->tWHR_min, period), 1));
	reg |= TIMING_TWH(OFFSET(DIV_ROUND_UP(timings->tWH_min, period), 1));
	reg |= TIMING_TWP(OFFSET(DIV_ROUND_UP(timings->tWP_min, period), 1));
	reg |= TIMING_TRH(OFFSET(DIV_ROUND_UP(timings->tREH_min, period), 1));

	writel_relaxed(reg, ctrl->regs + TIMING_1);

	val = DIV_ROUND_UP(timings->tADL_min, period);
	reg = TIMING_TADL(OFFSET(val, 3));

	writel_relaxed(reg, ctrl->regs + TIMING_2);
}

static int tegra_nand_setup_interface(struct nand_chip *chip, int csline,
				      const struct nand_interface_config *conf)
{
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
	const struct nand_sdr_timings *timings;

	timings = nand_get_sdr_timings(conf);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	if (csline == NAND_DATA_IFACE_CHECK_ONLY)
		return 0;

	tegra_nand_setup_timing(ctrl, timings);

	return 0;
}

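/*
 * Worked example for the strength selection below (RS, 2048-byte page,
 * 64-byte OOB, i.e. 4 steps of 512 bytes): strength 6 needs
 * DIV_ROUND_UP(18 * 6, 8) = 14 bytes per step, round_up(14 * 4, 4) = 56
 * bytes per page, which fits the 64 - 4 = 60 bytes left after the skipped
 * spare bytes; strength 8 would need 72 bytes and does not fit.
 */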
static const int rs_strength_bootable[] = { 4 };
static const int rs_strength[] = { 4, 6, 8 };
static const int bch_strength_bootable[] = { 8, 16 };
static const int bch_strength[] = { 4, 8, 14, 16 };

static int tegra_nand_get_strength(struct nand_chip *chip, const int *strength,
				   int strength_len, int bits_per_step,
				   int oobsize)
{
	struct nand_device *base = mtd_to_nanddev(nand_to_mtd(chip));
	const struct nand_ecc_props *requirements =
		nanddev_get_ecc_requirements(base);
	bool maximize = base->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH;
	int i;

	/*
	 * Loop through available strengths. Backwards in case we try to
	 * maximize the BCH strength.
	 */
	for (i = 0; i < strength_len; i++) {
		int strength_sel, bytes_per_step, bytes_per_page;

		if (maximize) {
			strength_sel = strength[strength_len - i - 1];
		} else {
			strength_sel = strength[i];

			if (strength_sel < requirements->strength)
				continue;
		}

		bytes_per_step = DIV_ROUND_UP(bits_per_step * strength_sel,
					      BITS_PER_BYTE);
		bytes_per_page = round_up(bytes_per_step * chip->ecc.steps, 4);

		/* Check whether strength fits OOB */
		if (bytes_per_page < (oobsize - SKIP_SPARE_BYTES))
			return strength_sel;
	}

	return -EINVAL;
}

static int tegra_nand_select_strength(struct nand_chip *chip, int oobsize)
{
	const int *strength;
	int strength_len, bits_per_step;

	switch (chip->ecc.algo) {
	case NAND_ECC_ALGO_RS:
		bits_per_step = BITS_PER_STEP_RS;
		if (chip->options & NAND_IS_BOOT_MEDIUM) {
			strength = rs_strength_bootable;
			strength_len = ARRAY_SIZE(rs_strength_bootable);
		} else {
			strength = rs_strength;
			strength_len = ARRAY_SIZE(rs_strength);
		}
		break;
	case NAND_ECC_ALGO_BCH:
		bits_per_step = BITS_PER_STEP_BCH;
		if (chip->options & NAND_IS_BOOT_MEDIUM) {
			strength = bch_strength_bootable;
			strength_len = ARRAY_SIZE(bch_strength_bootable);
		} else {
			strength = bch_strength;
			strength_len = ARRAY_SIZE(bch_strength);
		}
		break;
	default:
		return -EINVAL;
	}

	return tegra_nand_get_strength(chip, strength, strength_len,
				       bits_per_step, oobsize);
}

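/*
 * Chip attach: validates the 512-byte ECC step size, picks RS for
 * small-page and BCH for large-page devices unless an algorithm was
 * specified, selects a strength that fits the OOB area, and derives the
 * CONFIG/BCH_CONFIG register values used at runtime.
 */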
static int tegra_nand_attach_chip(struct nand_chip *chip)
{
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
	const struct nand_ecc_props *requirements =
		nanddev_get_ecc_requirements(&chip->base);
	struct tegra_nand_chip *nand = to_tegra_chip(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	int bits_per_step;
	int ret;

	if (chip->bbt_options & NAND_BBT_USE_FLASH)
		chip->bbt_options |= NAND_BBT_NO_OOB;

	chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
	chip->ecc.size = 512;
	chip->ecc.steps = mtd->writesize / chip->ecc.size;
	if (requirements->step_size != 512) {
		dev_err(ctrl->dev, "Unsupported step size %d\n",
			requirements->step_size);
		return -EINVAL;
	}

	chip->ecc.read_page = tegra_nand_read_page_hwecc;
	chip->ecc.write_page = tegra_nand_write_page_hwecc;
	chip->ecc.read_page_raw = tegra_nand_read_page_raw;
	chip->ecc.write_page_raw = tegra_nand_write_page_raw;
	chip->ecc.read_oob = tegra_nand_read_oob;
	chip->ecc.write_oob = tegra_nand_write_oob;

	if (chip->options & NAND_BUSWIDTH_16)
		nand->config |= CONFIG_BUS_WIDTH_16;

	if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) {
		if (mtd->writesize < 2048)
			chip->ecc.algo = NAND_ECC_ALGO_RS;
		else
			chip->ecc.algo = NAND_ECC_ALGO_BCH;
	}

	if (chip->ecc.algo == NAND_ECC_ALGO_BCH && mtd->writesize < 2048) {
		dev_err(ctrl->dev, "BCH supports 2K or 4K page size only\n");
		return -EINVAL;
	}

	if (!chip->ecc.strength) {
		ret = tegra_nand_select_strength(chip, mtd->oobsize);
		if (ret < 0) {
			dev_err(ctrl->dev,
				"No valid strength found, minimum %d\n",
				requirements->strength);
			return ret;
		}

		chip->ecc.strength = ret;
	}

	nand->config_ecc = CONFIG_PIPE_EN | CONFIG_SKIP_SPARE |
			   CONFIG_SKIP_SPARE_SIZE_4;

	switch (chip->ecc.algo) {
	case NAND_ECC_ALGO_RS:
		bits_per_step = BITS_PER_STEP_RS * chip->ecc.strength;
		mtd_set_ooblayout(mtd, &tegra_nand_oob_rs_ops);
		nand->config_ecc |= CONFIG_HW_ECC | CONFIG_ECC_SEL |
				    CONFIG_ERR_COR;
		switch (chip->ecc.strength) {
		case 4:
			nand->config_ecc |= CONFIG_TVAL_4;
			break;
		case 6:
			nand->config_ecc |= CONFIG_TVAL_6;
			break;
		case 8:
			nand->config_ecc |= CONFIG_TVAL_8;
			break;
		default:
			dev_err(ctrl->dev, "ECC strength %d not supported\n",
				chip->ecc.strength);
			return -EINVAL;
		}
		break;
	case NAND_ECC_ALGO_BCH:
		bits_per_step = BITS_PER_STEP_BCH * chip->ecc.strength;
		mtd_set_ooblayout(mtd, &tegra_nand_oob_bch_ops);
		nand->bch_config = BCH_ENABLE;
		switch (chip->ecc.strength) {
		case 4:
			nand->bch_config |= BCH_TVAL_4;
			break;
		case 8:
			nand->bch_config |= BCH_TVAL_8;
			break;
		case 14:
			nand->bch_config |= BCH_TVAL_14;
			break;
		case 16:
			nand->bch_config |= BCH_TVAL_16;
			break;
		default:
			dev_err(ctrl->dev, "ECC strength %d not supported\n",
				chip->ecc.strength);
			return -EINVAL;
		}
		break;
	default:
		dev_err(ctrl->dev, "ECC algorithm not supported\n");
		return -EINVAL;
	}

	dev_info(ctrl->dev, "Using %s with strength %d per 512 byte step\n",
		 chip->ecc.algo == NAND_ECC_ALGO_BCH ? "BCH" : "RS",
		 chip->ecc.strength);

	chip->ecc.bytes = DIV_ROUND_UP(bits_per_step, BITS_PER_BYTE);

	switch (mtd->writesize) {
	case 256:
		nand->config |= CONFIG_PS_256;
		break;
	case 512:
		nand->config |= CONFIG_PS_512;
		break;
	case 1024:
		nand->config |= CONFIG_PS_1024;
		break;
	case 2048:
		nand->config |= CONFIG_PS_2048;
		break;
	case 4096:
		nand->config |= CONFIG_PS_4096;
		break;
	default:
		dev_err(ctrl->dev, "Unsupported writesize %d\n",
			mtd->writesize);
		return -ENODEV;
	}

	/* Store complete configuration for HW ECC in config_ecc */
	nand->config_ecc |= nand->config;

	/* Non-HW ECC read/writes complete OOB */
	nand->config |= CONFIG_TAG_BYTE_SIZE(mtd->oobsize - 1);
	writel_relaxed(nand->config, ctrl->regs + CONFIG);

	return 0;
}

static const struct nand_controller_ops tegra_nand_controller_ops = {
	.attach_chip = &tegra_nand_attach_chip,
	.exec_op = tegra_nand_exec_op,
	.setup_interface = tegra_nand_setup_interface,
};

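/*
 * Bind the single supported NAND chip: read the chip select from the
 * child node's "reg" property, claim the optional write-protect GPIO
 * (requested with GPIOD_OUT_LOW, i.e. logically deasserted, assuming the
 * device tree flags the WP line active-low), then scan the chip and
 * register the MTD device.
 */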
static int tegra_nand_chips_init(struct device *dev,
				 struct tegra_nand_controller *ctrl)
{
	struct device_node *np = dev->of_node;
	struct device_node *np_nand;
	int nsels, nchips = of_get_child_count(np);
	struct tegra_nand_chip *nand;
	struct mtd_info *mtd;
	struct nand_chip *chip;
	int ret;
	u32 cs;

	if (nchips != 1) {
		dev_err(dev, "Currently only one NAND chip supported\n");
		return -EINVAL;
	}

	np_nand = of_get_next_child(np, NULL);

	nsels = of_property_count_elems_of_size(np_nand, "reg", sizeof(u32));
	if (nsels != 1) {
		dev_err(dev, "Missing/invalid reg property\n");
		return -EINVAL;
	}

	/* Retrieve CS id, currently only single die NAND supported */
	ret = of_property_read_u32(np_nand, "reg", &cs);
	if (ret) {
		dev_err(dev, "could not retrieve reg property: %d\n", ret);
		return ret;
	}

	nand = devm_kzalloc(dev, sizeof(*nand), GFP_KERNEL);
	if (!nand)
		return -ENOMEM;

	nand->cs[0] = cs;

	nand->wp_gpio = devm_gpiod_get_optional(dev, "wp", GPIOD_OUT_LOW);

	if (IS_ERR(nand->wp_gpio)) {
		ret = PTR_ERR(nand->wp_gpio);
		dev_err(dev, "Failed to request WP GPIO: %d\n", ret);
		return ret;
	}

	chip = &nand->chip;
	chip->controller = &ctrl->controller;

	mtd = nand_to_mtd(chip);

	mtd->dev.parent = dev;
	mtd->owner = THIS_MODULE;

	nand_set_flash_node(chip, np_nand);

	if (!mtd->name)
		mtd->name = "tegra_nand";

	chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA;

	ret = nand_scan(chip, 1);
	if (ret)
		return ret;

	mtd_ooblayout_ecc(mtd, 0, &nand->ecc);

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		dev_err(dev, "Failed to register mtd device: %d\n", ret);
		nand_cleanup(chip);
		return ret;
	}

	ctrl->chip = chip;

	return 0;
}

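/*
 * Probe order matters here: power and clock via runtime PM first, then the
 * controller reset, then interrupt and hardware-status defaults, and only
 * afterwards the chip scan, which issues real commands to the device.
 */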
static int tegra_nand_probe(struct platform_device *pdev)
{
	struct reset_control *rst;
	struct tegra_nand_controller *ctrl;
	int err = 0;

	ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return -ENOMEM;

	ctrl->dev = &pdev->dev;
	platform_set_drvdata(pdev, ctrl);
	nand_controller_init(&ctrl->controller);
	ctrl->controller.ops = &tegra_nand_controller_ops;

	ctrl->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ctrl->regs))
		return PTR_ERR(ctrl->regs);

	rst = devm_reset_control_get(&pdev->dev, "nand");
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	ctrl->clk = devm_clk_get(&pdev->dev, "nand");
	if (IS_ERR(ctrl->clk))
		return PTR_ERR(ctrl->clk);

	err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
	if (err)
		return err;

	/*
	 * This driver doesn't support active power management yet,
	 * so we will simply keep the device resumed.
	 */
	pm_runtime_enable(&pdev->dev);
	err = pm_runtime_resume_and_get(&pdev->dev);
	if (err)
		goto err_dis_pm;

	err = reset_control_reset(rst);
	if (err) {
		dev_err(ctrl->dev, "Failed to reset HW: %d\n", err);
		goto err_put_pm;
	}

	writel_relaxed(HWSTATUS_CMD_DEFAULT, ctrl->regs + HWSTATUS_CMD);
	writel_relaxed(HWSTATUS_MASK_DEFAULT, ctrl->regs + HWSTATUS_MASK);
	writel_relaxed(INT_MASK, ctrl->regs + IER);

	init_completion(&ctrl->command_complete);
	init_completion(&ctrl->dma_complete);

	ctrl->irq = platform_get_irq(pdev, 0);
	if (ctrl->irq < 0) {
		err = ctrl->irq;
		goto err_put_pm;
	}
	err = devm_request_irq(&pdev->dev, ctrl->irq, tegra_nand_irq, 0,
			       dev_name(&pdev->dev), ctrl);
	if (err) {
		dev_err(ctrl->dev, "Failed to get IRQ: %d\n", err);
		goto err_put_pm;
	}

	/*
	 * Clear any stale DMA "done" status before the first transfer
	 * (assumed write-one-to-clear, matching the IRQ handler).
	 */
	writel_relaxed(DMA_MST_CTRL_IS_DONE, ctrl->regs + DMA_MST_CTRL);

	err = tegra_nand_chips_init(ctrl->dev, ctrl);
	if (err)
		goto err_put_pm;

	return 0;

err_put_pm:
	pm_runtime_put_sync_suspend(ctrl->dev);
	pm_runtime_force_suspend(ctrl->dev);
err_dis_pm:
	pm_runtime_disable(&pdev->dev);
	return err;
}

static void tegra_nand_remove(struct platform_device *pdev)
{
	struct tegra_nand_controller *ctrl = platform_get_drvdata(pdev);
	struct nand_chip *chip = ctrl->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);

	WARN_ON(mtd_device_unregister(mtd));

	nand_cleanup(chip);

	pm_runtime_put_sync_suspend(ctrl->dev);
	pm_runtime_force_suspend(ctrl->dev);
}

static int __maybe_unused tegra_nand_runtime_resume(struct device *dev)
{
	struct tegra_nand_controller *ctrl = dev_get_drvdata(dev);
	int err;

	err = clk_prepare_enable(ctrl->clk);
	if (err) {
		dev_err(dev, "Failed to enable clock: %d\n", err);
		return err;
	}

	return 0;
}

static int __maybe_unused tegra_nand_runtime_suspend(struct device *dev)
{
	struct tegra_nand_controller *ctrl = dev_get_drvdata(dev);

	clk_disable_unprepare(ctrl->clk);

	return 0;
}

static const struct dev_pm_ops tegra_nand_pm = {
	SET_RUNTIME_PM_OPS(tegra_nand_runtime_suspend, tegra_nand_runtime_resume,
			   NULL)
};

static const struct of_device_id tegra_nand_of_match[] = {
	{ .compatible = "nvidia,tegra20-nand" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, tegra_nand_of_match);

static struct platform_driver tegra_nand_driver = {
	.driver = {
		.name = "tegra-nand",
		.of_match_table = tegra_nand_of_match,
		.pm = &tegra_nand_pm,
	},
	.probe = tegra_nand_probe,
	.remove = tegra_nand_remove,
};
module_platform_driver(tegra_nand_driver);

MODULE_DESCRIPTION("NVIDIA Tegra NAND driver");
MODULE_AUTHOR("Thierry Reding <thierry.reding@nvidia.com>");
MODULE_AUTHOR("Lucas Stach <dev@lynxeye.de>");
MODULE_AUTHOR("Stefan Agner <stefan@agner.ch>");
MODULE_LICENSE("GPL v2");