1 // SPDX-License-Identifier: GPL-2.0 OR MIT 2 /* 3 * MTK ECC controller driver. 4 * Copyright (C) 2016 MediaTek Inc. 5 * Authors: Xiaolei Li <xiaolei.li@mediatek.com> 6 * Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org> 7 */ 8 9 #include <linux/platform_device.h> 10 #include <linux/dma-mapping.h> 11 #include <linux/interrupt.h> 12 #include <linux/clk.h> 13 #include <linux/module.h> 14 #include <linux/iopoll.h> 15 #include <linux/of.h> 16 #include <linux/of_platform.h> 17 #include <linux/mutex.h> 18 #include <linux/mtd/nand-ecc-mtk.h> 19 20 #define ECC_IDLE_MASK BIT(0) 21 #define ECC_IRQ_EN BIT(0) 22 #define ECC_PG_IRQ_SEL BIT(1) 23 #define ECC_OP_ENABLE (1) 24 #define ECC_OP_DISABLE (0) 25 26 #define ECC_ENCCON (0x00) 27 #define ECC_ENCCNFG (0x04) 28 #define ECC_MS_SHIFT (16) 29 #define ECC_ENCDIADDR (0x08) 30 #define ECC_ENCIDLE (0x0C) 31 #define ECC_DECCON (0x100) 32 #define ECC_DECCNFG (0x104) 33 #define DEC_EMPTY_EN BIT(31) 34 #define DEC_CNFG_CORRECT (0x3 << 12) 35 #define ECC_DECIDLE (0x10C) 36 #define ECC_DECENUM0 (0x114) 37 38 #define ECC_TIMEOUT (500000) 39 40 #define ECC_IDLE_REG(op) ((op) == ECC_ENCODE ? ECC_ENCIDLE : ECC_DECIDLE) 41 #define ECC_CTL_REG(op) ((op) == ECC_ENCODE ? ECC_ENCCON : ECC_DECCON) 42 43 #define ECC_ERRMASK_MT7622 GENMASK(4, 0) 44 #define ECC_ERRMASK_MT2701 GENMASK(5, 0) 45 #define ECC_ERRMASK_MT2712 GENMASK(6, 0) 46 47 struct mtk_ecc_caps { 48 u32 err_mask; 49 u32 err_shift; 50 const u8 *ecc_strength; 51 const u32 *ecc_regs; 52 u8 num_ecc_strength; 53 u8 ecc_mode_shift; 54 u32 parity_bits; 55 int pg_irq_sel; 56 }; 57 58 struct mtk_ecc { 59 struct device *dev; 60 const struct mtk_ecc_caps *caps; 61 void __iomem *regs; 62 struct clk *clk; 63 64 struct completion done; 65 struct mutex lock; 66 u32 sectors; 67 68 u8 *eccdata; 69 }; 70 71 /* ecc strength that each IP supports */ 72 static const u8 ecc_strength_mt2701[] = { 73 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36, 74 40, 44, 48, 52, 56, 60 75 }; 76 77 static const u8 ecc_strength_mt2712[] = { 78 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36, 79 40, 44, 48, 52, 56, 60, 68, 72, 80 80 }; 81 82 static const u8 ecc_strength_mt7622[] = { 83 4, 6, 8, 10, 12 84 }; 85 86 static const u8 ecc_strength_mt7986[] = { 87 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24 88 }; 89 90 enum mtk_ecc_regs { 91 ECC_ENCPAR00, 92 ECC_ENCIRQ_EN, 93 ECC_ENCIRQ_STA, 94 ECC_DECDONE, 95 ECC_DECIRQ_EN, 96 ECC_DECIRQ_STA, 97 }; 98 99 static int mt2701_ecc_regs[] = { 100 [ECC_ENCPAR00] = 0x10, 101 [ECC_ENCIRQ_EN] = 0x80, 102 [ECC_ENCIRQ_STA] = 0x84, 103 [ECC_DECDONE] = 0x124, 104 [ECC_DECIRQ_EN] = 0x200, 105 [ECC_DECIRQ_STA] = 0x204, 106 }; 107 108 static int mt2712_ecc_regs[] = { 109 [ECC_ENCPAR00] = 0x300, 110 [ECC_ENCIRQ_EN] = 0x80, 111 [ECC_ENCIRQ_STA] = 0x84, 112 [ECC_DECDONE] = 0x124, 113 [ECC_DECIRQ_EN] = 0x200, 114 [ECC_DECIRQ_STA] = 0x204, 115 }; 116 117 static int mt7622_ecc_regs[] = { 118 [ECC_ENCPAR00] = 0x10, 119 [ECC_ENCIRQ_EN] = 0x30, 120 [ECC_ENCIRQ_STA] = 0x34, 121 [ECC_DECDONE] = 0x11c, 122 [ECC_DECIRQ_EN] = 0x140, 123 [ECC_DECIRQ_STA] = 0x144, 124 }; 125 126 static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc, 127 enum mtk_ecc_operation op) 128 { 129 struct device *dev = ecc->dev; 130 u32 val; 131 int ret; 132 133 ret = readl_poll_timeout_atomic(ecc->regs + ECC_IDLE_REG(op), val, 134 val & ECC_IDLE_MASK, 135 10, ECC_TIMEOUT); 136 if (ret) 137 dev_warn(dev, "%s NOT idle\n", 138 op == ECC_ENCODE ? "encoder" : "decoder"); 139 } 140 141 static irqreturn_t mtk_ecc_irq(int irq, void *id) 142 { 143 struct mtk_ecc *ecc = id; 144 u32 dec, enc; 145 146 dec = readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_STA]) 147 & ECC_IRQ_EN; 148 if (dec) { 149 dec = readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECDONE]); 150 if (dec & ecc->sectors) { 151 /* 152 * Clear decode IRQ status once again to ensure that 153 * there will be no extra IRQ. 154 */ 155 readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_STA]); 156 ecc->sectors = 0; 157 complete(&ecc->done); 158 } else { 159 return IRQ_HANDLED; 160 } 161 } else { 162 enc = readl(ecc->regs + ecc->caps->ecc_regs[ECC_ENCIRQ_STA]) 163 & ECC_IRQ_EN; 164 if (enc) 165 complete(&ecc->done); 166 else 167 return IRQ_NONE; 168 } 169 170 return IRQ_HANDLED; 171 } 172 173 static int mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config) 174 { 175 u32 ecc_bit, dec_sz, enc_sz; 176 u32 reg, i; 177 178 for (i = 0; i < ecc->caps->num_ecc_strength; i++) { 179 if (ecc->caps->ecc_strength[i] == config->strength) 180 break; 181 } 182 183 if (i == ecc->caps->num_ecc_strength) { 184 dev_err(ecc->dev, "invalid ecc strength %d\n", 185 config->strength); 186 return -EINVAL; 187 } 188 189 ecc_bit = i; 190 191 if (config->op == ECC_ENCODE) { 192 /* configure ECC encoder (in bits) */ 193 enc_sz = config->len << 3; 194 195 reg = ecc_bit | (config->mode << ecc->caps->ecc_mode_shift); 196 reg |= (enc_sz << ECC_MS_SHIFT); 197 writel(reg, ecc->regs + ECC_ENCCNFG); 198 199 if (config->mode != ECC_NFI_MODE) 200 writel(lower_32_bits(config->addr), 201 ecc->regs + ECC_ENCDIADDR); 202 203 } else { 204 /* configure ECC decoder (in bits) */ 205 dec_sz = (config->len << 3) + 206 config->strength * ecc->caps->parity_bits; 207 208 reg = ecc_bit | (config->mode << ecc->caps->ecc_mode_shift); 209 reg |= (dec_sz << ECC_MS_SHIFT) | DEC_CNFG_CORRECT; 210 reg |= DEC_EMPTY_EN; 211 writel(reg, ecc->regs + ECC_DECCNFG); 212 213 if (config->sectors) 214 ecc->sectors = 1 << (config->sectors - 1); 215 } 216 217 return 0; 218 } 219 220 void mtk_ecc_get_stats(struct mtk_ecc *ecc, struct mtk_ecc_stats *stats, 221 int sectors) 222 { 223 u32 offset, i, err; 224 u32 bitflips = 0; 225 226 stats->corrected = 0; 227 stats->failed = 0; 228 229 for (i = 0; i < sectors; i++) { 230 offset = (i >> 2) << 2; 231 err = readl(ecc->regs + ECC_DECENUM0 + offset); 232 err = err >> ((i % 4) * ecc->caps->err_shift); 233 err &= ecc->caps->err_mask; 234 if (err == ecc->caps->err_mask) { 235 /* uncorrectable errors */ 236 stats->failed++; 237 continue; 238 } 239 240 stats->corrected += err; 241 bitflips = max_t(u32, bitflips, err); 242 } 243 244 stats->bitflips = bitflips; 245 } 246 EXPORT_SYMBOL(mtk_ecc_get_stats); 247 248 void mtk_ecc_release(struct mtk_ecc *ecc) 249 { 250 clk_disable_unprepare(ecc->clk); 251 put_device(ecc->dev); 252 } 253 EXPORT_SYMBOL(mtk_ecc_release); 254 255 static void mtk_ecc_hw_init(struct mtk_ecc *ecc) 256 { 257 mtk_ecc_wait_idle(ecc, ECC_ENCODE); 258 writew(ECC_OP_DISABLE, ecc->regs + ECC_ENCCON); 259 260 mtk_ecc_wait_idle(ecc, ECC_DECODE); 261 writel(ECC_OP_DISABLE, ecc->regs + ECC_DECCON); 262 } 263 264 static struct mtk_ecc *mtk_ecc_get(struct device_node *np) 265 { 266 struct platform_device *pdev; 267 struct mtk_ecc *ecc; 268 269 pdev = of_find_device_by_node(np); 270 if (!pdev) 271 return ERR_PTR(-EPROBE_DEFER); 272 273 ecc = platform_get_drvdata(pdev); 274 if (!ecc) { 275 put_device(&pdev->dev); 276 return ERR_PTR(-EPROBE_DEFER); 277 } 278 279 clk_prepare_enable(ecc->clk); 280 mtk_ecc_hw_init(ecc); 281 282 return ecc; 283 } 284 285 struct mtk_ecc *of_mtk_ecc_get(struct device_node *of_node) 286 { 287 struct mtk_ecc *ecc = NULL; 288 struct device_node *np; 289 290 np = of_parse_phandle(of_node, "nand-ecc-engine", 0); 291 /* for backward compatibility */ 292 if (!np) 293 np = of_parse_phandle(of_node, "ecc-engine", 0); 294 if (np) { 295 ecc = mtk_ecc_get(np); 296 of_node_put(np); 297 } 298 299 return ecc; 300 } 301 EXPORT_SYMBOL(of_mtk_ecc_get); 302 303 int mtk_ecc_enable(struct mtk_ecc *ecc, struct mtk_ecc_config *config) 304 { 305 enum mtk_ecc_operation op = config->op; 306 u16 reg_val; 307 int ret; 308 309 ret = mutex_lock_interruptible(&ecc->lock); 310 if (ret) { 311 dev_err(ecc->dev, "interrupted when attempting to lock\n"); 312 return ret; 313 } 314 315 mtk_ecc_wait_idle(ecc, op); 316 317 ret = mtk_ecc_config(ecc, config); 318 if (ret) { 319 mutex_unlock(&ecc->lock); 320 return ret; 321 } 322 323 if (config->mode != ECC_NFI_MODE || op != ECC_ENCODE) { 324 init_completion(&ecc->done); 325 reg_val = ECC_IRQ_EN; 326 /* 327 * For ECC_NFI_MODE, if ecc->caps->pg_irq_sel is 1, then it 328 * means this chip can only generate one ecc irq during page 329 * read / write. If is 0, generate one ecc irq each ecc step. 330 */ 331 if (ecc->caps->pg_irq_sel && config->mode == ECC_NFI_MODE) 332 reg_val |= ECC_PG_IRQ_SEL; 333 if (op == ECC_ENCODE) 334 writew(reg_val, ecc->regs + 335 ecc->caps->ecc_regs[ECC_ENCIRQ_EN]); 336 else 337 writew(reg_val, ecc->regs + 338 ecc->caps->ecc_regs[ECC_DECIRQ_EN]); 339 } 340 341 writew(ECC_OP_ENABLE, ecc->regs + ECC_CTL_REG(op)); 342 343 return 0; 344 } 345 EXPORT_SYMBOL(mtk_ecc_enable); 346 347 void mtk_ecc_disable(struct mtk_ecc *ecc) 348 { 349 enum mtk_ecc_operation op = ECC_ENCODE; 350 351 /* find out the running operation */ 352 if (readw(ecc->regs + ECC_CTL_REG(op)) != ECC_OP_ENABLE) 353 op = ECC_DECODE; 354 355 /* disable it */ 356 mtk_ecc_wait_idle(ecc, op); 357 if (op == ECC_DECODE) { 358 /* 359 * Clear decode IRQ status in case there is a timeout to wait 360 * decode IRQ. 361 */ 362 readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECDONE]); 363 writew(0, ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_EN]); 364 } else { 365 writew(0, ecc->regs + ecc->caps->ecc_regs[ECC_ENCIRQ_EN]); 366 } 367 368 writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op)); 369 370 mutex_unlock(&ecc->lock); 371 } 372 EXPORT_SYMBOL(mtk_ecc_disable); 373 374 int mtk_ecc_wait_done(struct mtk_ecc *ecc, enum mtk_ecc_operation op) 375 { 376 int ret; 377 378 ret = wait_for_completion_timeout(&ecc->done, msecs_to_jiffies(500)); 379 if (!ret) { 380 dev_err(ecc->dev, "%s timeout - interrupt did not arrive)\n", 381 (op == ECC_ENCODE) ? "encoder" : "decoder"); 382 return -ETIMEDOUT; 383 } 384 385 return 0; 386 } 387 EXPORT_SYMBOL(mtk_ecc_wait_done); 388 389 int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config, 390 u8 *data, u32 bytes) 391 { 392 dma_addr_t addr; 393 u32 len; 394 int ret; 395 396 addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE); 397 ret = dma_mapping_error(ecc->dev, addr); 398 if (ret) { 399 dev_err(ecc->dev, "dma mapping error\n"); 400 return -EINVAL; 401 } 402 403 config->op = ECC_ENCODE; 404 config->addr = addr; 405 ret = mtk_ecc_enable(ecc, config); 406 if (ret) { 407 dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE); 408 return ret; 409 } 410 411 ret = mtk_ecc_wait_done(ecc, ECC_ENCODE); 412 if (ret) 413 goto timeout; 414 415 mtk_ecc_wait_idle(ecc, ECC_ENCODE); 416 417 /* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */ 418 len = (config->strength * ecc->caps->parity_bits + 7) >> 3; 419 420 /* write the parity bytes generated by the ECC back to temp buffer */ 421 __ioread32_copy(ecc->eccdata, 422 ecc->regs + ecc->caps->ecc_regs[ECC_ENCPAR00], 423 round_up(len, 4)); 424 425 /* copy into possibly unaligned OOB region with actual length */ 426 memcpy(data + bytes, ecc->eccdata, len); 427 timeout: 428 429 dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE); 430 mtk_ecc_disable(ecc); 431 432 return ret; 433 } 434 EXPORT_SYMBOL(mtk_ecc_encode); 435 436 void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p) 437 { 438 const u8 *ecc_strength = ecc->caps->ecc_strength; 439 int i; 440 441 for (i = 0; i < ecc->caps->num_ecc_strength; i++) { 442 if (*p <= ecc_strength[i]) { 443 if (!i) 444 *p = ecc_strength[i]; 445 else if (*p != ecc_strength[i]) 446 *p = ecc_strength[i - 1]; 447 return; 448 } 449 } 450 451 *p = ecc_strength[ecc->caps->num_ecc_strength - 1]; 452 } 453 EXPORT_SYMBOL(mtk_ecc_adjust_strength); 454 455 unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc) 456 { 457 return ecc->caps->parity_bits; 458 } 459 EXPORT_SYMBOL(mtk_ecc_get_parity_bits); 460 461 static const struct mtk_ecc_caps mtk_ecc_caps_mt2701 = { 462 .err_mask = ECC_ERRMASK_MT2701, 463 .err_shift = 8, 464 .ecc_strength = ecc_strength_mt2701, 465 .ecc_regs = mt2701_ecc_regs, 466 .num_ecc_strength = 20, 467 .ecc_mode_shift = 5, 468 .parity_bits = 14, 469 .pg_irq_sel = 0, 470 }; 471 472 static const struct mtk_ecc_caps mtk_ecc_caps_mt2712 = { 473 .err_mask = ECC_ERRMASK_MT2712, 474 .err_shift = 8, 475 .ecc_strength = ecc_strength_mt2712, 476 .ecc_regs = mt2712_ecc_regs, 477 .num_ecc_strength = 23, 478 .ecc_mode_shift = 5, 479 .parity_bits = 14, 480 .pg_irq_sel = 1, 481 }; 482 483 static const struct mtk_ecc_caps mtk_ecc_caps_mt7622 = { 484 .err_mask = ECC_ERRMASK_MT7622, 485 .err_shift = 5, 486 .ecc_strength = ecc_strength_mt7622, 487 .ecc_regs = mt7622_ecc_regs, 488 .num_ecc_strength = 5, 489 .ecc_mode_shift = 4, 490 .parity_bits = 13, 491 .pg_irq_sel = 0, 492 }; 493 494 static const struct mtk_ecc_caps mtk_ecc_caps_mt7986 = { 495 .err_mask = ECC_ERRMASK_MT7622, 496 .err_shift = 8, 497 .ecc_strength = ecc_strength_mt7986, 498 .ecc_regs = mt2712_ecc_regs, 499 .num_ecc_strength = 11, 500 .ecc_mode_shift = 5, 501 .parity_bits = 14, 502 .pg_irq_sel = 1, 503 }; 504 505 static const struct of_device_id mtk_ecc_dt_match[] = { 506 { 507 .compatible = "mediatek,mt2701-ecc", 508 .data = &mtk_ecc_caps_mt2701, 509 }, { 510 .compatible = "mediatek,mt2712-ecc", 511 .data = &mtk_ecc_caps_mt2712, 512 }, { 513 .compatible = "mediatek,mt7622-ecc", 514 .data = &mtk_ecc_caps_mt7622, 515 }, { 516 .compatible = "mediatek,mt7986-ecc", 517 .data = &mtk_ecc_caps_mt7986, 518 }, 519 {}, 520 }; 521 522 static int mtk_ecc_probe(struct platform_device *pdev) 523 { 524 struct device *dev = &pdev->dev; 525 struct mtk_ecc *ecc; 526 u32 max_eccdata_size; 527 int irq, ret; 528 529 ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL); 530 if (!ecc) 531 return -ENOMEM; 532 533 ecc->caps = of_device_get_match_data(dev); 534 535 max_eccdata_size = ecc->caps->num_ecc_strength - 1; 536 max_eccdata_size = ecc->caps->ecc_strength[max_eccdata_size]; 537 max_eccdata_size = (max_eccdata_size * ecc->caps->parity_bits + 7) >> 3; 538 max_eccdata_size = round_up(max_eccdata_size, 4); 539 ecc->eccdata = devm_kzalloc(dev, max_eccdata_size, GFP_KERNEL); 540 if (!ecc->eccdata) 541 return -ENOMEM; 542 543 ecc->regs = devm_platform_ioremap_resource(pdev, 0); 544 if (IS_ERR(ecc->regs)) 545 return PTR_ERR(ecc->regs); 546 547 ecc->clk = devm_clk_get(dev, NULL); 548 if (IS_ERR(ecc->clk)) { 549 dev_err(dev, "failed to get clock: %ld\n", PTR_ERR(ecc->clk)); 550 return PTR_ERR(ecc->clk); 551 } 552 553 irq = platform_get_irq(pdev, 0); 554 if (irq < 0) 555 return irq; 556 557 ret = dma_set_mask(dev, DMA_BIT_MASK(32)); 558 if (ret) { 559 dev_err(dev, "failed to set DMA mask\n"); 560 return ret; 561 } 562 563 ret = devm_request_irq(dev, irq, mtk_ecc_irq, 0x0, "mtk-ecc", ecc); 564 if (ret) { 565 dev_err(dev, "failed to request irq\n"); 566 return -EINVAL; 567 } 568 569 ecc->dev = dev; 570 mutex_init(&ecc->lock); 571 platform_set_drvdata(pdev, ecc); 572 dev_info(dev, "probed\n"); 573 574 return 0; 575 } 576 577 #ifdef CONFIG_PM_SLEEP 578 static int mtk_ecc_suspend(struct device *dev) 579 { 580 struct mtk_ecc *ecc = dev_get_drvdata(dev); 581 582 clk_disable_unprepare(ecc->clk); 583 584 return 0; 585 } 586 587 static int mtk_ecc_resume(struct device *dev) 588 { 589 struct mtk_ecc *ecc = dev_get_drvdata(dev); 590 int ret; 591 592 ret = clk_prepare_enable(ecc->clk); 593 if (ret) { 594 dev_err(dev, "failed to enable clk\n"); 595 return ret; 596 } 597 598 return 0; 599 } 600 601 static SIMPLE_DEV_PM_OPS(mtk_ecc_pm_ops, mtk_ecc_suspend, mtk_ecc_resume); 602 #endif 603 604 MODULE_DEVICE_TABLE(of, mtk_ecc_dt_match); 605 606 static struct platform_driver mtk_ecc_driver = { 607 .probe = mtk_ecc_probe, 608 .driver = { 609 .name = "mtk-ecc", 610 .of_match_table = mtk_ecc_dt_match, 611 #ifdef CONFIG_PM_SLEEP 612 .pm = &mtk_ecc_pm_ops, 613 #endif 614 }, 615 }; 616 617 module_platform_driver(mtk_ecc_driver); 618 619 MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>"); 620 MODULE_DESCRIPTION("MTK Nand ECC Driver"); 621 MODULE_LICENSE("Dual MIT/GPL"); 622