// SPDX-License-Identifier: GPL-2.0+
/*
 * Freescale GPMI NAND Flash Driver
 *
 * Copyright (C) 2010-2015 Freescale Semiconductor, Inc.
 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/sched/task_stack.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pinctrl/consumer.h>
#include <linux/dma/mxs-dma.h>
#include <linux/string_choices.h>
#include "gpmi-nand.h"
#include "gpmi-regs.h"
#include "bch-regs.h"

/* Resource names for the GPMI NAND driver. */
#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME "gpmi-nand"
#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME "bch"
#define GPMI_NAND_BCH_INTERRUPT_RES_NAME "bch"

/* Converts time to clock cycles */
#define TO_CYCLES(duration, period) DIV_ROUND_UP_ULL(duration, period)

/*
 * Offsets of the hardware "set" and "clear" aliases of a register:
 * writing a mask to reg + MXS_SET_ADDR sets those bits, writing it to
 * reg + MXS_CLR_ADDR clears them (see the writel() calls below).
 */
#define MXS_SET_ADDR 0x4
#define MXS_CLR_ADDR 0x8

/*
 * Clear the bit and poll it cleared. This is usually called with
 * a reset address and mask being either SFTRST(bit 31) or CLKGATE
 * (bit 30).
 *
 * Returns 0 when the bit cleared in time, non-zero when the poll
 * budget (0x400 reads) expired first.
 */
static int clear_poll_bit(void __iomem *addr, u32 mask)
{
	int timeout = 0x400;

	/* clear the bit */
	writel(mask, addr + MXS_CLR_ADDR);

	/*
	 * SFTRST needs 3 GPMI clocks to settle, the reference manual
	 * recommends to wait 1us.
	 */
	udelay(1);

	/* poll the bit becoming clear */
	while ((readl(addr) & mask) && --timeout)
		/* nothing */;

	return !timeout;
}

#define MODULE_CLKGATE (1 << 30)
#define MODULE_SFTRST (1 << 31)
/*
 * The current mxs_reset_block() will do two things:
 * [1] enable the module.
 * [2] reset the module.
 *
 * In most of the cases, it's ok.
 * But in MX23, there is a hardware bug in the BCH block (see erratum #2847).
 * If you try to soft reset the BCH block, it becomes unusable until
 * the next hard reset. This case occurs in the NAND boot mode. When the board
 * boots by NAND, the ROM of the chip will initialize the BCH blocks itself.
 * So If the driver tries to reset the BCH again, the BCH will not work anymore.
 * You will see a DMA timeout in this case. The bug has been fixed
 * in the following chips, such as MX28.
 *
 * To avoid this bug, just add a new parameter `just_enable` for
 * the mxs_reset_block(), and rewrite it here.
 */
static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
{
	int ret;
	int timeout = 0x400;

	/* clear and poll SFTRST */
	ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
	if (unlikely(ret))
		goto error;

	/* clear CLKGATE */
	writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);

	if (!just_enable) {
		/* set SFTRST to reset the block */
		writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR);
		udelay(1);

		/* poll CLKGATE becoming set */
		while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout)
			/* nothing */;
		if (unlikely(!timeout))
			goto error;
	}

	/* clear and poll SFTRST */
	ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
	if (unlikely(ret))
		goto error;

	/* clear and poll CLKGATE */
	ret = clear_poll_bit(reset_addr, MODULE_CLKGATE);
	if (unlikely(ret))
		goto error;

	return 0;

error:
	pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
	return -ETIMEDOUT;
}

/*
 * Enable (v == true) or disable (v == false) every clock in
 * this->resources.clock[], stopping at the first NULL entry.
 * On an enable failure, every clock enabled so far is rolled back.
 */
static int __gpmi_enable_clk(struct gpmi_nand_data *this, bool v)
{
	struct clk *clk;
	int ret;
	int i;

	for (i = 0; i < GPMI_CLK_MAX; i++) {
		clk = this->resources.clock[i];
		if (!clk)
			break;

		if (v) {
			ret = clk_prepare_enable(clk);
			if (ret)
				goto err_clk;
		} else {
			clk_disable_unprepare(clk);
		}
	}
	return 0;

err_clk:
	/* unwind the clocks enabled by earlier iterations */
	for (; i > 0; i--)
		clk_disable_unprepare(this->resources.clock[i - 1]);
	return ret;
}

#define gpmi_enable_clk(x) __gpmi_enable_clk(x, true)
#define gpmi_disable_clk(x) __gpmi_enable_clk(x, false)

/*
 * One-time controller bring-up: reset the GPMI and BCH blocks and
 * program the base CTRL1 configuration (NAND mode, IRQ polarity,
 * write-protect off, BCH ECC, decoupled chip select).
 * Runs with the device runtime-resumed for the duration of the setup.
 */
static int gpmi_init(struct gpmi_nand_data *this)
{
	struct resources *r = &this->resources;
	int ret;

	ret = pm_runtime_resume_and_get(this->dev);
	if (ret < 0)
		return ret;

	ret = gpmi_reset_block(r->gpmi_regs, false);
	if (ret)
		goto err_out;

	/*
	 * Reset BCH here, too. We got failures otherwise :(
	 * See later BCH reset for explanation of MX23 and MX28 handling
	 */
	ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
	if (ret)
		goto err_out;

	/* Choose NAND mode. */
	writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);

	/* Set the IRQ polarity. */
	writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY,
	       r->gpmi_regs + HW_GPMI_CTRL1_SET);

	/* Disable Write-Protection. */
	writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET);

	/* Select BCH ECC. */
	writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);

	/*
	 * Decouple the chip select from dma channel. We use dma0 for all
	 * the chips, force all NAND RDY_BUSY inputs to be sourced from
	 * RDY_BUSY0.
	 */
	writel(BM_GPMI_CTRL1_DECOUPLE_CS | BM_GPMI_CTRL1_GANGED_RDYBUSY,
	       r->gpmi_regs + HW_GPMI_CTRL1_SET);

err_out:
	pm_runtime_mark_last_busy(this->dev);
	pm_runtime_put_autosuspend(this->dev);
	return ret;
}

/* This function is very useful. It is called only when the bug occurs.
 */
static void gpmi_dump_info(struct gpmi_nand_data *this)
{
	struct resources *r = &this->resources;
	struct bch_geometry *geo = &this->bch_geometry;
	u32 reg;
	int i;

	/* Dump the GPMI register file, one word per 0x10 stride. */
	dev_err(this->dev, "Show GPMI registers :\n");
	for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) {
		reg = readl(r->gpmi_regs + i * 0x10);
		dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
	}

	/* start to print out the BCH info */
	dev_err(this->dev, "Show BCH registers :\n");
	for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) {
		reg = readl(r->bch_regs + i * 0x10);
		dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
	}
	dev_err(this->dev, "BCH Geometry :\n"
		"GF length              : %u\n"
		"ECC Strength           : %u\n"
		"Page Size in Bytes     : %u\n"
		"Metadata Size in Bytes : %u\n"
		"ECC0 Chunk Size in Bytes: %u\n"
		"ECCn Chunk Size in Bytes: %u\n"
		"ECC Chunk Count        : %u\n"
		"Payload Size in Bytes  : %u\n"
		"Auxiliary Size in Bytes: %u\n"
		"Auxiliary Status Offset: %u\n"
		"Block Mark Byte Offset : %u\n"
		"Block Mark Bit Offset  : %u\n",
		geo->gf_len,
		geo->ecc_strength,
		geo->page_size,
		geo->metadata_size,
		geo->ecc0_chunk_size,
		geo->eccn_chunk_size,
		geo->ecc_chunk_count,
		geo->payload_size,
		geo->auxiliary_size,
		geo->auxiliary_status_offset,
		geo->block_mark_byte_offset,
		geo->block_mark_bit_offset);
}

/*
 * Check the computed BCH geometry against what the controller and the
 * NAND core can actually support. Also publishes step/strength into the
 * generic NAND ECC context so nand_ecc_is_strong_enough() can judge it.
 */
static bool gpmi_check_ecc(struct gpmi_nand_data *this)
{
	struct nand_chip *chip = &this->nand;
	struct bch_geometry *geo = &this->bch_geometry;
	struct nand_device *nand = &chip->base;
	struct nand_ecc_props *conf = &nand->ecc.ctx.conf;

	conf->step_size = geo->eccn_chunk_size;
	conf->strength = geo->ecc_strength;

	/* Do the sanity check. */
	if (GPMI_IS_MXS(this)) {
		/* The mx23/mx28 only support the GF13.
		 */
		if (geo->gf_len == 14)
			return false;
	}

	if (geo->ecc_strength > this->devdata->bch_max_ecc_strength)
		return false;

	if (!nand_ecc_is_strong_enough(nand))
		return false;

	return true;
}

/* check if bbm locates in data chunk rather than ecc chunk */
static bool bbm_in_data_chunk(struct gpmi_nand_data *this,
			      unsigned int *chunk_num)
{
	struct bch_geometry *geo = &this->bch_geometry;
	struct nand_chip *chip = &this->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int i, j;

	/* This calculation only holds when all chunks are the same size. */
	if (geo->ecc0_chunk_size != geo->eccn_chunk_size) {
		dev_err(this->dev,
			"The size of ecc0_chunk must equal to eccn_chunk\n");
		return false;
	}

	/* i = number of whole (chunk + ecc) units before the BBM position */
	i = (mtd->writesize * 8 - geo->metadata_size * 8) /
	    (geo->gf_len * geo->ecc_strength +
	     geo->eccn_chunk_size * 8);

	/* j = bit offset of the BBM within unit i */
	j = (mtd->writesize * 8 - geo->metadata_size * 8) -
	    (geo->gf_len * geo->ecc_strength +
	     geo->eccn_chunk_size * 8) * i;

	/* BBM falls inside the data part of the unit, not its ECC bits */
	if (j < geo->eccn_chunk_size * 8) {
		*chunk_num = i + 1;
		dev_dbg(this->dev, "Set ecc to %d and bbm in chunk %d\n",
			geo->ecc_strength, *chunk_num);
		return true;
	}

	return false;
}

/*
 * If we can get the ECC information from the nand chip, we do not
 * need to calculate them ourselves.
 *
 * We may have available oob space in this case.
 */
static int set_geometry_by_ecc_info(struct gpmi_nand_data *this,
				    unsigned int ecc_strength,
				    unsigned int ecc_step)
{
	struct bch_geometry *geo = &this->bch_geometry;
	struct nand_chip *chip = &this->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int block_mark_bit_offset;

	switch (ecc_step) {
	case SZ_512:
		geo->gf_len = 13;
		break;
	case SZ_1K:
		geo->gf_len = 14;
		break;
	default:
		dev_err(this->dev,
			"unsupported nand chip. ecc bits : %d, ecc size : %d\n",
			nanddev_get_ecc_requirements(&chip->base)->strength,
			nanddev_get_ecc_requirements(&chip->base)->step_size);
		return -EINVAL;
	}
	geo->ecc0_chunk_size = ecc_step;
	geo->eccn_chunk_size = ecc_step;
	/* The BCH block wants an even strength. */
	geo->ecc_strength = round_up(ecc_strength, 2);
	if (!gpmi_check_ecc(this))
		return -EINVAL;

	/* Keep the C >= O */
	if (geo->eccn_chunk_size < mtd->oobsize) {
		dev_err(this->dev,
			"unsupported nand chip. ecc size: %d, oob size : %d\n",
			ecc_step, mtd->oobsize);
		return -EINVAL;
	}

	/* The default value, see comment in the legacy_set_geometry(). */
	geo->metadata_size = 10;

	geo->ecc_chunk_count = mtd->writesize / geo->eccn_chunk_size;

	/*
	 * Now, the NAND chip with 2K page(data chunk is 512byte) shows below:
	 *
	 *    |                               P                             |
	 *    |<----------------------------------------------------------->|
	 *    |                                                             |
	 *    |                                      (Block Mark)           |
	 *    |                   P'                     |   |          |   |
	 *    |<---------------------------------------->| D |          | O'|
	 *    |                                          |<->|          |<->|
	 *    V                                          V   V          V   V
	 *    +---+----------+-+----------+-+----------+-+----------+-+-----+
	 *    | M |   data   |E|   data   |E|   data   |E|   data   |E|     |
	 *    +---+----------+-+----------+-+----------+-+----------+-+-----+
	 *                                                 ^               ^
	 *                                                 |       O       |
	 *                                                 |<------------->|
	 *                                                 |               |
	 *
	 * P : the page size for BCH module.
	 * E : The ECC strength.
	 * G : the length of Galois Field.
	 * N : The chunk count of per page.
	 * M : the metasize of per page.
	 * C : the ecc chunk size, aka the "data" above.
	 * P': the nand chip's page size.
	 * O : the nand chip's oob size.
	 * O': the free oob.
	 *
	 * The formula for P is :
	 *
	 *             E * G * N
	 *        P = ------------ + P' + M
	 *                 8
	 *
	 * The position of block mark moves forward in the ECC-based view
	 * of page, and the delta is:
	 *
	 *              E * G * (N - 1)
	 *        D = (---------------- + M)
	 *                     8
	 *
	 * Please see the comment in legacy_set_geometry().
	 * With the condition C >= O , we still can get same result.
	 * So the bit position of the physical block mark within the ECC-based
	 * view of the page is :
	 *        (P' - D) * 8
	 */
	geo->page_size = mtd->writesize + geo->metadata_size +
		(geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;

	geo->payload_size = mtd->writesize;

	geo->auxiliary_status_offset = ALIGN(geo->metadata_size, 4);
	geo->auxiliary_size = ALIGN(geo->metadata_size, 4)
			+ ALIGN(geo->ecc_chunk_count, 4);

	if (!this->swap_block_mark)
		return 0;

	/* For bit swap. */
	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
				+ geo->metadata_size * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset = block_mark_bit_offset % 8;
	return 0;
}

/*
 * Calculate the ECC strength by hand:
 *	E : The ECC strength.
 *	G : the length of Galois Field.
 *	N : The chunk count of per page.
 *	O : the oobsize of the NAND chip.
 *	M : the metasize of per page.
 *
 * The formula is :
 *	    E * G * N
 *	  ------------ <= (O - M)
 *	        8
 *
 * So, we get E by:
 *	            (O - M) * 8
 *	    E <= -------------
 *	             G * N
 */
static inline int get_ecc_strength(struct gpmi_nand_data *this)
{
	struct bch_geometry *geo = &this->bch_geometry;
	struct mtd_info *mtd = nand_to_mtd(&this->nand);
	int ecc_strength;

	ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8)
			/ (geo->gf_len * geo->ecc_chunk_count);

	/* We need the minor even number.
	 */
	return round_down(ecc_strength, 2);
}

/*
 * Geometry selection for chips with oob > 1024 bytes: start from the
 * strongest ECC the platform allows and walk down (in steps of 2) until
 * the bad block marker lands in a data chunk; if that never happens,
 * fall back to the chip's minimum ECC with an extra chunk dedicated to
 * the metadata (ecc_for_meta layout).
 */
static int set_geometry_for_large_oob(struct gpmi_nand_data *this)
{
	struct bch_geometry *geo = &this->bch_geometry;
	struct nand_chip *chip = &this->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_ecc_props *requirements =
		nanddev_get_ecc_requirements(&chip->base);
	unsigned int block_mark_bit_offset;
	unsigned int max_ecc;
	unsigned int bbm_chunk;
	unsigned int i;

	/* sanity check for the minimum ecc nand required */
	if (!(requirements->strength > 0 &&
	      requirements->step_size > 0))
		return -EINVAL;
	geo->ecc_strength = requirements->strength;

	/* check if platform can support this nand */
	if (!gpmi_check_ecc(this)) {
		dev_err(this->dev,
			"unsupported NAND chip, minimum ecc required %d\n",
			geo->ecc_strength);
		return -EINVAL;
	}

	/* calculate the maximum ecc platform can support */
	geo->metadata_size = 10;
	geo->gf_len = 14;
	geo->ecc0_chunk_size = 1024;
	geo->eccn_chunk_size = 1024;
	geo->ecc_chunk_count = mtd->writesize / geo->eccn_chunk_size;
	max_ecc = min(get_ecc_strength(this),
		      this->devdata->bch_max_ecc_strength);

	/*
	 * search a supported ecc strength that makes bbm
	 * located in data chunk
	 */
	geo->ecc_strength = max_ecc;
	while (!(geo->ecc_strength < requirements->strength)) {
		if (bbm_in_data_chunk(this, &bbm_chunk))
			goto geo_setting;
		geo->ecc_strength -= 2;
	}

	/*
	 * if none of them works, keep using the minimum ecc
	 * nand required but changing ecc page layout
	 */
	geo->ecc_strength = requirements->strength;
	/* add extra ecc for meta data */
	geo->ecc0_chunk_size = 0;
	geo->ecc_chunk_count = (mtd->writesize / geo->eccn_chunk_size) + 1;
	geo->ecc_for_meta = 1;
	/* check if oob can afford this extra ecc chunk */
	if (mtd->oobsize * 8 < geo->metadata_size * 8 +
	    geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) {
		dev_err(this->dev, "unsupported NAND chip with new layout\n");
		return -EINVAL;
	}

	/* calculate in which chunk bbm located */
	bbm_chunk = (mtd->writesize * 8 - geo->metadata_size * 8 -
		     geo->gf_len * geo->ecc_strength) /
		    (geo->gf_len * geo->ecc_strength +
		     geo->eccn_chunk_size * 8) + 1;

geo_setting:

	geo->page_size = mtd->writesize + geo->metadata_size +
		(geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
	geo->payload_size = mtd->writesize;

	/*
	 * The auxiliary buffer contains the metadata and the ECC status. The
	 * metadata is padded to the nearest 32-bit boundary. The ECC status
	 * contains one byte for every ECC chunk, and is also padded to the
	 * nearest 32-bit boundary.
	 */
	geo->auxiliary_status_offset = ALIGN(geo->metadata_size, 4);
	geo->auxiliary_size = ALIGN(geo->metadata_size, 4)
			+ ALIGN(geo->ecc_chunk_count, 4);

	if (!this->swap_block_mark)
		return 0;

	/* calculate the number of ecc chunk behind the bbm */
	i = (mtd->writesize / geo->eccn_chunk_size) - bbm_chunk + 1;

	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - i)
				+ geo->metadata_size * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset = block_mark_bit_offset % 8;

	dev_dbg(this->dev, "BCH Geometry :\n"
		"GF length              : %u\n"
		"ECC Strength           : %u\n"
		"Page Size in Bytes     : %u\n"
		"Metadata Size in Bytes : %u\n"
		"ECC0 Chunk Size in Bytes: %u\n"
		"ECCn Chunk Size in Bytes: %u\n"
		"ECC Chunk Count        : %u\n"
		"Payload Size in Bytes  : %u\n"
		"Auxiliary Size in Bytes: %u\n"
		"Auxiliary Status Offset: %u\n"
		"Block Mark Byte Offset : %u\n"
		"Block Mark Bit Offset  : %u\n"
		"Block Mark in chunk    : %u\n"
		"Ecc for Meta data      : %u\n",
		geo->gf_len,
		geo->ecc_strength,
		geo->page_size,
		geo->metadata_size,
		geo->ecc0_chunk_size,
		geo->eccn_chunk_size,
		geo->ecc_chunk_count,
		geo->payload_size,
		geo->auxiliary_size,
		geo->auxiliary_status_offset,
		geo->block_mark_byte_offset,
		geo->block_mark_bit_offset,
		bbm_chunk,
		geo->ecc_for_meta);

	return 0;
}

/*
 * Original geometry derivation: fixed 512-byte chunks (doubled until
 * chunk size >= oobsize), ECC strength computed from the free oob space.
 */
static int legacy_set_geometry(struct gpmi_nand_data *this)
{
	struct bch_geometry *geo = &this->bch_geometry;
	struct mtd_info *mtd = nand_to_mtd(&this->nand);
	unsigned int metadata_size;
	unsigned int status_size;
	unsigned int block_mark_bit_offset;

	/*
	 * The size of the metadata can be changed, though we set it to 10
	 * bytes now. But it can't be too large, because we have to save
	 * enough space for BCH.
	 */
	geo->metadata_size = 10;

	/* The default for the length of Galois Field. */
	geo->gf_len = 13;

	/* The default for chunk size. */
	geo->ecc0_chunk_size = 512;
	geo->eccn_chunk_size = 512;
	while (geo->eccn_chunk_size < mtd->oobsize) {
		geo->ecc0_chunk_size *= 2; /* keep C >= O */
		geo->eccn_chunk_size *= 2; /* keep C >= O */
		geo->gf_len = 14;
	}

	geo->ecc_chunk_count = mtd->writesize / geo->eccn_chunk_size;

	/* We use the same ECC strength for all chunks. */
	geo->ecc_strength = get_ecc_strength(this);
	if (!gpmi_check_ecc(this)) {
		dev_err(this->dev,
			"ecc strength: %d cannot be supported by the controller (%d)\n"
			"try to use minimum ecc strength that NAND chip required\n",
			geo->ecc_strength,
			this->devdata->bch_max_ecc_strength);
		return -EINVAL;
	}

	geo->page_size = mtd->writesize + geo->metadata_size +
		(geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
	geo->payload_size = mtd->writesize;

	/*
	 * The auxiliary buffer contains the metadata and the ECC status. The
	 * metadata is padded to the nearest 32-bit boundary.
 The ECC status
	 * contains one byte for every ECC chunk, and is also padded to the
	 * nearest 32-bit boundary.
	 */
	metadata_size = ALIGN(geo->metadata_size, 4);
	status_size = ALIGN(geo->ecc_chunk_count, 4);

	geo->auxiliary_size = metadata_size + status_size;
	geo->auxiliary_status_offset = metadata_size;

	if (!this->swap_block_mark)
		return 0;

	/*
	 * We need to compute the byte and bit offsets of
	 * the physical block mark within the ECC-based view of the page.
	 *
	 * NAND chip with 2K page shows below:
	 *                                             (Block Mark)
	 *                                                   |      |
	 *                                                   |  D   |
	 *                                                   |<---->|
	 *                                                   V      V
	 *    +---+----------+-+----------+-+----------+-+----------+-+
	 *    | M |   data   |E|   data   |E|   data   |E|   data   |E|
	 *    +---+----------+-+----------+-+----------+-+----------+-+
	 *
	 * The position of block mark moves forward in the ECC-based view
	 * of page, and the delta is:
	 *
	 *                   E * G * (N - 1)
	 *             D = (---------------- + M)
	 *                          8
	 *
	 * With the formula to compute the ECC strength, and the condition
	 * : C >= O         (C is the ecc chunk size)
	 *
	 * It's easy to deduce to the following result:
	 *
	 *         E * G       (O - M)      C - M         C - M
	 *      ----------- <= ------- <=  --------  <  ---------
	 *           8            N           N          (N - 1)
	 *
	 *  So, we get:
	 *
	 *                   E * G * (N - 1)
	 *             D = (---------------- + M) < C
	 *                          8
	 *
	 *  The above inequality means the position of block mark
	 *  within the ECC-based view of the page is still in the data chunk,
	 *  and it's NOT in the ECC bits of the chunk.
	 *
	 *  Use the following to compute the bit position of the
	 *  physical block mark within the ECC-based view of the page:
	 *          (page_size - D) * 8
	 *
	 *  --Huang Shijie
	 */
	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
				+ geo->metadata_size * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset = block_mark_bit_offset % 8;
	return 0;
}

/*
 * Try the geometry strategies in order of preference:
 *   1. legacy layout (default, unless the chip has a large oob or the
 *      "fsl,use-minimum-ecc" DT property is set),
 *   2. large-oob layout for chips with oobsize > 1024,
 *   3. the chip's own minimum ECC requirements.
 * Returns 0 as soon as one of them succeeds.
 */
static int common_nfc_set_geometry(struct gpmi_nand_data *this)
{
	struct nand_chip *chip = &this->nand;
	struct mtd_info *mtd = nand_to_mtd(&this->nand);
	const struct nand_ecc_props *requirements =
		nanddev_get_ecc_requirements(&chip->base);
	bool use_minimun_ecc;
	int err;

	use_minimun_ecc = of_property_read_bool(this->dev->of_node,
						"fsl,use-minimum-ecc");

	/* use legacy bch geometry settings by default */
	if ((!use_minimun_ecc && mtd->oobsize < 1024) ||
	    !(requirements->strength > 0 && requirements->step_size > 0)) {
		dev_dbg(this->dev, "use legacy bch geometry\n");
		err = legacy_set_geometry(this);
		if (!err)
			return 0;
	}

	/* for large oob nand */
	if (mtd->oobsize > 1024) {
		dev_dbg(this->dev, "use large oob bch geometry\n");
		err = set_geometry_for_large_oob(this);
		if (!err)
			return 0;
	}

	/* otherwise use the minimum ecc nand chip required */
	dev_dbg(this->dev, "use minimum ecc bch geometry\n");
	err = set_geometry_by_ecc_info(this, requirements->strength,
				       requirements->step_size);
	if (err)
		dev_err(this->dev, "none of the bch geometry setting works\n");

	return err;
}

/* Configures the geometry for BCH.
*/ 736 static int bch_set_geometry(struct gpmi_nand_data *this) 737 { 738 struct resources *r = &this->resources; 739 int ret; 740 741 ret = common_nfc_set_geometry(this); 742 if (ret) 743 return ret; 744 745 ret = pm_runtime_resume_and_get(this->dev); 746 if (ret < 0) { 747 return ret; 748 } 749 750 /* 751 * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this 752 * chip, otherwise it will lock up. So we skip resetting BCH on the MX23. 753 * and MX28. 754 */ 755 ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this)); 756 if (ret) 757 goto err_out; 758 759 /* Set *all* chip selects to use layout 0. */ 760 writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT); 761 762 ret = 0; 763 err_out: 764 pm_runtime_mark_last_busy(this->dev); 765 pm_runtime_put_autosuspend(this->dev); 766 767 return ret; 768 } 769 770 /* 771 * <1> Firstly, we should know what's the GPMI-clock means. 772 * The GPMI-clock is the internal clock in the gpmi nand controller. 773 * If you set 100MHz to gpmi nand controller, the GPMI-clock's period 774 * is 10ns. Mark the GPMI-clock's period as GPMI-clock-period. 775 * 776 * <2> Secondly, we should know what's the frequency on the nand chip pins. 777 * The frequency on the nand chip pins is derived from the GPMI-clock. 778 * We can get it from the following equation: 779 * 780 * F = G / (DS + DH) 781 * 782 * F : the frequency on the nand chip pins. 783 * G : the GPMI clock, such as 100MHz. 784 * DS : GPMI_HW_GPMI_TIMING0:DATA_SETUP 785 * DH : GPMI_HW_GPMI_TIMING0:DATA_HOLD 786 * 787 * <3> Thirdly, when the frequency on the nand chip pins is above 33MHz, 788 * the nand EDO(extended Data Out) timing could be applied. 789 * The GPMI implements a feedback read strobe to sample the read data. 790 * The feedback read strobe can be delayed to support the nand EDO timing 791 * where the read strobe may deasserts before the read data is valid, and 792 * read data is valid for some time after read strobe. 
793 * 794 * The following figure illustrates some aspects of a NAND Flash read: 795 * 796 * |<---tREA---->| 797 * | | 798 * | | | 799 * |<--tRP-->| | 800 * | | | 801 * __ ___|__________________________________ 802 * RDN \________/ | 803 * | 804 * /---------\ 805 * Read Data --------------< >--------- 806 * \---------/ 807 * | | 808 * |<-D->| 809 * FeedbackRDN ________ ____________ 810 * \___________/ 811 * 812 * D stands for delay, set in the HW_GPMI_CTRL1:RDN_DELAY. 813 * 814 * 815 * <4> Now, we begin to describe how to compute the right RDN_DELAY. 816 * 817 * 4.1) From the aspect of the nand chip pins: 818 * Delay = (tREA + C - tRP) {1} 819 * 820 * tREA : the maximum read access time. 821 * C : a constant to adjust the delay. default is 4000ps. 822 * tRP : the read pulse width, which is exactly: 823 * tRP = (GPMI-clock-period) * DATA_SETUP 824 * 825 * 4.2) From the aspect of the GPMI nand controller: 826 * Delay = RDN_DELAY * 0.125 * RP {2} 827 * 828 * RP : the DLL reference period. 829 * if (GPMI-clock-period > DLL_THRETHOLD) 830 * RP = GPMI-clock-period / 2; 831 * else 832 * RP = GPMI-clock-period; 833 * 834 * Set the HW_GPMI_CTRL1:HALF_PERIOD if GPMI-clock-period 835 * is greater DLL_THRETHOLD. In other SOCs, the DLL_THRETHOLD 836 * is 16000ps, but in mx6q, we use 12000ps. 
837 * 838 * 4.3) since {1} equals {2}, we get: 839 * 840 * (tREA + 4000 - tRP) * 8 841 * RDN_DELAY = ----------------------- {3} 842 * RP 843 */ 844 static int gpmi_nfc_compute_timings(struct gpmi_nand_data *this, 845 const struct nand_sdr_timings *sdr) 846 { 847 struct gpmi_nfc_hardware_timing *hw = &this->hw; 848 struct resources *r = &this->resources; 849 unsigned int dll_threshold_ps = this->devdata->max_chain_delay; 850 unsigned int period_ps, reference_period_ps; 851 unsigned int data_setup_cycles, data_hold_cycles, addr_setup_cycles; 852 unsigned int tRP_ps; 853 bool use_half_period; 854 int sample_delay_ps, sample_delay_factor; 855 unsigned int busy_timeout_cycles; 856 u8 wrn_dly_sel; 857 unsigned long clk_rate, min_rate; 858 u64 busy_timeout_ps; 859 860 if (sdr->tRC_min >= 30000) { 861 /* ONFI non-EDO modes [0-3] */ 862 hw->clk_rate = 22000000; 863 min_rate = 0; 864 wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS; 865 } else if (sdr->tRC_min >= 25000) { 866 /* ONFI EDO mode 4 */ 867 hw->clk_rate = 80000000; 868 min_rate = 22000000; 869 wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY; 870 } else { 871 /* ONFI EDO mode 5 */ 872 hw->clk_rate = 100000000; 873 min_rate = 80000000; 874 wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY; 875 } 876 877 clk_rate = clk_round_rate(r->clock[0], hw->clk_rate); 878 if (clk_rate <= min_rate) { 879 dev_err(this->dev, "clock setting: expected %ld, got %ld\n", 880 hw->clk_rate, clk_rate); 881 return -ENOTSUPP; 882 } 883 884 hw->clk_rate = clk_rate; 885 /* SDR core timings are given in picoseconds */ 886 period_ps = div_u64((u64)NSEC_PER_SEC * 1000, hw->clk_rate); 887 888 addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps); 889 data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps); 890 data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps); 891 busy_timeout_ps = max(sdr->tBERS_max, sdr->tPROG_max); 892 busy_timeout_cycles = TO_CYCLES(busy_timeout_ps, period_ps); 893 894 hw->timing0 = 
		BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) |
		BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) |
		BF_GPMI_TIMING0_DATA_SETUP(data_setup_cycles);
	/* BUSY_TIMEOUT is in units of 4096 GPMI cycles. */
	hw->timing1 = BF_GPMI_TIMING1_BUSY_TIMEOUT(DIV_ROUND_UP(busy_timeout_cycles, 4096));

	/*
	 * Derive NFC ideal delay from {3}:
	 *
	 *                     (tREA + 4000 - tRP) * 8
	 *         RDN_DELAY = -----------------------
	 *                           RP
	 */
	if (period_ps > dll_threshold_ps) {
		use_half_period = true;
		reference_period_ps = period_ps / 2;
	} else {
		use_half_period = false;
		reference_period_ps = period_ps;
	}

	tRP_ps = data_setup_cycles * period_ps;
	sample_delay_ps = (sdr->tREA_max + 4000 - tRP_ps) * 8;
	if (sample_delay_ps > 0)
		sample_delay_factor = sample_delay_ps / reference_period_ps;
	else
		sample_delay_factor = 0;

	hw->ctrl1n = BF_GPMI_CTRL1_WRN_DLY_SEL(wrn_dly_sel);
	if (sample_delay_factor)
		hw->ctrl1n |= BF_GPMI_CTRL1_RDN_DELAY(sample_delay_factor) |
			      BM_GPMI_CTRL1_DLL_ENABLE |
			      (use_half_period ? BM_GPMI_CTRL1_HALF_PERIOD : 0);
	return 0;
}

/*
 * Program the timings previously computed by gpmi_nfc_compute_timings()
 * into the hardware: set the GPMI clock rate, write TIMING0/TIMING1 and
 * the CTRL1 delay fields, then wait for the DLL to settle.
 */
static int gpmi_nfc_apply_timings(struct gpmi_nand_data *this)
{
	struct gpmi_nfc_hardware_timing *hw = &this->hw;
	struct resources *r = &this->resources;
	void __iomem *gpmi_regs = r->gpmi_regs;
	unsigned int dll_wait_time_us;
	int ret;

	/* Clock dividers do NOT guarantee a clean clock signal on its output
	 * during the change of the divide factor on i.MX6Q/UL/SX. On i.MX7/8,
	 * all clock dividers provide these guarantee.
	 */
	if (GPMI_IS_MX6Q(this) || GPMI_IS_MX6SX(this))
		clk_disable_unprepare(r->clock[0]);

	/*
	 * NOTE(review): on MX6Q/SX, failing here returns with the clock
	 * still disabled (no re-enable on this error path) — confirm
	 * whether callers recover from that.
	 */
	ret = clk_set_rate(r->clock[0], hw->clk_rate);
	if (ret) {
		dev_err(this->dev, "cannot set clock rate to %lu Hz: %d\n", hw->clk_rate, ret);
		return ret;
	}

	if (GPMI_IS_MX6Q(this) || GPMI_IS_MX6SX(this)) {
		ret = clk_prepare_enable(r->clock[0]);
		if (ret)
			return ret;
	}

	writel(hw->timing0, gpmi_regs + HW_GPMI_TIMING0);
	writel(hw->timing1, gpmi_regs + HW_GPMI_TIMING1);

	/*
	 * Clear several CTRL1 fields, DLL must be disabled when setting
	 * RDN_DELAY or HALF_PERIOD.
	 */
	writel(BM_GPMI_CTRL1_CLEAR_MASK, gpmi_regs + HW_GPMI_CTRL1_CLR);
	writel(hw->ctrl1n, gpmi_regs + HW_GPMI_CTRL1_SET);

	/* Wait 64 clock cycles before using the GPMI after enabling the DLL */
	dll_wait_time_us = USEC_PER_SEC / hw->clk_rate * 64;
	if (!dll_wait_time_us)
		dll_wait_time_us = 1;

	/* Wait for the DLL to settle. */
	udelay(dll_wait_time_us);

	return 0;
}

/*
 * ->setup_interface() hook: validate the requested SDR timings against
 * the controller's capabilities and, for a real chip (chipnr >= 0),
 * compute the hardware timings to be applied later.
 */
static int gpmi_setup_interface(struct nand_chip *chip, int chipnr,
				const struct nand_interface_config *conf)
{
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	const struct nand_sdr_timings *sdr;
	int ret;

	/* Retrieve required NAND timings */
	sdr = nand_get_sdr_timings(conf);
	if (IS_ERR(sdr))
		return PTR_ERR(sdr);

	/* Only MX28/MX6 GPMI controller can reach EDO timings */
	if (sdr->tRC_min <= 25000 && !this->devdata->support_edo_timing)
		return -ENOTSUPP;

	/* Stop here if this call was just a check */
	if (chipnr < 0)
		return 0;

	/* Do the actual derivation of the controller timings */
	ret = gpmi_nfc_compute_timings(this, sdr);
	if (ret)
		return ret;

	/* Actual register writes happen later, via gpmi_nfc_apply_timings() */
	this->hw.must_apply_timings = true;

	return 0;
}

/* Clears a BCH interrupt.
 */
static void gpmi_clear_bch(struct gpmi_nand_data *this)
{
	struct resources *r = &this->resources;
	writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR);
}

static struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
{
	/* We use the DMA channel 0 to access all the nand chips. */
	return this->dma_chans[0];
}

/* This will be called after the DMA operation is finished. */
static void dma_irq_callback(void *param)
{
	struct gpmi_nand_data *this = param;
	struct completion *dma_c = &this->dma_done;

	complete(dma_c);
}

/* BCH completion interrupt: ack the IRQ and wake the waiter. */
static irqreturn_t bch_irq(int irq, void *cookie)
{
	struct gpmi_nand_data *this = cookie;

	gpmi_clear_bch(this);
	complete(&this->bch_done);
	return IRQ_HANDLED;
}

static int gpmi_raw_len_to_len(struct gpmi_nand_data *this, int raw_len)
{
	/*
	 * raw_len is the length to read/write including bch data which
	 * we are passed in exec_op. Calculate the data length from it.
	 */
	if (this->bch)
		return ALIGN_DOWN(raw_len, this->bch_geometry.eccn_chunk_size);
	else
		return raw_len;
}

/*
 * Can we use the upper's buffer directly for DMA?
 *
 * Returns true when the caller's buffer was mapped directly, false when
 * the driver's bounce buffer (this->data_buffer_dma) is used instead —
 * in that case the caller must copy read data out of the bounce buffer.
 */
static bool prepare_data_dma(struct gpmi_nand_data *this, const void *buf,
			     int raw_len, struct scatterlist *sgl,
			     enum dma_data_direction dr)
{
	int ret;
	int len = gpmi_raw_len_to_len(this, raw_len);

	/* first try to map the upper buffer directly */
	if (virt_addr_valid(buf) && !object_is_on_stack(buf)) {
		sg_init_one(sgl, buf, len);
		ret = dma_map_sg(this->dev, sgl, 1, dr);
		if (ret == 0)
			goto map_fail;

		return true;
	}

map_fail:
	/* We have to use our own DMA buffer. */
	sg_init_one(sgl, this->data_buffer_dma, len);

	if (dr == DMA_TO_DEVICE && buf != this->data_buffer_dma)
		memcpy(this->data_buffer_dma, buf, len);

	/*
	 * NOTE(review): the return value of this dma_map_sg() is not
	 * checked — presumably mapping the driver-owned coherent buffer
	 * cannot fail here; confirm.
	 */
	dma_map_sg(this->dev, sgl, 1, dr);

	return false;
}

/* add our owner bbt descriptor */
static uint8_t scan_ff_pattern[] = { 0xff };
static struct nand_bbt_descr gpmi_bbt_descr = {
	.options	= 0,
	.offs		= 0,
	.len		= 1,
	.pattern	= scan_ff_pattern
};

/*
 * We may change the layout if we can get the ECC info from the datasheet,
 * else we will use all the (page + OOB).
 */
static int gpmi_ooblayout_ecc(struct mtd_info *mtd, int section,
			      struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	struct bch_geometry *geo = &this->bch_geometry;

	if (section)
		return -ERANGE;

	/* Everything up to the BCH page boundary counts as ECC. */
	oobregion->offset = 0;
	oobregion->length = geo->page_size - mtd->writesize;

	return 0;
}

static int gpmi_ooblayout_free(struct mtd_info *mtd, int section,
			       struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	struct bch_geometry *geo = &this->bch_geometry;

	if (section)
		return -ERANGE;

	/* The available oob size we have.
 */
	if (geo->page_size < mtd->writesize + mtd->oobsize) {
		oobregion->offset = geo->page_size - mtd->writesize;
		oobregion->length = mtd->oobsize - oobregion->offset;
	}
	/* else: the BCH layout consumes the whole OOB; region stays empty. */

	return 0;
}

/* Clocks required on i.MX23/i.MX28. */
static const char * const gpmi_clks_for_mx2x[] = {
	"gpmi_io",
};

static const struct mtd_ooblayout_ops gpmi_ooblayout_ops = {
	.ecc = gpmi_ooblayout_ecc,
	.free = gpmi_ooblayout_free,
};

/* Per-SoC limits: max BCH strength, propagation delay (ps), clock list. */
static const struct gpmi_devdata gpmi_devdata_imx23 = {
	.type = IS_MX23,
	.bch_max_ecc_strength = 20,
	.max_chain_delay = 16000,
	.clks = gpmi_clks_for_mx2x,
	.clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
};

static const struct gpmi_devdata gpmi_devdata_imx28 = {
	.type = IS_MX28,
	.bch_max_ecc_strength = 20,
	.max_chain_delay = 16000,
	.support_edo_timing = true,
	.clks = gpmi_clks_for_mx2x,
	.clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
};

static const char * const gpmi_clks_for_mx6[] = {
	"gpmi_io", "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
};

static const struct gpmi_devdata gpmi_devdata_imx6q = {
	.type = IS_MX6Q,
	.bch_max_ecc_strength = 40,
	.max_chain_delay = 12000,
	.support_edo_timing = true,
	.clks = gpmi_clks_for_mx6,
	.clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
};

static const struct gpmi_devdata gpmi_devdata_imx6sx = {
	.type = IS_MX6SX,
	.bch_max_ecc_strength = 62,
	.max_chain_delay = 12000,
	.support_edo_timing = true,
	.clks = gpmi_clks_for_mx6,
	.clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
};

static const char * const gpmi_clks_for_mx7d[] = {
	"gpmi_io", "gpmi_bch_apb",
};

static const struct gpmi_devdata gpmi_devdata_imx7d = {
	.type = IS_MX7D,
	.bch_max_ecc_strength = 62,
	.max_chain_delay = 12000,
	.support_edo_timing = true,
	.clks = gpmi_clks_for_mx7d,
	.clks_count = ARRAY_SIZE(gpmi_clks_for_mx7d),
};

static const char *gpmi_clks_for_mx8qxp[GPMI_CLK_MAX] = {
	"gpmi_io", "gpmi_apb", "gpmi_bch", "gpmi_bch_apb",
};

static const struct gpmi_devdata gpmi_devdata_imx8qxp = {
	.type = IS_MX8QXP,
	.bch_max_ecc_strength = 62,
	.max_chain_delay = 12000,
	.support_edo_timing = true,
	.clks = gpmi_clks_for_mx8qxp,
	.clks_count = ARRAY_SIZE(gpmi_clks_for_mx8qxp),
};

/*
 * Map the named platform resource and stash it in this->resources.
 *
 * NOTE(review): an unrecognised @res_name is only logged -- the function
 * still returns 0, so the mapping is silently dropped. All current callers
 * pass one of the two known names.
 */
static int acquire_register_block(struct gpmi_nand_data *this,
				  const char *res_name)
{
	struct platform_device *pdev = this->pdev;
	struct resources *res = &this->resources;
	void __iomem *p;

	p = devm_platform_ioremap_resource_byname(pdev, res_name);
	if (IS_ERR(p))
		return PTR_ERR(p);

	if (!strcmp(res_name, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME))
		res->gpmi_regs = p;
	else if (!strcmp(res_name, GPMI_NAND_BCH_REGS_ADDR_RES_NAME))
		res->bch_regs = p;
	else
		dev_err(this->dev, "unknown resource name : %s\n", res_name);

	return 0;
}

/* Look up the BCH interrupt by name and install @irq_h (devm-managed). */
static int acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
{
	struct platform_device *pdev = this->pdev;
	const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME;
	int err;

	err = platform_get_irq_byname(pdev, res_name);
	if (err < 0)
		return err;

	err = devm_request_irq(this->dev, err, irq_h, 0, res_name, this);
	if (err)
		dev_err(this->dev, "error requesting BCH IRQ\n");

	return err;
}

/* Release every DMA channel we hold; safe to call on a partial set. */
static void release_dma_channels(struct gpmi_nand_data *this)
{
	unsigned int i;
	for (i = 0; i < DMA_CHANS; i++)
		if (this->dma_chans[i]) {
			dma_release_channel(this->dma_chans[i]);
			this->dma_chans[i] = NULL;
		}
}

/* Request the single "rx-tx" DMA channel (see get_dma_chan()). */
static int acquire_dma_channels(struct gpmi_nand_data *this)
{
	struct platform_device *pdev = this->pdev;
	struct dma_chan *dma_chan;
	int ret =
0;

	/* request dma channel */
	dma_chan = dma_request_chan(&pdev->dev, "rx-tx");
	if (IS_ERR(dma_chan)) {
		ret = dev_err_probe(this->dev, PTR_ERR(dma_chan),
				    "DMA channel request failed\n");
		release_dma_channels(this);
	} else {
		this->dma_chans[0] = dma_chan;
	}

	return ret;
}

/*
 * Get (devm-managed) every clock named in the per-SoC devdata table and
 * store it in this->resources.clock[], in table order.
 */
static int gpmi_get_clks(struct gpmi_nand_data *this)
{
	struct resources *r = &this->resources;
	struct clk *clk;
	int err, i;

	for (i = 0; i < this->devdata->clks_count; i++) {
		clk = devm_clk_get(this->dev, this->devdata->clks[i]);
		if (IS_ERR(clk)) {
			err = PTR_ERR(clk);
			goto err_clock;
		}

		r->clock[i] = clk;
	}

	return 0;

err_clock:
	dev_dbg(this->dev, "failed in finding the clocks.\n");
	return err;
}

/*
 * Grab everything the controller needs: GPMI and BCH register blocks,
 * the BCH IRQ, the DMA channel and the clocks. Registers and IRQ are
 * devm-managed; only the DMA channel needs explicit unwinding here.
 */
static int acquire_resources(struct gpmi_nand_data *this)
{
	int ret;

	ret = acquire_register_block(this, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME);
	if (ret)
		goto exit_regs;

	ret = acquire_register_block(this, GPMI_NAND_BCH_REGS_ADDR_RES_NAME);
	if (ret)
		goto exit_regs;

	ret = acquire_bch_irq(this, bch_irq);
	if (ret)
		goto exit_regs;

	ret = acquire_dma_channels(this);
	if (ret)
		goto exit_regs;

	ret = gpmi_get_clks(this);
	if (ret)
		goto exit_clock;
	return 0;

exit_clock:
	release_dma_channels(this);
exit_regs:
	return ret;
}

static void release_resources(struct gpmi_nand_data *this)
{
	release_dma_channels(this);
}

/*
 * Free the buffers allocated by gpmi_alloc_dma_buffer(). Also used as its
 * error path, so it must tolerate a partially-allocated state.
 */
static void gpmi_free_dma_buffer(struct gpmi_nand_data *this)
{
	struct device *dev = this->dev;
	struct bch_geometry *geo = &this->bch_geometry;

	if (this->auxiliary_virt && virt_addr_valid(this->auxiliary_virt))
		dma_free_coherent(dev, geo->auxiliary_size,
				  this->auxiliary_virt,
				  this->auxiliary_phys);
	kfree(this->data_buffer_dma);
	kfree(this->raw_buffer);

	this->data_buffer_dma	= NULL;
	this->raw_buffer	= NULL;
}

/* Allocate the DMA buffers */
static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
{
	struct bch_geometry *geo = &this->bch_geometry;
	struct device *dev = this->dev;
	struct mtd_info *mtd = nand_to_mtd(&this->nand);

	/*
	 * [2] Allocate a read/write data buffer.
	 * The gpmi_alloc_dma_buffer can be called twice.
	 * We allocate a PAGE_SIZE length buffer if gpmi_alloc_dma_buffer
	 * is called before the NAND identification; and we allocate a
	 * buffer of the real NAND page size when the gpmi_alloc_dma_buffer
	 * is called after.
	 */
	this->data_buffer_dma = kzalloc(mtd->writesize ?: PAGE_SIZE,
					GFP_DMA | GFP_KERNEL);
	if (this->data_buffer_dma == NULL)
		goto error_alloc;

	this->auxiliary_virt = dma_alloc_coherent(dev, geo->auxiliary_size,
						  &this->auxiliary_phys, GFP_DMA);
	if (!this->auxiliary_virt)
		goto error_alloc;

	/* Scratch space for raw page accesses: payload plus OOB. */
	this->raw_buffer = kzalloc((mtd->writesize ?: PAGE_SIZE) + mtd->oobsize, GFP_KERNEL);
	if (!this->raw_buffer)
		goto error_alloc;

	return 0;

error_alloc:
	gpmi_free_dma_buffer(this);
	return -ENOMEM;
}

/*
 * Handles block mark swapping.
 * It can be called in swapping the block mark, or swapping it back,
 * because the operations are the same.
 */
static void block_mark_swapping(struct gpmi_nand_data *this,
				void *payload, void *auxiliary)
{
	struct bch_geometry *nfc_geo = &this->bch_geometry;
	unsigned char *p;
	unsigned char *a;
	unsigned int bit;
	unsigned char mask;
	unsigned char from_data;
	unsigned char from_oob;

	if (!this->swap_block_mark)
		return;

	/*
	 * If control arrives here, we're swapping. Make some convenience
	 * variables.
 */
	bit = nfc_geo->block_mark_bit_offset;
	p = payload + nfc_geo->block_mark_byte_offset;
	a = auxiliary;

	/*
	 * Get the byte from the data area that overlays the block mark. Since
	 * the ECC engine applies its own view to the bits in the page, the
	 * physical block mark won't (in general) appear on a byte boundary in
	 * the data.
	 */
	from_data = (p[0] >> bit) | (p[1] << (8 - bit));

	/* Get the byte from the OOB. */
	from_oob = a[0];

	/* Swap them. */
	a[0] = from_data;

	/* Splice from_oob back into the payload across the byte boundary. */
	mask = (0x1 << bit) - 1;
	p[0] = (p[0] & mask) | (from_oob << bit);

	mask = ~0 << bit;
	p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
}

/*
 * Walk the per-chunk BCH status bytes in the auxiliary buffer for chunks
 * [first, last) and fold them into the MTD ECC statistics. Chunks flagged
 * uncorrectable are re-checked as possibly-erased pages before being
 * counted as failures. Returns the maximum number of bitflips seen in any
 * single chunk.
 */
static int gpmi_count_bitflips(struct nand_chip *chip, void *buf, int first,
			       int last, int meta)
{
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	struct bch_geometry *nfc_geo = &this->bch_geometry;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i;
	unsigned char *status;
	unsigned int max_bitflips = 0;

	/* Loop over status bytes, accumulating ECC status.
 */
	status = this->auxiliary_virt + ALIGN(meta, 4);

	for (i = first; i < last; i++, status++) {
		if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
			continue;

		if (*status == STATUS_UNCORRECTABLE) {
			int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
			u8 *eccbuf = this->raw_buffer;
			int offset, bitoffset;
			int eccbytes;
			int flips;

			/* Read ECC bytes into our internal raw_buffer */
			offset = nfc_geo->metadata_size * 8;
			offset += ((8 * nfc_geo->eccn_chunk_size) + eccbits) * (i + 1);
			offset -= eccbits;
			bitoffset = offset % 8;
			eccbytes = DIV_ROUND_UP(offset + eccbits, 8);
			offset /= 8;
			eccbytes -= offset;
			nand_change_read_column_op(chip, offset, eccbuf,
						   eccbytes, false);

			/*
			 * ECC data are not byte aligned and we may have
			 * in-band data in the first and last byte of
			 * eccbuf. Set non-eccbits to one so that
			 * nand_check_erased_ecc_chunk() does not count them
			 * as bitflips.
			 */
			if (bitoffset)
				eccbuf[0] |= GENMASK(bitoffset - 1, 0);

			bitoffset = (bitoffset + eccbits) % 8;
			if (bitoffset)
				eccbuf[eccbytes - 1] |= GENMASK(7, bitoffset);

			/*
			 * The ECC hardware has an uncorrectable ECC status
			 * code in case we have bitflips in an erased page. As
			 * nothing was written into this subpage the ECC is
			 * obviously wrong and we can not trust it. We assume
			 * at this point that we are reading an erased page and
			 * try to correct the bitflips in buffer up to
			 * ecc_strength bitflips. If this is a page with random
			 * data, we exceed this number of bitflips and have a
			 * ECC failure. Otherwise we use the corrected buffer.
			 */
			if (i == 0) {
				/* The first block includes metadata */
				flips = nand_check_erased_ecc_chunk(
						buf + i * nfc_geo->eccn_chunk_size,
						nfc_geo->eccn_chunk_size,
						eccbuf, eccbytes,
						this->auxiliary_virt,
						nfc_geo->metadata_size,
						nfc_geo->ecc_strength);
			} else {
				flips = nand_check_erased_ecc_chunk(
						buf + i * nfc_geo->eccn_chunk_size,
						nfc_geo->eccn_chunk_size,
						eccbuf, eccbytes,
						NULL, 0,
						nfc_geo->ecc_strength);
			}

			if (flips > 0) {
				/* Erased page with few flips: corrected. */
				max_bitflips = max_t(unsigned int, max_bitflips,
						     flips);
				mtd->ecc_stats.corrected += flips;
				continue;
			}

			/* Genuinely uncorrectable. */
			mtd->ecc_stats.failed++;
			continue;
		}

		/* Correctable: the status byte is the bitflip count. */
		mtd->ecc_stats.corrected += *status;
		max_bitflips = max_t(unsigned int, max_bitflips, *status);
	}

	return max_bitflips;
}

/*
 * Compute the standard full-page BCH layout registers (shadowed in
 * bch_flashlayout0/1, written to the hardware elsewhere).
 *
 * NOTE(review): the layout fields appear to encode half the ECC strength
 * (>> 1) -- presumably the register stores strength/2; confirm against
 * bch-regs.h / the reference manual.
 */
static void gpmi_bch_layout_std(struct gpmi_nand_data *this)
{
	struct bch_geometry *geo = &this->bch_geometry;
	unsigned int ecc_strength = geo->ecc_strength >> 1;
	unsigned int gf_len = geo->gf_len;
	unsigned int block0_size = geo->ecc0_chunk_size;
	unsigned int blockn_size = geo->eccn_chunk_size;

	this->bch_flashlayout0 =
		BF_BCH_FLASH0LAYOUT0_NBLOCKS(geo->ecc_chunk_count - 1) |
		BF_BCH_FLASH0LAYOUT0_META_SIZE(geo->metadata_size) |
		BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) |
		BF_BCH_FLASH0LAYOUT0_GF(gf_len, this) |
		BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block0_size, this);

	this->bch_flashlayout1 =
		BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(geo->page_size) |
		BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) |
		BF_BCH_FLASH0LAYOUT1_GF(gf_len, this) |
		BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(blockn_size, this);
}

/* ECC-based full page read; returns max bitflips or a negative errno. */
static int gpmi_ecc_read_page(struct nand_chip *chip, uint8_t *buf,
			      int oob_required, int page)
{
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct bch_geometry *geo = &this->bch_geometry;
	unsigned int max_bitflips;
	int ret;

	gpmi_bch_layout_std(this);
	this->bch = true;

	ret = nand_read_page_op(chip, page, 0, buf, geo->page_size);
	if (ret)
		return ret;

	max_bitflips = gpmi_count_bitflips(chip, buf, 0,
					   geo->ecc_chunk_count,
					   geo->auxiliary_status_offset);

	/* handle the block mark swapping */
	block_mark_swapping(this, buf, this->auxiliary_virt);

	if (oob_required) {
		/*
		 * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
		 * for details about our policy for delivering the OOB.
		 *
		 * We fill the caller's buffer with set bits, and then copy the
		 * block mark to the caller's buffer. Note that, if block mark
		 * swapping was necessary, it has already been done, so we can
		 * rely on the first byte of the auxiliary buffer to contain
		 * the block mark.
		 */
		memset(chip->oob_poi, ~0, mtd->oobsize);
		chip->oob_poi[0] = ((uint8_t *)this->auxiliary_virt)[0];
	}

	return max_bitflips;
}

/* Fake a virtual small page for the subpage read */
static int gpmi_ecc_read_subpage(struct nand_chip *chip, uint32_t offs,
				 uint32_t len, uint8_t *buf, int page)
{
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	struct bch_geometry *geo = &this->bch_geometry;
	int size = chip->ecc.size; /* ECC chunk size */
	int meta, n, page_size;
	unsigned int max_bitflips;
	unsigned int ecc_strength;
	int first, last, marker_pos;
	int ecc_parity_size;
	int col = 0;
	int ret;

	/* The size of ECC parity */
	ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;

	/* Align it with the chunk size */
	first = offs / size;
	last = (offs + len - 1) / size;

	if (this->swap_block_mark) {
		/*
		 * Find the chunk which contains the Block Marker.
		 * If this chunk is in the range of [first, last],
		 * we have to read out the whole page.
		 * Why? since we had swapped the data at the position of Block
		 * Marker to the metadata which is bound with the chunk 0.
		 */
		marker_pos = geo->block_mark_byte_offset / size;
		if (last >= marker_pos && first <= marker_pos) {
			dev_dbg(this->dev,
				"page:%d, first:%d, last:%d, marker at:%d\n",
				page, first, last, marker_pos);
			return gpmi_ecc_read_page(chip, buf, 0, page);
		}
	}

	/*
	 * if there is an ECC dedicate for meta:
	 *     - need to add an extra ECC size when calculating col and page_size,
	 *	 if the meta size is NOT zero.
	 *     - ecc0_chunk size need to set to the same size as other chunks,
	 *	 if the meta size is zero.
	 */

	meta = geo->metadata_size;
	if (first) {
		/* Not starting at chunk 0: skip the leading chunks/parity. */
		if (geo->ecc_for_meta)
			col = meta + ecc_parity_size
				+ (size + ecc_parity_size) * first;
		else
			col = meta + (size + ecc_parity_size) * first;

		meta = 0;
		buf = buf + first * size;
	}

	/* Same value as computed above; kept for clarity. */
	ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;
	n = last - first + 1;

	if (geo->ecc_for_meta && meta)
		page_size = meta + ecc_parity_size
			    + (size + ecc_parity_size) * n;
	else
		page_size = meta + (size + ecc_parity_size) * n;

	ecc_strength = geo->ecc_strength >> 1;

	/* Program a temporary BCH layout describing just this sub-read. */
	this->bch_flashlayout0 = BF_BCH_FLASH0LAYOUT0_NBLOCKS(
		(geo->ecc_for_meta ? n : n - 1)) |
		BF_BCH_FLASH0LAYOUT0_META_SIZE(meta) |
		BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) |
		BF_BCH_FLASH0LAYOUT0_GF(geo->gf_len, this) |
		BF_BCH_FLASH0LAYOUT0_DATA0_SIZE((geo->ecc_for_meta ?
		0 : geo->ecc0_chunk_size), this);

	this->bch_flashlayout1 = BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size) |
		BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) |
		BF_BCH_FLASH0LAYOUT1_GF(geo->gf_len, this) |
		BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(geo->eccn_chunk_size, this);

	this->bch = true;

	ret = nand_read_page_op(chip, page, col, buf, page_size);
	if (ret)
		return ret;

	dev_dbg(this->dev, "page:%d(%d:%d)%d, chunk:(%d:%d), BCH PG size:%d\n",
		page, offs, len, col, first, n, page_size);

	max_bitflips = gpmi_count_bitflips(chip, buf, first, last, meta);

	return max_bitflips;
}

/* ECC-based page write; the OOB metadata comes from chip->oob_poi. */
static int gpmi_ecc_write_page(struct nand_chip *chip, const uint8_t *buf,
			       int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	struct bch_geometry *nfc_geo = &this->bch_geometry;

	dev_dbg(this->dev, "ecc write page.\n");

	gpmi_bch_layout_std(this);
	this->bch = true;

	memcpy(this->auxiliary_virt, chip->oob_poi, nfc_geo->auxiliary_size);

	if (this->swap_block_mark) {
		/*
		 * When doing bad block marker swapping we must always copy the
		 * input buffer as we can't modify the const buffer.
		 */
		memcpy(this->data_buffer_dma, buf, mtd->writesize);
		buf = this->data_buffer_dma;
		block_mark_swapping(this, this->data_buffer_dma,
				    this->auxiliary_virt);
	}

	return nand_prog_page_op(chip, page, 0, buf, nfc_geo->page_size);
}

/*
 * There are several places in this driver where we have to handle the OOB and
 * block marks. This is the function where things are the most complicated, so
 * this is where we try to explain it all. All the other places refer back to
 * here.
 *
 * These are the rules, in order of decreasing importance:
 *
 * 1) Nothing the caller does can be allowed to imperil the block mark.
 *
 * 2) In read operations, the first byte of the OOB we return must reflect the
 *    true state of the block mark, no matter where that block mark appears in
 *    the physical page.
 *
 * 3) ECC-based read operations return an OOB full of set bits (since we never
 *    allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
 *    return).
 *
 * 4) "Raw" read operations return a direct view of the physical bytes in the
 *    page, using the conventional definition of which bytes are data and which
 *    are OOB. This gives the caller a way to see the actual, physical bytes
 *    in the page, without the distortions applied by our ECC engine.
 *
 *
 * What we do for this specific read operation depends on two questions:
 *
 * 1) Are we doing a "raw" read, or an ECC-based read?
 *
 * 2) Are we using block mark swapping or transcription?
 *
 * There are four cases, illustrated by the following Karnaugh map:
 *
 *                    |           Raw           |         ECC-based       |
 *       -------------+-------------------------+-------------------------+
 *                    | Read the conventional   |                         |
 *                    | OOB at the end of the   |                         |
 *       Swapping     | page and return it. It  |                         |
 *                    | contains exactly what   |                         |
 *                    | we want.                | Read the block mark and |
 *       -------------+-------------------------+ return it in a buffer   |
 *                    | Read the conventional   | full of set bits.       |
 *                    | OOB at the end of the   |                         |
 *                    | page and also the block |                         |
 *       Transcribing | mark in the metadata.   |                         |
 *                    | Copy the block mark     |                         |
 *                    | into the first byte of  |                         |
 *                    | the OOB.                |                         |
 *       -------------+-------------------------+-------------------------+
 *
 * Note that we break rule #4 in the Transcribing/Raw case because we're not
 * giving an accurate view of the actual, physical bytes in the page (we're
 * overwriting the block mark). That's OK because it's more important to follow
 * rule #2.
 *
 * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
 * easy. When reading a page, for example, the NAND Flash MTD code calls our
 * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
 * ECC-based or raw view of the page is implicit in which function it calls
 * (there is a similar pair of ECC-based/raw functions for writing).
 */
static int gpmi_ecc_read_oob(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	int ret;

	/* clear the OOB buffer */
	memset(chip->oob_poi, ~0, mtd->oobsize);

	/* Read out the conventional OOB. */
	ret = nand_read_page_op(chip, page, mtd->writesize, chip->oob_poi,
				mtd->oobsize);
	if (ret)
		return ret;

	/*
	 * Now, we want to make sure the block mark is correct. In the
	 * non-transcribing case (!GPMI_IS_MX23()), we already have it.
	 * Otherwise, we need to explicitly read it.
	 */
	if (GPMI_IS_MX23(this)) {
		/* Read the block mark into the first byte of the OOB buffer. */
		ret = nand_read_page_op(chip, page, 0, chip->oob_poi, 1);
		if (ret)
			return ret;
	}

	return 0;
}

/* Program only the free OOB region; refused on non-SLC and full layouts. */
static int gpmi_ecc_write_oob(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct mtd_oob_region of = { };

	/* Do we have available oob area?
 */
	mtd_ooblayout_free(mtd, 0, &of);
	if (!of.length)
		return -EPERM;

	if (!nand_is_slc(chip))
		return -EPERM;

	return nand_prog_page_op(chip, page, mtd->writesize + of.offset,
				 chip->oob_poi + of.offset, of.length);
}

/*
 * This function reads a NAND page without involving the ECC engine (no HW
 * ECC correction).
 * The tricky part in the GPMI/BCH controller is that it stores ECC bits
 * inline (interleaved with payload DATA), and do not align data chunk on
 * byte boundaries.
 * We thus need to take care moving the payload data and ECC bits stored in the
 * page into the provided buffers, which is why we're using nand_extract_bits().
 *
 * See set_geometry_by_ecc_info inline comments to have a full description
 * of the layout used by the GPMI controller.
 */
static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
				  int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	struct bch_geometry *nfc_geo = &this->bch_geometry;
	int eccsize = nfc_geo->eccn_chunk_size;
	int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
	u8 *tmp_buf = this->raw_buffer;
	size_t src_bit_off;
	size_t oob_bit_off;
	size_t oob_byte_off;
	uint8_t *oob = chip->oob_poi;
	int step;
	int ret;

	/* Pull the whole physical page (payload + OOB) into raw_buffer. */
	ret = nand_read_page_op(chip, page, 0, tmp_buf,
				mtd->writesize + mtd->oobsize);
	if (ret)
		return ret;

	/*
	 * If required, swap the bad block marker and the data stored in the
	 * metadata section, so that we don't wrongly consider a block as bad.
	 *
	 * See the layout description for a detailed explanation on why this
	 * is needed.
	 */
	if (this->swap_block_mark)
		swap(tmp_buf[0], tmp_buf[mtd->writesize]);

	/*
	 * Copy the metadata section into the oob buffer (this section is
	 * guaranteed to be aligned on a byte boundary).
	 */
	if (oob_required)
		memcpy(oob, tmp_buf, nfc_geo->metadata_size);

	oob_bit_off = nfc_geo->metadata_size * 8;
	src_bit_off = oob_bit_off;

	/* Extract interleaved payload data and ECC bits */
	for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
		if (buf)
			nand_extract_bits(buf, step * eccsize * 8, tmp_buf,
					  src_bit_off, eccsize * 8);
		src_bit_off += eccsize * 8;

		/* Align last ECC block to align a byte boundary */
		if (step == nfc_geo->ecc_chunk_count - 1 &&
		    (oob_bit_off + eccbits) % 8)
			eccbits += 8 - ((oob_bit_off + eccbits) % 8);

		if (oob_required)
			nand_extract_bits(oob, oob_bit_off, tmp_buf,
					  src_bit_off, eccbits);

		src_bit_off += eccbits;
		oob_bit_off += eccbits;
	}

	/* Copy whatever trails the BCH-covered area straight through. */
	if (oob_required) {
		oob_byte_off = oob_bit_off / 8;

		if (oob_byte_off < mtd->oobsize)
			memcpy(oob + oob_byte_off,
			       tmp_buf + mtd->writesize + oob_byte_off,
			       mtd->oobsize - oob_byte_off);
	}

	return 0;
}

/*
 * This function writes a NAND page without involving the ECC engine (no HW
 * ECC generation).
 * The tricky part in the GPMI/BCH controller is that it stores ECC bits
 * inline (interleaved with payload DATA), and do not align data chunk on
 * byte boundaries.
 * We thus need to take care moving the OOB area at the right place in the
 * final page, which is why we're using nand_extract_bits().
 *
 * See set_geometry_by_ecc_info inline comments to have a full description
 * of the layout used by the GPMI controller.
 */
static int gpmi_ecc_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
				   int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	struct bch_geometry *nfc_geo = &this->bch_geometry;
	int eccsize = nfc_geo->eccn_chunk_size;
	int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
	u8 *tmp_buf = this->raw_buffer;
	uint8_t *oob = chip->oob_poi;
	size_t dst_bit_off;
	size_t oob_bit_off;
	size_t oob_byte_off;
	int step;

	/*
	 * Initialize all bits to 1 in case we don't have a buffer for the
	 * payload or oob data in order to leave unspecified bits of data
	 * to their initial state.
	 */
	if (!buf || !oob_required)
		memset(tmp_buf, 0xff, mtd->writesize + mtd->oobsize);

	/*
	 * First copy the metadata section (stored in oob buffer) at the
	 * beginning of the page, as imposed by the GPMI layout.
	 */
	memcpy(tmp_buf, oob, nfc_geo->metadata_size);
	oob_bit_off = nfc_geo->metadata_size * 8;
	dst_bit_off = oob_bit_off;

	/* Interleave payload data and ECC bits */
	for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
		if (buf)
			nand_extract_bits(tmp_buf, dst_bit_off, buf,
					  step * eccsize * 8, eccsize * 8);
		dst_bit_off += eccsize * 8;

		/* Align last ECC block to align a byte boundary */
		if (step == nfc_geo->ecc_chunk_count - 1 &&
		    (oob_bit_off + eccbits) % 8)
			eccbits += 8 - ((oob_bit_off + eccbits) % 8);

		if (oob_required)
			nand_extract_bits(tmp_buf, dst_bit_off, oob,
					  oob_bit_off, eccbits);

		dst_bit_off += eccbits;
		oob_bit_off += eccbits;
	}

	oob_byte_off = oob_bit_off / 8;

	/* Copy any OOB bytes past the BCH-covered area straight through. */
	if (oob_required && oob_byte_off < mtd->oobsize)
		memcpy(tmp_buf + mtd->writesize + oob_byte_off,
		       oob + oob_byte_off, mtd->oobsize - oob_byte_off);

	/*
	 * If required, swap the bad block marker and the first byte of the
	 * metadata section, so that we don't modify the bad block marker.
	 *
	 * See the layout description for a detailed explanation on why this
	 * is needed.
	 */
	if (this->swap_block_mark)
		swap(tmp_buf[0], tmp_buf[mtd->writesize]);

	return nand_prog_page_op(chip, page, 0, tmp_buf,
				 mtd->writesize + mtd->oobsize);
}

/* Raw OOB read: a raw page read that only fills chip->oob_poi. */
static int gpmi_ecc_read_oob_raw(struct nand_chip *chip, int page)
{
	return gpmi_ecc_read_page_raw(chip, NULL, 1, page);
}

/* Raw OOB write: a raw page write taking the OOB from chip->oob_poi. */
static int gpmi_ecc_write_oob_raw(struct nand_chip *chip, int page)
{
	return gpmi_ecc_write_page_raw(chip, NULL, 1, page);
}

/*
 * Mark the block containing @ofs bad by programming a single zero byte at
 * the marker position (conventional OOB offset, or offset 0 of the page on
 * MX23 where transcription is used).
 */
static int gpmi_block_markbad(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	int ret = 0;
	uint8_t *block_mark;
	int column, page, chipnr;

	chipnr = (int)(ofs >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	column = !GPMI_IS_MX23(this) ? mtd->writesize : 0;

	/* Write the block mark. */
	block_mark = this->data_buffer_dma;
	block_mark[0] = 0; /* bad block marker */

	/* Shift to get page */
	page = (int)(ofs >> chip->page_shift);

	ret = nand_prog_page_op(chip, page, column, block_mark, 1);

	nand_deselect_target(chip);

	return ret;
}

/* Fill in the boot-ROM search geometry with the documented defaults. */
static int nand_boot_set_geometry(struct gpmi_nand_data *this)
{
	struct boot_rom_geometry *geometry = &this->rom_geometry;

	/*
	 * Set the boot block stride size.
	 *
	 * In principle, we should be reading this from the OTP bits, since
	 * that's where the ROM is going to get it. In fact, we don't have any
	 * way to read the OTP bits, so we go with the default and hope for the
	 * best.
2035 */ 2036 geometry->stride_size_in_pages = 64; 2037 2038 /* 2039 * Set the search area stride exponent. 2040 * 2041 * In principle, we should be reading this from the OTP bits, since 2042 * that's where the ROM is going to get it. In fact, we don't have any 2043 * way to read the OTP bits, so we go with the default and hope for the 2044 * best. 2045 */ 2046 geometry->search_area_stride_exponent = 2; 2047 return 0; 2048 } 2049 2050 static const char *fingerprint = "STMP"; 2051 static int mx23_check_transcription_stamp(struct gpmi_nand_data *this) 2052 { 2053 struct boot_rom_geometry *rom_geo = &this->rom_geometry; 2054 struct device *dev = this->dev; 2055 struct nand_chip *chip = &this->nand; 2056 unsigned int search_area_size_in_strides; 2057 unsigned int stride; 2058 unsigned int page; 2059 u8 *buffer = nand_get_data_buf(chip); 2060 int found_an_ncb_fingerprint = false; 2061 int ret; 2062 2063 /* Compute the number of strides in a search area. */ 2064 search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent; 2065 2066 nand_select_target(chip, 0); 2067 2068 /* 2069 * Loop through the first search area, looking for the NCB fingerprint. 2070 */ 2071 dev_dbg(dev, "Scanning for an NCB fingerprint...\n"); 2072 2073 for (stride = 0; stride < search_area_size_in_strides; stride++) { 2074 /* Compute the page addresses. */ 2075 page = stride * rom_geo->stride_size_in_pages; 2076 2077 dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page); 2078 2079 /* 2080 * Read the NCB fingerprint. The fingerprint is four bytes long 2081 * and starts in the 12th byte of the page. 2082 */ 2083 ret = nand_read_page_op(chip, page, 12, buffer, 2084 strlen(fingerprint)); 2085 if (ret) 2086 continue; 2087 2088 /* Look for the fingerprint. 
		 */
		if (!memcmp(buffer, fingerprint, strlen(fingerprint))) {
			found_an_ncb_fingerprint = true;
			break;
		}

	}

	nand_deselect_target(chip);

	if (found_an_ncb_fingerprint)
		dev_dbg(dev, "\tFound a fingerprint\n");
	else
		dev_dbg(dev, "\tNo fingerprint found\n");
	return found_an_ncb_fingerprint;
}

/* Writes a transcription stamp. */
static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
{
	struct device *dev = this->dev;
	struct boot_rom_geometry *rom_geo = &this->rom_geometry;
	struct nand_chip *chip = &this->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int block_size_in_pages;
	unsigned int search_area_size_in_strides;
	unsigned int search_area_size_in_pages;
	unsigned int search_area_size_in_blocks;
	unsigned int block;
	unsigned int stride;
	unsigned int page;
	u8 *buffer = nand_get_data_buf(chip);
	int status;

	/* Compute the search area geometry. */
	block_size_in_pages = mtd->erasesize / mtd->writesize;
	search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
	search_area_size_in_pages = search_area_size_in_strides *
					rom_geo->stride_size_in_pages;
	/* Round up to whole erase blocks. */
	search_area_size_in_blocks =
		(search_area_size_in_pages + (block_size_in_pages - 1)) /
				    block_size_in_pages;

	dev_dbg(dev, "Search Area Geometry :\n");
	dev_dbg(dev, "\tin Blocks : %u\n", search_area_size_in_blocks);
	dev_dbg(dev, "\tin Strides: %u\n", search_area_size_in_strides);
	dev_dbg(dev, "\tin Pages : %u\n", search_area_size_in_pages);

	nand_select_target(chip, 0);

	/* Loop over blocks in the first search area, erasing them. */
	dev_dbg(dev, "Erasing the search area...\n");

	for (block = 0; block < search_area_size_in_blocks; block++) {
		/* Erase this block.
		 */
		dev_dbg(dev, "\tErasing block 0x%x\n", block);
		status = nand_erase_op(chip, block);
		if (status)
			dev_err(dev, "[%s] Erase failed.\n", __func__);
	}

	/* Write the NCB fingerprint into the page buffer. */
	memset(buffer, ~0, mtd->writesize);
	memcpy(buffer + 12, fingerprint, strlen(fingerprint));

	/* Loop through the first search area, writing NCB fingerprints. */
	dev_dbg(dev, "Writing NCB fingerprints...\n");
	for (stride = 0; stride < search_area_size_in_strides; stride++) {
		/* Compute the page addresses. */
		page = stride * rom_geo->stride_size_in_pages;

		/* Write the first page of the current stride. */
		dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);

		status = chip->ecc.write_page_raw(chip, buffer, 0, page);
		if (status)
			dev_err(dev, "[%s] Write failed.\n", __func__);
	}

	nand_deselect_target(chip);

	return 0;
}

/*
 * MX23 boot preparation: if no transcription stamp is present yet,
 * transcribe the conventional bad block marks and write the stamp.
 */
static int mx23_boot_init(struct gpmi_nand_data *this)
{
	struct device *dev = this->dev;
	struct nand_chip *chip = &this->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int block_count;
	unsigned int block;
	int chipnr;
	int page;
	loff_t byte;
	uint8_t block_mark;
	int ret = 0;

	/*
	 * If control arrives here, we can't use block mark swapping, which
	 * means we're forced to use transcription. First, scan for the
	 * transcription stamp. If we find it, then we don't have to do
	 * anything -- the block marks are already transcribed.
	 */
	if (mx23_check_transcription_stamp(this))
		return 0;

	/*
	 * If control arrives here, we couldn't find a transcription stamp,
	 * so we presume the block marks are in the conventional location.
	 */
	dev_dbg(dev, "Transcribing bad block marks...\n");

	/* Compute the number of blocks in the entire medium.
	 */
	block_count = nanddev_eraseblocks_per_target(&chip->base);

	/*
	 * Loop over all the blocks in the medium, transcribing block marks as
	 * we go.
	 */
	for (block = 0; block < block_count; block++) {
		/*
		 * Compute the chip, page and byte addresses for this block's
		 * conventional mark.
		 */
		chipnr = block >> (chip->chip_shift - chip->phys_erase_shift);
		page = block << (chip->phys_erase_shift - chip->page_shift);
		byte = block << chip->phys_erase_shift;

		/* Send the command to read the conventional block mark. */
		nand_select_target(chip, chipnr);
		ret = nand_read_page_op(chip, page, mtd->writesize, &block_mark,
					1);
		nand_deselect_target(chip);

		/* Unreadable pages are simply skipped. */
		if (ret)
			continue;

		/*
		 * Check if the block is marked bad. If so, we need to mark it
		 * again, but this time the result will be a mark in the
		 * location where we transcribe block marks.
		 */
		if (block_mark != 0xff) {
			dev_dbg(dev, "Transcribing mark in block %u\n", block);
			ret = chip->legacy.block_markbad(chip, byte);
			if (ret)
				dev_err(dev,
					"Failed to mark block bad with ret %d\n",
					ret);
		}
	}

	/* Write the stamp that indicates we've transcribed the block marks. */
	mx23_write_transcription_stamp(this);
	return 0;
}

/* Set up the boot-ROM geometry and run SoC-specific boot preparation. */
static int nand_boot_init(struct gpmi_nand_data *this)
{
	nand_boot_set_geometry(this);

	/* This is ROM arch-specific initialization before the BBT scanning. */
	if (GPMI_IS_MX23(this))
		return mx23_boot_init(this);
	return 0;
}

/*
 * Recompute the BCH geometry for the detected chip and reallocate the
 * DMA buffers to match the real page/OOB sizes.
 */
static int gpmi_set_geometry(struct gpmi_nand_data *this)
{
	int ret;

	/* Free the temporary DMA memory for reading ID. */
	gpmi_free_dma_buffer(this);

	/* Set up the NFC geometry which is used by BCH.
	 */
	ret = bch_set_geometry(this);
	if (ret) {
		dev_err(this->dev, "Error setting BCH geometry : %d\n", ret);
		return ret;
	}

	/* Alloc the new DMA buffers according to the pagesize and oobsize */
	return gpmi_alloc_dma_buffer(this);
}

/*
 * Late initialization, called once the chip has been identified:
 * fix the geometry, wire up the ECC callbacks and OOB layout.
 */
static int gpmi_init_last(struct gpmi_nand_data *this)
{
	struct nand_chip *chip = &this->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct bch_geometry *bch_geo = &this->bch_geometry;
	int ret;

	/* Set up the medium geometry */
	ret = gpmi_set_geometry(this);
	if (ret)
		return ret;

	/* Init the nand_ecc_ctrl{} */
	ecc->read_page = gpmi_ecc_read_page;
	ecc->write_page = gpmi_ecc_write_page;
	ecc->read_oob = gpmi_ecc_read_oob;
	ecc->write_oob = gpmi_ecc_write_oob;
	ecc->read_page_raw = gpmi_ecc_read_page_raw;
	ecc->write_page_raw = gpmi_ecc_write_page_raw;
	ecc->read_oob_raw = gpmi_ecc_read_oob_raw;
	ecc->write_oob_raw = gpmi_ecc_write_oob_raw;
	ecc->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
	ecc->size = bch_geo->eccn_chunk_size;
	ecc->strength = bch_geo->ecc_strength;
	mtd_set_ooblayout(mtd, &gpmi_ooblayout_ops);

	/*
	 * We only enable the subpage read when:
	 *  (1) the chip is imx6, and
	 *  (2) the size of the ECC parity is byte aligned.
2304 */ 2305 if (GPMI_IS_MX6(this) && 2306 ((bch_geo->gf_len * bch_geo->ecc_strength) % 8) == 0) { 2307 ecc->read_subpage = gpmi_ecc_read_subpage; 2308 chip->options |= NAND_SUBPAGE_READ; 2309 } 2310 2311 return 0; 2312 } 2313 2314 static int gpmi_nand_attach_chip(struct nand_chip *chip) 2315 { 2316 struct gpmi_nand_data *this = nand_get_controller_data(chip); 2317 int ret; 2318 2319 if (chip->bbt_options & NAND_BBT_USE_FLASH) { 2320 chip->bbt_options |= NAND_BBT_NO_OOB; 2321 2322 if (of_property_read_bool(this->dev->of_node, 2323 "fsl,no-blockmark-swap")) 2324 this->swap_block_mark = false; 2325 } 2326 dev_dbg(this->dev, "Blockmark swapping %s\n", 2327 str_enabled_disabled(this->swap_block_mark)); 2328 2329 ret = gpmi_init_last(this); 2330 if (ret) 2331 return ret; 2332 2333 chip->options |= NAND_SKIP_BBTSCAN; 2334 2335 return 0; 2336 } 2337 2338 static struct gpmi_transfer *get_next_transfer(struct gpmi_nand_data *this) 2339 { 2340 struct gpmi_transfer *transfer = &this->transfers[this->ntransfers]; 2341 2342 this->ntransfers++; 2343 2344 if (this->ntransfers == GPMI_MAX_TRANSFERS) 2345 return NULL; 2346 2347 return transfer; 2348 } 2349 2350 static struct dma_async_tx_descriptor *gpmi_chain_command( 2351 struct gpmi_nand_data *this, u8 cmd, const u8 *addr, int naddr) 2352 { 2353 struct dma_chan *channel = get_dma_chan(this); 2354 struct dma_async_tx_descriptor *desc; 2355 struct gpmi_transfer *transfer; 2356 int chip = this->nand.cur_cs; 2357 u32 pio[3]; 2358 2359 /* [1] send out the PIO words */ 2360 pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE) 2361 | BM_GPMI_CTRL0_WORD_LENGTH 2362 | BF_GPMI_CTRL0_CS(chip, this) 2363 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this) 2364 | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE) 2365 | BM_GPMI_CTRL0_ADDRESS_INCREMENT 2366 | BF_GPMI_CTRL0_XFER_COUNT(naddr + 1); 2367 pio[1] = 0; 2368 pio[2] = 0; 2369 desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio), 2370 DMA_TRANS_NONE, 0); 2371 if 
 (!desc)
		return NULL;

	transfer = get_next_transfer(this);
	if (!transfer)
		return NULL;

	/* Command byte first, then the raw address cycles. */
	transfer->cmdbuf[0] = cmd;
	if (naddr)
		memcpy(&transfer->cmdbuf[1], addr, naddr);

	sg_init_one(&transfer->sgl, transfer->cmdbuf, naddr + 1);
	dma_map_sg(this->dev, &transfer->sgl, 1, DMA_TO_DEVICE);

	transfer->direction = DMA_TO_DEVICE;

	desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1, DMA_MEM_TO_DEV,
				       MXS_DMA_CTRL_WAIT4END);
	return desc;
}

/* Build a zero-length descriptor that waits for the chip's ready signal. */
static struct dma_async_tx_descriptor *gpmi_chain_wait_ready(
	struct gpmi_nand_data *this)
{
	struct dma_chan *channel = get_dma_chan(this);
	u32 pio[2];

	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
		| BF_GPMI_CTRL0_XFER_COUNT(0);
	pio[1] = 0;

	return mxs_dmaengine_prep_pio(channel, pio, 2, DMA_TRANS_NONE,
				      MXS_DMA_CTRL_WAIT4END | MXS_DMA_CTRL_WAIT4RDY);
}

/*
 * Build the descriptor chain for a data-in transfer of @raw_len bytes.
 * *direct reports whether the user buffer was DMA-mapped directly or the
 * bounce buffer was used (decided by prepare_data_dma()).
 */
static struct dma_async_tx_descriptor *gpmi_chain_data_read(
	struct gpmi_nand_data *this, void *buf, int raw_len, bool *direct)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *channel = get_dma_chan(this);
	struct gpmi_transfer *transfer;
	u32 pio[6] = {};

	transfer = get_next_transfer(this);
	if (!transfer)
		return NULL;

	transfer->direction = DMA_FROM_DEVICE;

	*direct = prepare_data_dma(this, buf, raw_len, &transfer->sgl,
				   DMA_FROM_DEVICE);

	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
		|
 BF_GPMI_CTRL0_XFER_COUNT(raw_len);

	/* With BCH the engine decodes in place; program the ECC PIO words. */
	if (this->bch) {
		pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
			| BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE)
			| BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE
				| BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY);
		pio[3] = raw_len;
		pio[4] = transfer->sgl.dma_address;
		pio[5] = this->auxiliary_phys;
	}

	desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
				      DMA_TRANS_NONE, 0);
	if (!desc)
		return NULL;

	/* Without BCH the payload moves through a plain slave-sg transfer. */
	if (!this->bch)
		desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1,
					       DMA_DEV_TO_MEM,
					       MXS_DMA_CTRL_WAIT4END);

	return desc;
}

/*
 * Build the descriptor chain for a data-out transfer of @raw_len bytes,
 * optionally routed through the BCH encoder.
 */
static struct dma_async_tx_descriptor *gpmi_chain_data_write(
	struct gpmi_nand_data *this, const void *buf, int raw_len)
{
	struct dma_chan *channel = get_dma_chan(this);
	struct dma_async_tx_descriptor *desc;
	struct gpmi_transfer *transfer;
	u32 pio[6] = {};

	transfer = get_next_transfer(this);
	if (!transfer)
		return NULL;

	transfer->direction = DMA_TO_DEVICE;

	prepare_data_dma(this, buf, raw_len, &transfer->sgl, DMA_TO_DEVICE);

	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
		| BF_GPMI_CTRL0_XFER_COUNT(raw_len);

	if (this->bch) {
		pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
			| BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE)
			| BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
						      BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY);
		pio[3] = raw_len;
		pio[4] = transfer->sgl.dma_address;
		pio[5] = this->auxiliary_phys;
	}

	desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
				      DMA_TRANS_NONE,
 (this->bch ? MXS_DMA_CTRL_WAIT4END : 0));
	if (!desc)
		return NULL;

	/* Without BCH the payload moves through a plain slave-sg transfer. */
	if (!this->bch)
		desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1,
					       DMA_MEM_TO_DEV,
					       MXS_DMA_CTRL_WAIT4END);

	return desc;
}

/*
 * nand_controller_ops->exec_op: translate a generic NAND operation into
 * a chain of GPMI DMA descriptors, run it and wait for completion
 * (DMA interrupt, plus the BCH interrupt for ECC reads).
 */
static int gpmi_nfc_exec_op(struct nand_chip *chip,
			    const struct nand_operation *op,
			    bool check_only)
{
	const struct nand_op_instr *instr;
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	struct dma_async_tx_descriptor *desc = NULL;
	int i, ret, buf_len = 0, nbufs = 0;
	u8 cmd = 0;
	void *buf_read = NULL;
	const void *buf_write = NULL;
	bool direct = false;
	struct completion *dma_completion, *bch_completion;
	unsigned long to;

	/* All generic NAND operations are supported; nothing to validate. */
	if (check_only)
		return 0;

	this->ntransfers = 0;
	for (i = 0; i < GPMI_MAX_TRANSFERS; i++)
		this->transfers[i].direction = DMA_NONE;

	ret = pm_runtime_resume_and_get(this->dev);
	if (ret < 0)
		return ret;

	/*
	 * This driver currently supports only one NAND chip. Plus, dies share
	 * the same configuration. So once timings have been applied on the
	 * controller side, they will not change anymore. When the time will
	 * come, the check on must_apply_timings will have to be dropped.
	 */
	if (this->hw.must_apply_timings) {
		this->hw.must_apply_timings = false;
		ret = gpmi_nfc_apply_timings(this);
		if (ret)
			goto out_pm;
	}

	dev_dbg(this->dev, "%s: %d instructions\n", __func__, op->ninstrs);

	/* Build one DMA descriptor per instruction and chain them. */
	for (i = 0; i < op->ninstrs; i++) {
		instr = &op->instrs[i];

		nand_op_trace(" ", instr);

		switch (instr->type) {
		case NAND_OP_WAITRDY_INSTR:
			desc = gpmi_chain_wait_ready(this);
			break;
		case NAND_OP_CMD_INSTR:
			cmd = instr->ctx.cmd.opcode;

			/*
			 * When this command has an address cycle chain it
			 * together with the address cycle
			 */
			if (i + 1 != op->ninstrs &&
			    op->instrs[i + 1].type == NAND_OP_ADDR_INSTR)
				continue;

			desc = gpmi_chain_command(this, cmd, NULL, 0);

			break;
		case NAND_OP_ADDR_INSTR:
			desc = gpmi_chain_command(this, cmd, instr->ctx.addr.addrs,
						  instr->ctx.addr.naddrs);
			break;
		case NAND_OP_DATA_OUT_INSTR:
			buf_write = instr->ctx.data.buf.out;
			buf_len = instr->ctx.data.len;
			nbufs++;

			desc = gpmi_chain_data_write(this, buf_write, buf_len);

			break;
		case NAND_OP_DATA_IN_INSTR:
			/* Zero-length reads produce no descriptor. */
			if (!instr->ctx.data.len)
				break;
			buf_read = instr->ctx.data.buf.in;
			buf_len = instr->ctx.data.len;
			nbufs++;

			desc = gpmi_chain_data_read(this, buf_read, buf_len,
						    &direct);
			break;
		}

		if (!desc) {
			ret = -ENXIO;
			goto unmap;
		}
	}

	dev_dbg(this->dev, "%s setup done\n", __func__);

	if (nbufs > 1) {
		dev_err(this->dev, "Multiple data instructions not supported\n");
		ret = -EINVAL;
		goto unmap;
	}

	/* Program the BCH layout registers for this operation. */
	if (this->bch) {
		writel(this->bch_flashlayout0,
		       this->resources.bch_regs + HW_BCH_FLASH0LAYOUT0);
		writel(this->bch_flashlayout1,
		       this->resources.bch_regs + HW_BCH_FLASH0LAYOUT1);
	}

	/* Completion callback fires on the last descriptor of the chain. */
	desc->callback = dma_irq_callback;
	desc->callback_param
 = this;
	dma_completion = &this->dma_done;
	bch_completion = NULL;

	init_completion(dma_completion);

	/* ECC reads additionally wait for the BCH completion interrupt. */
	if (this->bch && buf_read) {
		writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
		       this->resources.bch_regs + HW_BCH_CTRL_SET);
		bch_completion = &this->bch_done;
		init_completion(bch_completion);
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(get_dma_chan(this));

	to = wait_for_completion_timeout(dma_completion, msecs_to_jiffies(1000));
	if (!to) {
		dev_err(this->dev, "DMA timeout, last DMA\n");
		gpmi_dump_info(this);
		ret = -ETIMEDOUT;
		goto unmap;
	}

	if (this->bch && buf_read) {
		to = wait_for_completion_timeout(bch_completion, msecs_to_jiffies(1000));
		if (!to) {
			dev_err(this->dev, "BCH timeout, last DMA\n");
			gpmi_dump_info(this);
			ret = -ETIMEDOUT;
			goto unmap;
		}
	}

	writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
	       this->resources.bch_regs + HW_BCH_CTRL_CLR);
	gpmi_clear_bch(this);

	ret = 0;

unmap:
	/* Unmap every transfer that was actually mapped. */
	for (i = 0; i < this->ntransfers; i++) {
		struct gpmi_transfer *transfer = &this->transfers[i];

		if (transfer->direction != DMA_NONE)
			dma_unmap_sg(this->dev, &transfer->sgl, 1,
				     transfer->direction);
	}

	/* Bounce-buffered reads are copied back to the caller's buffer. */
	if (!ret && buf_read && !direct)
		memcpy(buf_read, this->data_buffer_dma,
		       gpmi_raw_len_to_len(this, buf_len));

	this->bch = false;

out_pm:
	pm_runtime_mark_last_busy(this->dev);
	pm_runtime_put_autosuspend(this->dev);

	return ret;
}

static const struct nand_controller_ops gpmi_nand_controller_ops = {
	.attach_chip = gpmi_nand_attach_chip,
	.setup_interface = gpmi_setup_interface,
	.exec_op = gpmi_nfc_exec_op,
};

/*
 * Register the NAND chip with the MTD/rawnand core: set up the chip
 * structure, scan the flash, run boot preparation, create the BBT and
 * register the MTD device.
 */
static int gpmi_nand_init(struct gpmi_nand_data *this)
{
	struct nand_chip *chip = &this->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	/* init
 the MTD data structures */
	mtd->name = "gpmi-nand";
	mtd->dev.parent = this->dev;

	/* init the nand_chip{}, we don't support a 16-bit NAND Flash bus. */
	nand_set_controller_data(chip, this);
	nand_set_flash_node(chip, this->pdev->dev.of_node);
	chip->legacy.block_markbad = gpmi_block_markbad;
	chip->badblock_pattern = &gpmi_bbt_descr;
	chip->options |= NAND_NO_SUBPAGE_WRITE;

	/* Set up swap_block_mark, must be set before the gpmi_set_geometry() */
	this->swap_block_mark = !GPMI_IS_MX23(this);

	/*
	 * Allocate a temporary DMA buffer for reading ID in the
	 * nand_scan_ident().
	 */
	this->bch_geometry.payload_size = 1024;
	this->bch_geometry.auxiliary_size = 128;
	ret = gpmi_alloc_dma_buffer(this);
	if (ret)
		return ret;

	nand_controller_init(&this->base);
	this->base.ops = &gpmi_nand_controller_ops;
	chip->controller = &this->base;

	/* imx6 supports two chip selects, the others only one. */
	ret = nand_scan(chip, GPMI_IS_MX6(this) ?
 2 : 1);
	if (ret)
		goto err_out;

	ret = nand_boot_init(this);
	if (ret)
		goto err_nand_cleanup;
	ret = nand_create_bbt(chip);
	if (ret)
		goto err_nand_cleanup;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		goto err_nand_cleanup;
	return 0;

err_nand_cleanup:
	nand_cleanup(chip);
err_out:
	gpmi_free_dma_buffer(this);
	return ret;
}

static const struct of_device_id gpmi_nand_id_table[] = {
	{ .compatible = "fsl,imx23-gpmi-nand", .data = &gpmi_devdata_imx23, },
	{ .compatible = "fsl,imx28-gpmi-nand", .data = &gpmi_devdata_imx28, },
	{ .compatible = "fsl,imx6q-gpmi-nand", .data = &gpmi_devdata_imx6q, },
	{ .compatible = "fsl,imx6sx-gpmi-nand", .data = &gpmi_devdata_imx6sx, },
	{ .compatible = "fsl,imx7d-gpmi-nand", .data = &gpmi_devdata_imx7d,},
	{ .compatible = "fsl,imx8qxp-gpmi-nand", .data = &gpmi_devdata_imx8qxp, },
	{}
};
MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);

/*
 * Platform probe: acquire resources, enable runtime PM, initialize the
 * GPMI hardware and register the NAND chip.
 */
static int gpmi_nand_probe(struct platform_device *pdev)
{
	struct gpmi_nand_data *this;
	int ret;

	this = devm_kzalloc(&pdev->dev, sizeof(*this), GFP_KERNEL);
	if (!this)
		return -ENOMEM;

	this->devdata = of_device_get_match_data(&pdev->dev);
	platform_set_drvdata(pdev, this);
	this->pdev = pdev;
	this->dev = &pdev->dev;

	ret = acquire_resources(this);
	if (ret)
		goto exit_acquire_resources;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
	pm_runtime_use_autosuspend(&pdev->dev);
#ifndef CONFIG_PM
	/* Without runtime PM the clocks must stay on for the device's life. */
	ret = gpmi_enable_clk(this);
	if (ret)
		goto exit_acquire_resources;
#endif

	ret = gpmi_init(this);
	if (ret)
		goto exit_nfc_init;

	ret = gpmi_nand_init(this);
	if (ret)
		goto exit_nfc_init;

	dev_info(this->dev, "driver registered.\n");

	return 0;

exit_nfc_init:
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	release_resources(this);
exit_acquire_resources:

	return ret;
}

/* Platform remove: tear down in the reverse order of probe. */
static void gpmi_nand_remove(struct platform_device *pdev)
{
	struct gpmi_nand_data *this = platform_get_drvdata(pdev);
	struct nand_chip *chip = &this->nand;
	int ret;

	ret = mtd_device_unregister(nand_to_mtd(chip));
	WARN_ON(ret);
	nand_cleanup(chip);
	gpmi_free_dma_buffer(this);
	release_resources(this);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
#ifndef CONFIG_PM
	gpmi_disable_clk(this);
#endif
}

/* System suspend: select sleep pinctrl state and force runtime suspend. */
static int gpmi_pm_suspend(struct device *dev)
{
	int ret;

	pinctrl_pm_select_sleep_state(dev);
	ret = pm_runtime_force_suspend(dev);

	return ret;
}

/*
 * System resume: restore pinctrl state and re-initialize the GPMI and
 * BCH blocks, whose register contents are lost across suspend.
 */
static int gpmi_pm_resume(struct device *dev)
{
	struct gpmi_nand_data *this = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_force_resume(dev);
	if (ret) {
		dev_err(this->dev, "Error in resume %d\n", ret);
		return ret;
	}

	pinctrl_pm_select_default_state(dev);

	/* re-init the GPMI registers */
	ret = gpmi_init(this);
	if (ret) {
		dev_err(this->dev, "Error setting GPMI : %d\n", ret);
		return ret;
	}

	/* Set flag to get timing setup restored for next exec_op */
	if (this->hw.clk_rate)
		this->hw.must_apply_timings = true;

	/* re-init the BCH registers */
	ret = bch_set_geometry(this);
	if (ret) {
		dev_err(this->dev, "Error setting BCH : %d\n", ret);
		return ret;
	}

	return 0;
}

/* Runtime suspend: gate the controller clocks. */
static int gpmi_runtime_suspend(struct device *dev)
{
	struct gpmi_nand_data *this = dev_get_drvdata(dev);

	gpmi_disable_clk(this);

	return 0;
}

/* Runtime resume: ungate the controller clocks. */
static int gpmi_runtime_resume(struct device *dev)
{
	struct gpmi_nand_data *this = dev_get_drvdata(dev);
	int ret;

	ret = gpmi_enable_clk(this);
	if (ret)
		return ret;

	return 0;

}

static const struct dev_pm_ops gpmi_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(gpmi_pm_suspend, gpmi_pm_resume)
	RUNTIME_PM_OPS(gpmi_runtime_suspend, gpmi_runtime_resume, NULL)
};

static struct platform_driver gpmi_nand_driver = {
	.driver = {
		.name = "gpmi-nand",
		.pm = pm_ptr(&gpmi_pm_ops),
		.of_match_table = gpmi_nand_id_table,
	},
	.probe = gpmi_nand_probe,
	.remove = gpmi_nand_remove,
};
module_platform_driver(gpmi_nand_driver);

MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver");
MODULE_LICENSE("GPL");