// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale Integrated Flash Controller NAND driver
 *
 * Copyright 2011-2012 Freescale Semiconductor, Inc
 *
 * Author: Dipen Dudhat <Dipen.Dudhat@freescale.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/fsl_ifc.h>
#include <linux/iopoll.h>

#define ERR_BYTE		0xFF	/* Value returned for read
					   bytes when read failed	*/
#define IFC_TIMEOUT_MSECS	500	/* Maximum number of mSecs to wait
					   for IFC NAND Machine		*/

struct fsl_ifc_ctrl;

/* mtd information per set */
struct fsl_ifc_mtd {
	struct nand_chip chip;
	struct fsl_ifc_ctrl *ctrl;

	struct device *dev;
	int bank;			/* Chip select bank number	*/
	unsigned int bufnum_mask;	/* bufnum = page & bufnum_mask	*/
	u8 __iomem *vbase;		/* Chip select base virtual address */
};

/* overview of the fsl ifc controller */
struct fsl_ifc_nand_ctrl {
	struct nand_controller controller;
	struct fsl_ifc_mtd *chips[FSL_IFC_BANK_COUNT];

	void __iomem *addr;	/* Address of assigned IFC buffer	*/
	unsigned int page;	/* Last page written to / read from	*/
	unsigned int read_bytes;/* Number of bytes read during command	*/
	unsigned int column;	/* Saved column from SEQIN		*/
	unsigned int index;	/* Pointer to next byte to 'read'	*/
	unsigned int oob;	/* Non zero if operating on OOB data	*/
	unsigned int eccread;	/* Non zero for a full-page ECC read	*/
	unsigned int counter;	/* counter for the initializations	*/
	unsigned int max_bitflips; /* Saved during READ0 cmd		*/
};

static struct fsl_ifc_nand_ctrl *ifc_nand_ctrl;

/*
 * Generic flash bbt descriptors
 */
static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };

static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
		   NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs = 2, /* 0 on 8-bit small page */
	.len = 4,
	.veroffs = 6,
	.maxblocks = 4,
	.pattern = bbt_pattern,
};

static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
		   NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs = 2, /* 0 on 8-bit small page */
	.len = 4,
	.veroffs = 6,
	.maxblocks = 4,
	.pattern = mirror_pattern,
};

static int fsl_ifc_ooblayout_ecc(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section)
		return -ERANGE;

	oobregion->offset = 8;
	oobregion->length = chip->ecc.total;

	return 0;
}

static int fsl_ifc_ooblayout_free(struct mtd_info *mtd, int section,
				  struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section > 1)
		return -ERANGE;

	if (mtd->writesize == 512 &&
	    !(chip->options & NAND_BUSWIDTH_16)) {
		if (!section) {
			oobregion->offset = 0;
			oobregion->length = 5;
		} else {
			oobregion->offset = 6;
			oobregion->length = 2;
		}

		return 0;
	}

	if (!section) {
		oobregion->offset = 2;
		oobregion->length = 6;
	} else {
		oobregion->offset = chip->ecc.total + 8;
		oobregion->length = mtd->oobsize - oobregion->offset;
	}

	return 0;
}

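/*
 * The two helpers above describe the OOB layout used with the hardware
 * ECC engine: bytes 0-1 are reserved for the bad block marker, bytes 2-7
 * are free, the ECC bytes start at offset 8 (chip->ecc.total of them),
 * and anything after the ECC is free again.  Small-page (512 byte) chips
 * on an 8-bit bus keep bytes 0-4 and 6-7 free instead.
 */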
static const struct mtd_ooblayout_ops fsl_ifc_ooblayout_ops = {
	.ecc = fsl_ifc_ooblayout_ecc,
	.free = fsl_ifc_ooblayout_free,
};

/*
 * Set up the IFC hardware block and page address fields, and the ifc nand
 * structure addr field to point to the correct IFC buffer in memory
 */
static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
	struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
	int buf_num;

	ifc_nand_ctrl->page = page_addr;
	/* Program ROW0/COL0 */
	ifc_out32(page_addr, &ifc->ifc_nand.row0);
	ifc_out32((oob ? IFC_NAND_COL_MS : 0) | column, &ifc->ifc_nand.col0);

	buf_num = page_addr & priv->bufnum_mask;

	ifc_nand_ctrl->addr = priv->vbase + buf_num * (mtd->writesize * 2);
	ifc_nand_ctrl->index = column;

	/* for OOB data point to the second half of the buffer */
	if (oob)
		ifc_nand_ctrl->index += mtd->writesize;
}

/* Extract the 4-bit ECC status for one sector from a packed ECCSTAT word */
static int check_read_ecc(struct mtd_info *mtd, struct fsl_ifc_ctrl *ctrl,
			  u32 eccstat, unsigned int bufnum)
{
	return (eccstat >> ((3 - bufnum % 4) * 8)) & 15;
}

/*
 * execute IFC NAND command and wait for it to complete
 */
static void fsl_ifc_run_command(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
	struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
	struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
	u32 eccstat;
	int i;

	/* set the chip select for NAND Transaction */
	ifc_out32(priv->bank << IFC_NAND_CSEL_SHIFT,
		  &ifc->ifc_nand.nand_csel);

	dev_vdbg(priv->dev,
		 "%s: fir0=%08x fcr0=%08x\n",
		 __func__,
		 ifc_in32(&ifc->ifc_nand.nand_fir0),
		 ifc_in32(&ifc->ifc_nand.nand_fcr0));

	ctrl->nand_stat = 0;

	/* start read/write seq */
	ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);

	/* wait for command complete flag or timeout */
	wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
			   msecs_to_jiffies(IFC_TIMEOUT_MSECS));

	/* ctrl->nand_stat will be updated from IRQ context */
	if (!ctrl->nand_stat)
		dev_err(priv->dev, "Controller is not responding\n");
	if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_FTOER)
		dev_err(priv->dev, "NAND Flash Timeout Error\n");
	if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_WPER)
		dev_err(priv->dev, "NAND Flash Write Protect Error\n");

	nctrl->max_bitflips = 0;

	if (nctrl->eccread) {
		int errors;
		int bufnum = nctrl->page & priv->bufnum_mask;
		int sector_start = bufnum * chip->ecc.steps;
		int sector_end = sector_start + chip->ecc.steps - 1;
		__be32 __iomem *eccstat_regs;

		eccstat_regs = ifc->ifc_nand.nand_eccstat;
		eccstat = ifc_in32(&eccstat_regs[sector_start / 4]);

		for (i = sector_start; i <= sector_end; i++) {
			if (i != sector_start && !(i % 4))
				eccstat = ifc_in32(&eccstat_regs[i / 4]);

			errors = check_read_ecc(mtd, ctrl, eccstat, i);

			if (errors == 15) {
				/*
				 * Uncorrectable error.
				 * We'll check for blank pages later.
				 *
				 * We disable ECCER reporting due to erratum
				 * IFC-A002770 -- so report it now if we
				 * see an uncorrectable error in ECCSTAT.
				 */
				ctrl->nand_stat |= IFC_NAND_EVTER_STAT_ECCER;
				continue;
			}

			mtd->ecc_stats.corrected += errors;
			nctrl->max_bitflips = max_t(unsigned int,
						    nctrl->max_bitflips,
						    errors);
		}

		nctrl->eccread = 0;
	}
}

static void fsl_ifc_do_read(struct nand_chip *chip,
			    int oob,
			    struct mtd_info *mtd)
{
	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
	struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;

	/* Program FIR/IFC_NAND_FCR0 for Small/Large page */
	if (mtd->writesize > 512) {
		ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
			  (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
			  (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
			  (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) |
			  (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT),
			  &ifc->ifc_nand.nand_fir0);
		ifc_out32(0x0, &ifc->ifc_nand.nand_fir1);

		ifc_out32((NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) |
			  (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT),
			  &ifc->ifc_nand.nand_fcr0);
	} else {
		ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
			  (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
			  (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
			  (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT),
			  &ifc->ifc_nand.nand_fir0);
		ifc_out32(0x0, &ifc->ifc_nand.nand_fir1);

		if (oob)
			ifc_out32(NAND_CMD_READOOB <<
				  IFC_NAND_FCR0_CMD0_SHIFT,
				  &ifc->ifc_nand.nand_fcr0);
		else
			ifc_out32(NAND_CMD_READ0 <<
				  IFC_NAND_FCR0_CMD0_SHIFT,
				  &ifc->ifc_nand.nand_fcr0);
	}
}

/* cmdfunc sends commands to the IFC NAND Machine */
static void fsl_ifc_cmdfunc(struct nand_chip *chip, unsigned int command,
			    int column, int page_addr) {
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
	struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;

	/* clear the read buffer */
	ifc_nand_ctrl->read_bytes = 0;
	if (command != NAND_CMD_PAGEPROG)
		ifc_nand_ctrl->index = 0;

	switch (command) {
	/* READ0 reads the entire buffer to use hardware ECC. */
	case NAND_CMD_READ0:
		ifc_out32(0, &ifc->ifc_nand.nand_fbcr);
		set_addr(mtd, 0, page_addr, 0);

		ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
		ifc_nand_ctrl->index += column;

		if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST)
			ifc_nand_ctrl->eccread = 1;

		fsl_ifc_do_read(chip, 0, mtd);
		fsl_ifc_run_command(mtd);
		return;

	/* READOOB reads only the OOB because no ECC is performed. */
	case NAND_CMD_READOOB:
		ifc_out32(mtd->oobsize - column, &ifc->ifc_nand.nand_fbcr);
		set_addr(mtd, column, page_addr, 1);

		ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;

		fsl_ifc_do_read(chip, 1, mtd);
		fsl_ifc_run_command(mtd);

		return;

	case NAND_CMD_READID:
	case NAND_CMD_PARAM: {
		/*
		 * For READID, read 8 bytes that are currently used.
		 * For PARAM, read all 3 copies of the 256-byte parameter page.
		 */
		int len = 8;
		int timing = IFC_FIR_OP_RB;
		if (command == NAND_CMD_PARAM) {
			timing = IFC_FIR_OP_RBCD;
			len = 256 * 3;
		}

		ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
			  (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
			  (timing << IFC_NAND_FIR0_OP2_SHIFT),
			  &ifc->ifc_nand.nand_fir0);
		ifc_out32(command << IFC_NAND_FCR0_CMD0_SHIFT,
			  &ifc->ifc_nand.nand_fcr0);
		ifc_out32(column, &ifc->ifc_nand.row3);

		ifc_out32(len, &ifc->ifc_nand.nand_fbcr);
		ifc_nand_ctrl->read_bytes = len;

		set_addr(mtd, 0, 0, 0);
		fsl_ifc_run_command(mtd);
		return;
	}

	/* ERASE1 stores the block and page address */
	case NAND_CMD_ERASE1:
		set_addr(mtd, 0, page_addr, 0);
		return;

	/* ERASE2 uses the block and page address from ERASE1 */
	case NAND_CMD_ERASE2:
		ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
			  (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) |
			  (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT),
			  &ifc->ifc_nand.nand_fir0);

		ifc_out32((NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) |
			  (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT),
			  &ifc->ifc_nand.nand_fcr0);

		ifc_out32(0, &ifc->ifc_nand.nand_fbcr);
		ifc_nand_ctrl->read_bytes = 0;
		fsl_ifc_run_command(mtd);
		return;

	/* SEQIN sets up the addr buffer and all registers except the length */
	case NAND_CMD_SEQIN: {
		u32 nand_fcr0;
		ifc_nand_ctrl->column = column;
		ifc_nand_ctrl->oob = 0;

		if (mtd->writesize > 512) {
			nand_fcr0 =
				(NAND_CMD_SEQIN << IFC_NAND_FCR0_CMD0_SHIFT) |
				(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD1_SHIFT) |
				(NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD2_SHIFT);

			ifc_out32(
				(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
				(IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
				(IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
				(IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) |
				(IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP4_SHIFT),
				&ifc->ifc_nand.nand_fir0);
			ifc_out32(
				(IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT) |
				(IFC_FIR_OP_RDSTAT << IFC_NAND_FIR1_OP6_SHIFT) |
				(IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP7_SHIFT),
				&ifc->ifc_nand.nand_fir1);
		} else {
			nand_fcr0 = ((NAND_CMD_PAGEPROG <<
					IFC_NAND_FCR0_CMD1_SHIFT) |
				    (NAND_CMD_SEQIN <<
					IFC_NAND_FCR0_CMD2_SHIFT) |
				    (NAND_CMD_STATUS <<
					IFC_NAND_FCR0_CMD3_SHIFT));

			ifc_out32(
				(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
				(IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP1_SHIFT) |
				(IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP2_SHIFT) |
				(IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) |
				(IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT),
				&ifc->ifc_nand.nand_fir0);
			ifc_out32(
				(IFC_FIR_OP_CMD1 << IFC_NAND_FIR1_OP5_SHIFT) |
				(IFC_FIR_OP_CW3 << IFC_NAND_FIR1_OP6_SHIFT) |
				(IFC_FIR_OP_RDSTAT << IFC_NAND_FIR1_OP7_SHIFT) |
				(IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP8_SHIFT),
				&ifc->ifc_nand.nand_fir1);

			if (column >= mtd->writesize)
				nand_fcr0 |=
				NAND_CMD_READOOB << IFC_NAND_FCR0_CMD0_SHIFT;
			else
				nand_fcr0 |=
				NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT;
		}

		if (column >= mtd->writesize) {
			/* OOB area --> READOOB */
			column -= mtd->writesize;
			ifc_nand_ctrl->oob = 1;
		}
		ifc_out32(nand_fcr0, &ifc->ifc_nand.nand_fcr0);
		set_addr(mtd, column, page_addr, ifc_nand_ctrl->oob);
		return;
	}

	/* PAGEPROG reuses all of the setup from SEQIN and adds the length */
	case NAND_CMD_PAGEPROG: {
		if (ifc_nand_ctrl->oob) {
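			/*
			 * OOB-only program operation: transfer just the
			 * bytes buffered since SEQIN (index - column).
			 * The full-page case below programs FBCR = 0,
			 * i.e. the whole page + OOB buffer.
			 */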
			ifc_out32(ifc_nand_ctrl->index -
				  ifc_nand_ctrl->column,
				  &ifc->ifc_nand.nand_fbcr);
		} else {
			ifc_out32(0, &ifc->ifc_nand.nand_fbcr);
		}

		fsl_ifc_run_command(mtd);
		return;
	}

	case NAND_CMD_STATUS: {
		void __iomem *addr;

		ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
			  (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT),
			  &ifc->ifc_nand.nand_fir0);
		ifc_out32(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
			  &ifc->ifc_nand.nand_fcr0);
		ifc_out32(1, &ifc->ifc_nand.nand_fbcr);
		set_addr(mtd, 0, 0, 0);
		ifc_nand_ctrl->read_bytes = 1;

		fsl_ifc_run_command(mtd);

		/*
		 * The chip always seems to report that it is
		 * write-protected, even when it is not.
		 */
		addr = ifc_nand_ctrl->addr;
		if (chip->options & NAND_BUSWIDTH_16)
			ifc_out16(ifc_in16(addr) | (NAND_STATUS_WP), addr);
		else
			ifc_out8(ifc_in8(addr) | (NAND_STATUS_WP), addr);
		return;
	}

	case NAND_CMD_RESET:
		ifc_out32(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT,
			  &ifc->ifc_nand.nand_fir0);
		ifc_out32(NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT,
			  &ifc->ifc_nand.nand_fcr0);
		fsl_ifc_run_command(mtd);
		return;

	default:
		dev_err(priv->dev, "%s: error, unsupported command 0x%x.\n",
			__func__, command);
	}
}

static void fsl_ifc_select_chip(struct nand_chip *chip, int cs)
{
	/* The hardware does not seem to support multiple
	 * chips per bank.
	 */
}

/*
 * Write buf to the IFC NAND Controller Data Buffer
 */
static void fsl_ifc_write_buf(struct nand_chip *chip, const u8 *buf, int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
	unsigned int bufsize = mtd->writesize + mtd->oobsize;

	if (len <= 0) {
		dev_err(priv->dev, "%s: len %d bytes", __func__, len);
		return;
	}

	if ((unsigned int)len > bufsize - ifc_nand_ctrl->index) {
		dev_err(priv->dev,
			"%s: beyond end of buffer (%d requested, %u available)\n",
			__func__, len, bufsize - ifc_nand_ctrl->index);
		len = bufsize - ifc_nand_ctrl->index;
	}

	memcpy_toio(ifc_nand_ctrl->addr + ifc_nand_ctrl->index, buf, len);
	ifc_nand_ctrl->index += len;
}

/*
 * Read a byte from the IFC hardware buffer;
 * read function for 8-bit buswidth
 */
static uint8_t fsl_ifc_read_byte(struct nand_chip *chip)
{
	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
	unsigned int offset;

	/*
	 * If there are still bytes in the IFC buffer, then use the
	 * next byte.
	 */
	if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) {
		offset = ifc_nand_ctrl->index++;
		return ifc_in8(ifc_nand_ctrl->addr + offset);
	}

	dev_err(priv->dev, "%s: beyond end of buffer\n", __func__);
	return ERR_BYTE;
}

/*
 * Read two bytes from the IFC hardware buffer;
 * read function for 16-bit buswidth
 */
static uint8_t fsl_ifc_read_byte16(struct nand_chip *chip)
{
	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
	uint16_t data;

	/*
	 * If there are still bytes in the IFC buffer, then use the
	 * next byte.
	 */
	if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) {
		data = ifc_in16(ifc_nand_ctrl->addr + ifc_nand_ctrl->index);
		ifc_nand_ctrl->index += 2;
		return (uint8_t) data;
	}

	dev_err(priv->dev, "%s: beyond end of buffer\n", __func__);
	return ERR_BYTE;
}

/*
 * Read from the IFC Controller Data Buffer
 */
static void fsl_ifc_read_buf(struct nand_chip *chip, u8 *buf, int len)
{
	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
	int avail;

	if (len < 0) {
		dev_err(priv->dev, "%s: len %d bytes", __func__, len);
		return;
	}

	avail = min((unsigned int)len,
		    ifc_nand_ctrl->read_bytes - ifc_nand_ctrl->index);
	memcpy_fromio(buf, ifc_nand_ctrl->addr + ifc_nand_ctrl->index, avail);
	ifc_nand_ctrl->index += avail;

	if (len > avail)
		dev_err(priv->dev,
			"%s: beyond end of buffer (%d requested, %d available)\n",
			__func__, len, avail);
}

/*
 * This function is called after Program and Erase Operations to
 * check for success or failure.
 */
static int fsl_ifc_wait(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
	struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
	u32 nand_fsr;
	int status;

	/* Use READ_STATUS command, but wait for the device to be ready */
	ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
		  (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT),
		  &ifc->ifc_nand.nand_fir0);
	ifc_out32(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
		  &ifc->ifc_nand.nand_fcr0);
	ifc_out32(1, &ifc->ifc_nand.nand_fbcr);
	set_addr(mtd, 0, 0, 0);
	ifc_nand_ctrl->read_bytes = 1;

	fsl_ifc_run_command(mtd);

	nand_fsr = ifc_in32(&ifc->ifc_nand.nand_fsr);
	status = nand_fsr >> 24;
	/*
	 * The chip always seems to report that it is
	 * write-protected, even when it is not.
	 */
	return status | NAND_STATUS_WP;
}

/*
 * The controller does not check for bitflips in erased pages,
 * therefore software must check instead.
 */
static int check_erased_page(struct nand_chip *chip, u8 *buf)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 *ecc = chip->oob_poi;
	const int ecc_size = chip->ecc.bytes;
	const int pkt_size = chip->ecc.size;
	int i, res, bitflips = 0;
	struct mtd_oob_region oobregion = { };

	mtd_ooblayout_ecc(mtd, 0, &oobregion);
	ecc += oobregion.offset;

	for (i = 0; i < chip->ecc.steps; ++i) {
		res = nand_check_erased_ecc_chunk(buf, pkt_size, ecc, ecc_size,
						  NULL, 0,
						  chip->ecc.strength);
		if (res < 0)
			mtd->ecc_stats.failed++;
		else
			mtd->ecc_stats.corrected += res;

		bitflips = max(res, bitflips);
		buf += pkt_size;
		ecc += ecc_size;
	}

	return bitflips;
}

static int fsl_ifc_read_page(struct nand_chip *chip, uint8_t *buf,
			     int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
	struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;

	nand_read_page_op(chip, page, 0, buf, mtd->writesize);
	if (oob_required)
		fsl_ifc_read_buf(chip, chip->oob_poi, mtd->oobsize);

	if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_ECCER) {
		if (!oob_required)
			fsl_ifc_read_buf(chip, chip->oob_poi, mtd->oobsize);

		return check_erased_page(chip, buf);
	}

	if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC)
		mtd->ecc_stats.failed++;

	return nctrl->max_bitflips;
}

/* ECC will be calculated automatically, and errors will be detected in
 * waitfunc.
 */
static int fsl_ifc_write_page(struct nand_chip *chip, const uint8_t *buf,
			      int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
	fsl_ifc_write_buf(chip, chip->oob_poi, mtd->oobsize);

	return nand_prog_page_end_op(chip);
}

static int fsl_ifc_attach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
	struct fsl_ifc_global __iomem *ifc_global = ctrl->gregs;
	u32 csor;

	csor = ifc_in32(&ifc_global->csor_cs[priv->bank].csor);

	/* Must also set CSOR_NAND_ECC_ENC_EN if DEC_EN set */
	if (csor & CSOR_NAND_ECC_DEC_EN) {
		chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
		mtd_set_ooblayout(mtd, &fsl_ifc_ooblayout_ops);

		/* Hardware generates ECC per 512 Bytes */
		chip->ecc.size = 512;
		if ((csor & CSOR_NAND_ECC_MODE_MASK) == CSOR_NAND_ECC_MODE_4) {
			chip->ecc.bytes = 8;
			chip->ecc.strength = 4;
		} else {
			chip->ecc.bytes = 16;
			chip->ecc.strength = 8;
		}
	} else {
		chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
		chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
	}

	dev_dbg(priv->dev, "%s: nand->numchips = %d\n", __func__,
		nanddev_ntargets(&chip->base));
	dev_dbg(priv->dev, "%s: nand->chipsize = %lld\n", __func__,
		nanddev_target_size(&chip->base));
	dev_dbg(priv->dev, "%s: nand->pagemask = %8x\n", __func__,
		chip->pagemask);
	dev_dbg(priv->dev, "%s: nand->legacy.chip_delay = %d\n", __func__,
		chip->legacy.chip_delay);
	dev_dbg(priv->dev, "%s: nand->badblockpos = %d\n", __func__,
		chip->badblockpos);
	dev_dbg(priv->dev, "%s: nand->chip_shift = %d\n", __func__,
		chip->chip_shift);
	dev_dbg(priv->dev, "%s: nand->page_shift = %d\n", __func__,
		chip->page_shift);
	dev_dbg(priv->dev, "%s: nand->phys_erase_shift = %d\n", __func__,
		chip->phys_erase_shift);
	dev_dbg(priv->dev, "%s: nand->ecc.engine_type = %d\n", __func__,
		chip->ecc.engine_type);
	dev_dbg(priv->dev, "%s: nand->ecc.steps = %d\n", __func__,
		chip->ecc.steps);
	dev_dbg(priv->dev, "%s: nand->ecc.bytes = %d\n", __func__,
		chip->ecc.bytes);
	dev_dbg(priv->dev, "%s: nand->ecc.total = %d\n", __func__,
		chip->ecc.total);
	dev_dbg(priv->dev, "%s: mtd->ooblayout = %p\n", __func__,
		mtd->ooblayout);
	dev_dbg(priv->dev, "%s: mtd->flags = %08x\n", __func__, mtd->flags);
	dev_dbg(priv->dev, "%s: mtd->size = %lld\n", __func__, mtd->size);
	dev_dbg(priv->dev, "%s: mtd->erasesize = %d\n", __func__,
		mtd->erasesize);
	dev_dbg(priv->dev, "%s: mtd->writesize = %d\n", __func__,
		mtd->writesize);
	dev_dbg(priv->dev, "%s: mtd->oobsize = %d\n", __func__,
		mtd->oobsize);

	return 0;
}

static const struct nand_controller_ops fsl_ifc_controller_ops = {
	.attach_chip = fsl_ifc_attach_chip,
};

static int fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
{
	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
	struct fsl_ifc_runtime __iomem *ifc_runtime = ctrl->rregs;
	struct fsl_ifc_global __iomem *ifc_global = ctrl->gregs;
	uint32_t csor = 0, csor_8k = 0, csor_ext = 0;
	uint32_t cs = priv->bank;

	if (ctrl->version < FSL_IFC_VERSION_1_1_0)
		return 0;

	if (ctrl->version > FSL_IFC_VERSION_1_1_0) {
		u32 ncfgr, status;
		int ret;

		/* Trigger auto initialization */
		ncfgr = ifc_in32(&ifc_runtime->ifc_nand.ncfgr);
		ifc_out32(ncfgr | IFC_NAND_NCFGR_SRAM_INIT_EN,
			  &ifc_runtime->ifc_nand.ncfgr);

		/* Wait until done */
		ret = readx_poll_timeout(ifc_in32, &ifc_runtime->ifc_nand.ncfgr,
					 status,
					 !(status & IFC_NAND_NCFGR_SRAM_INIT_EN),
					 10, IFC_TIMEOUT_MSECS * 1000);
		if (ret)
			dev_err(priv->dev, "Failed to initialize SRAM!\n");

		return ret;
	}

	/* Save CSOR and CSOR_ext */
	csor = ifc_in32(&ifc_global->csor_cs[cs].csor);
	csor_ext = ifc_in32(&ifc_global->csor_cs[cs].csor_ext);

	/* change PageSize to 8K and SpareSize to 1K */
	csor_8k = (csor & ~(CSOR_NAND_PGS_MASK)) | 0x0018C000;
	ifc_out32(csor_8k, &ifc_global->csor_cs[cs].csor);
	ifc_out32(0x0000400, &ifc_global->csor_cs[cs].csor_ext);

	/* READID */
	ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
		  (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
		  (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT),
		  &ifc_runtime->ifc_nand.nand_fir0);
	ifc_out32(NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT,
		  &ifc_runtime->ifc_nand.nand_fcr0);
	ifc_out32(0x0, &ifc_runtime->ifc_nand.row3);

	ifc_out32(0x0, &ifc_runtime->ifc_nand.nand_fbcr);

	/* Program ROW0/COL0 */
	ifc_out32(0x0, &ifc_runtime->ifc_nand.row0);
	ifc_out32(0x0, &ifc_runtime->ifc_nand.col0);

	/* set the chip select for NAND Transaction */
	ifc_out32(cs << IFC_NAND_CSEL_SHIFT,
		  &ifc_runtime->ifc_nand.nand_csel);

	/* start read seq */
	ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT,
		  &ifc_runtime->ifc_nand.nandseq_strt);

	/* wait for command complete flag or timeout */
	wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
			   msecs_to_jiffies(IFC_TIMEOUT_MSECS));

	if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC) {
		pr_err("fsl-ifc: Failed to Initialise SRAM\n");
		return -ETIMEDOUT;
	}

	/* Restore CSOR and CSOR_ext */
	ifc_out32(csor, &ifc_global->csor_cs[cs].csor);
	ifc_out32(csor_ext, &ifc_global->csor_cs[cs].csor_ext);

	return 0;
}

static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
{
	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
	struct fsl_ifc_global __iomem *ifc_global = ctrl->gregs;
	struct fsl_ifc_runtime __iomem *ifc_runtime = ctrl->rregs;
	struct nand_chip *chip = &priv->chip;
	struct mtd_info *mtd = nand_to_mtd(&priv->chip);
	u32 csor;
	int ret;

	/* Fill in fsl_ifc_mtd structure */
	mtd->dev.parent = priv->dev;
	nand_set_flash_node(chip, priv->dev->of_node);

	/* fill in nand_chip structure */
	/* set up function call table */
	if ((ifc_in32(&ifc_global->cspr_cs[priv->bank].cspr))
		& CSPR_PORT_SIZE_16)
		chip->legacy.read_byte = fsl_ifc_read_byte16;
	else
		chip->legacy.read_byte = fsl_ifc_read_byte;

	chip->legacy.write_buf = fsl_ifc_write_buf;
	chip->legacy.read_buf = fsl_ifc_read_buf;
	chip->legacy.select_chip = fsl_ifc_select_chip;
	chip->legacy.cmdfunc = fsl_ifc_cmdfunc;
	chip->legacy.waitfunc = fsl_ifc_wait;
	chip->legacy.set_features = nand_get_set_features_notsupp;
	chip->legacy.get_features = nand_get_set_features_notsupp;

	chip->bbt_td = &bbt_main_descr;
	chip->bbt_md = &bbt_mirror_descr;

	ifc_out32(0x0, &ifc_runtime->ifc_nand.ncfgr);

	/* set up nand options */
	chip->bbt_options = NAND_BBT_USE_FLASH;
	chip->options = NAND_NO_SUBPAGE_WRITE;

	if (ifc_in32(&ifc_global->cspr_cs[priv->bank].cspr)
		& CSPR_PORT_SIZE_16) {
		chip->legacy.read_byte = fsl_ifc_read_byte16;
		chip->options |= NAND_BUSWIDTH_16;
	} else {
		chip->legacy.read_byte = fsl_ifc_read_byte;
	}

	chip->controller = &ifc_nand_ctrl->controller;
	nand_set_controller_data(chip, priv);

	chip->ecc.read_page = fsl_ifc_read_page;
	chip->ecc.write_page = fsl_ifc_write_page;

	csor = ifc_in32(&ifc_global->csor_cs[priv->bank].csor);

	switch (csor & CSOR_NAND_PGS_MASK) {
	case CSOR_NAND_PGS_512:
		if (!(chip->options & NAND_BUSWIDTH_16)) {
			/* Avoid conflict with bad block marker */
			bbt_main_descr.offs = 0;
			bbt_mirror_descr.offs = 0;
		}

		priv->bufnum_mask = 15;
		break;

	case CSOR_NAND_PGS_2K:
		priv->bufnum_mask = 3;
		break;

	case CSOR_NAND_PGS_4K:
		priv->bufnum_mask = 1;
		break;

	case CSOR_NAND_PGS_8K:
		priv->bufnum_mask = 0;
		break;

	default:
		dev_err(priv->dev, "bad csor %#x: bad page size\n", csor);
		return -ENODEV;
	}

	ret = fsl_ifc_sram_init(priv);
	if (ret)
		return ret;

	/*
	 * IFC version 2.0.0 and later have 16KB of internal SRAM, compared
	 * to 8KB on older versions, so the bufnum mask needs to be updated.
	 */
	if (ctrl->version >= FSL_IFC_VERSION_2_0_0)
		priv->bufnum_mask = (priv->bufnum_mask * 2) + 1;

	return 0;
}

static int fsl_ifc_chip_remove(struct fsl_ifc_mtd *priv)
{
	struct mtd_info *mtd = nand_to_mtd(&priv->chip);

	kfree(mtd->name);

	if (priv->vbase)
		iounmap(priv->vbase);

	ifc_nand_ctrl->chips[priv->bank] = NULL;

	return 0;
}

static int match_bank(struct fsl_ifc_global __iomem *ifc_global, int bank,
		      phys_addr_t addr)
{
	u32 cspr = ifc_in32(&ifc_global->cspr_cs[bank].cspr);

	if (!(cspr & CSPR_V))
		return 0;
	if ((cspr & CSPR_MSEL) != CSPR_MSEL_NAND)
		return 0;

	return (cspr & CSPR_BA) == convert_ifc_address(addr);
}

static DEFINE_MUTEX(fsl_ifc_nand_mutex);

static int fsl_ifc_nand_probe(struct platform_device *dev)
{
	struct fsl_ifc_runtime __iomem *ifc;
	struct fsl_ifc_mtd *priv;
	struct resource res;
	static const char *part_probe_types[]
		= { "cmdlinepart", "RedBoot", "ofpart", NULL };
	int ret;
	int bank;
	struct device_node *node = dev->dev.of_node;
	struct mtd_info *mtd;

	if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->rregs)
		return -ENODEV;
	ifc = fsl_ifc_ctrl_dev->rregs;

	/* get, allocate and map the memory resource */
	ret = of_address_to_resource(node, 0, &res);
	if (ret) {
		dev_err(&dev->dev, "%s: failed to get resource\n", __func__);
		return ret;
	}

	/* find which chip select it is connected to */
	for (bank = 0; bank < fsl_ifc_ctrl_dev->banks; bank++) {
		if (match_bank(fsl_ifc_ctrl_dev->gregs, bank, res.start))
			break;
	}

	if (bank >= fsl_ifc_ctrl_dev->banks) {
		dev_err(&dev->dev, "%s: address did not match any chip selects\n",
			__func__);
		return -ENODEV;
	}

	priv = devm_kzalloc(&dev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	mutex_lock(&fsl_ifc_nand_mutex);
	if (!fsl_ifc_ctrl_dev->nand) {
		ifc_nand_ctrl = kzalloc(sizeof(*ifc_nand_ctrl), GFP_KERNEL);
		if (!ifc_nand_ctrl) {
			mutex_unlock(&fsl_ifc_nand_mutex);
			return -ENOMEM;
		}

		ifc_nand_ctrl->read_bytes = 0;
		ifc_nand_ctrl->index = 0;
		ifc_nand_ctrl->addr = NULL;
		fsl_ifc_ctrl_dev->nand = ifc_nand_ctrl;

		nand_controller_init(&ifc_nand_ctrl->controller);
	} else {
		ifc_nand_ctrl = fsl_ifc_ctrl_dev->nand;
	}
	mutex_unlock(&fsl_ifc_nand_mutex);

	ifc_nand_ctrl->chips[bank] = priv;
	priv->bank = bank;
	priv->ctrl = fsl_ifc_ctrl_dev;
	priv->dev = &dev->dev;

	priv->vbase = ioremap(res.start, resource_size(&res));
	if (!priv->vbase) {
		dev_err(priv->dev, "%s: failed to map chip region\n", __func__);
		ret = -ENOMEM;
		goto err;
	}

	dev_set_drvdata(priv->dev, priv);

	ifc_out32(IFC_NAND_EVTER_EN_OPC_EN |
		  IFC_NAND_EVTER_EN_FTOER_EN |
		  IFC_NAND_EVTER_EN_WPER_EN,
		  &ifc->ifc_nand.nand_evter_en);

	/* enable NAND Machine Interrupts */
	ifc_out32(IFC_NAND_EVTER_INTR_OPCIR_EN |
		  IFC_NAND_EVTER_INTR_FTOERIR_EN |
		  IFC_NAND_EVTER_INTR_WPERIR_EN,
		  &ifc->ifc_nand.nand_evter_intr_en);

	mtd = nand_to_mtd(&priv->chip);
	mtd->name = kasprintf(GFP_KERNEL, "%llx.flash", (u64)res.start);
	if (!mtd->name) {
		ret = -ENOMEM;
		goto err;
	}

	ret = fsl_ifc_chip_init(priv);
	if (ret)
		goto err;

	priv->chip.controller->ops = &fsl_ifc_controller_ops;
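	/*
	 * nand_scan() invokes fsl_ifc_attach_chip() through these ops;
	 * that is where hardware vs. software ECC is chosen based on the
	 * CSOR settings for this chip select.
	 */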
	ret = nand_scan(&priv->chip, 1);
	if (ret)
		goto err;

	/* First look for RedBoot table or partitions on the command
	 * line; these take precedence over device tree information */
	ret = mtd_device_parse_register(mtd, part_probe_types, NULL, NULL, 0);
	if (ret)
		goto cleanup_nand;

	dev_info(priv->dev, "IFC NAND device at 0x%llx, bank %d\n",
		 (unsigned long long)res.start, priv->bank);

	return 0;

cleanup_nand:
	nand_cleanup(&priv->chip);
err:
	fsl_ifc_chip_remove(priv);

	return ret;
}

static void fsl_ifc_nand_remove(struct platform_device *dev)
{
	struct fsl_ifc_mtd *priv = dev_get_drvdata(&dev->dev);
	struct nand_chip *chip = &priv->chip;
	int ret;

	ret = mtd_device_unregister(nand_to_mtd(chip));
	WARN_ON(ret);
	nand_cleanup(chip);

	fsl_ifc_chip_remove(priv);

	mutex_lock(&fsl_ifc_nand_mutex);
	ifc_nand_ctrl->counter--;
	if (!ifc_nand_ctrl->counter) {
		fsl_ifc_ctrl_dev->nand = NULL;
		kfree(ifc_nand_ctrl);
	}
	mutex_unlock(&fsl_ifc_nand_mutex);
}

static const struct of_device_id fsl_ifc_nand_match[] = {
	{
		.compatible = "fsl,ifc-nand",
	},
	{}
};
MODULE_DEVICE_TABLE(of, fsl_ifc_nand_match);

static struct platform_driver fsl_ifc_nand_driver = {
	.driver = {
		.name	= "fsl,ifc-nand",
		.of_match_table = fsl_ifc_nand_match,
	},
	.probe       = fsl_ifc_nand_probe,
	.remove_new  = fsl_ifc_nand_remove,
};

module_platform_driver(fsl_ifc_nand_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Freescale");
MODULE_DESCRIPTION("Freescale Integrated Flash Controller MTD NAND driver");