// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale Integrated Flash Controller NAND driver
 *
 * Copyright 2011-2012 Freescale Semiconductor, Inc
 *
 * Author: Dipen Dudhat <Dipen.Dudhat@freescale.com>
 */

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/fsl_ifc.h>
#include <linux/iopoll.h>

#define ERR_BYTE		0xFF	/* Value returned for read bytes
					   when read failed */
#define IFC_TIMEOUT_MSECS	1000	/* Maximum timeout to wait
					   for IFC NAND Machine */

struct fsl_ifc_ctrl;

/* mtd information per set */
struct fsl_ifc_mtd {
	struct nand_chip chip;
	struct fsl_ifc_ctrl *ctrl;

	struct device *dev;
	int bank;			/* Chip select bank number */
	unsigned int bufnum_mask;	/* bufnum = page & bufnum_mask */
	u8 __iomem *vbase;		/* Chip select base virtual address */
};

/* Overview of the FSL IFC controller */
struct fsl_ifc_nand_ctrl {
	struct nand_controller controller;
	struct fsl_ifc_mtd *chips[FSL_IFC_BANK_COUNT];

	void __iomem *addr;	/* Address of assigned IFC buffer	*/
	unsigned int page;	/* Last page written to / read from	*/
	unsigned int read_bytes;/* Number of bytes read during command	*/
	unsigned int column;	/* Saved column from SEQIN		*/
	unsigned int index;	/* Pointer to next byte to 'read'	*/
	unsigned int oob;	/* Non zero if operating on OOB data	*/
	unsigned int eccread;	/* Non zero for a full-page ECC read	*/
	unsigned int counter;	/* counter for the initializations	*/
	unsigned int max_bitflips; /* Saved during READ0 cmd		*/
};

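/*
 * A single fsl_ifc_nand_ctrl instance is shared by every NAND chip-select
 * bank on the IFC; it tracks the state of the command currently being
 * executed (assigned SRAM buffer, byte index, expected read length).
 */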
static struct fsl_ifc_nand_ctrl *ifc_nand_ctrl;

/*
 * Generic flash bbt descriptors
 */
static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };

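/*
 * The bad block tables live in the last good blocks of the device
 * (NAND_BBT_LASTBLOCK), are identified by the "Bbt0"/"1tbB" patterns at
 * OOB offset 2, and carry a version byte at OOB offset 6.
 */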
static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
		   NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs = 2, /* 0 on 8-bit small page */
	.len = 4,
	.veroffs = 6,
	.maxblocks = 4,
	.pattern = bbt_pattern,
};

static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
		   NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs = 2, /* 0 on 8-bit small page */
	.len = 4,
	.veroffs = 6,
	.maxblocks = 4,
	.pattern = mirror_pattern,
};

static int fsl_ifc_ooblayout_ecc(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section)
		return -ERANGE;

	oobregion->offset = 8;
	oobregion->length = chip->ecc.total;

	return 0;
}

static int fsl_ifc_ooblayout_free(struct mtd_info *mtd, int section,
				  struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section > 1)
		return -ERANGE;

	if (mtd->writesize == 512 &&
	    !(chip->options & NAND_BUSWIDTH_16)) {
		if (!section) {
			oobregion->offset = 0;
			oobregion->length = 5;
		} else {
			oobregion->offset = 6;
			oobregion->length = 2;
		}

		return 0;
	}

	if (!section) {
		oobregion->offset = 2;
		oobregion->length = 6;
	} else {
		oobregion->offset = chip->ecc.total + 8;
		oobregion->length = mtd->oobsize - oobregion->offset;
	}

	return 0;
}

static const struct mtd_ooblayout_ops fsl_ifc_ooblayout_ops = {
	.ecc = fsl_ifc_ooblayout_ecc,
	.free = fsl_ifc_ooblayout_free,
};

/*
 * Set up the IFC hardware block and page address fields, and the ifc nand
 * structure addr field to point to the correct IFC buffer in memory
 */
static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
	struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
	int buf_num;

	ifc_nand_ctrl->page = page_addr;
	/* Program ROW0/COL0 */
	ifc_out32(page_addr, &ifc->ifc_nand.row0);
	ifc_out32((oob ? IFC_NAND_COL_MS : 0) | column, &ifc->ifc_nand.col0);

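	/*
	 * Each page is assigned one of the IFC SRAM buffers; a buffer is
	 * 2 * writesize bytes long (main data followed by OOB), and the
	 * low page-address bits select which buffer is used.
	 */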
	buf_num = page_addr & priv->bufnum_mask;

	ifc_nand_ctrl->addr = priv->vbase + buf_num * (mtd->writesize * 2);
	ifc_nand_ctrl->index = column;

	/* for OOB data point to the second half of the buffer */
	if (oob)
		ifc_nand_ctrl->index += mtd->writesize;
}

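/*
 * Each 32-bit ECCSTAT register covers four ECC sectors, one byte per
 * sector with sector 0 in the most significant byte; the low nibble of
 * each byte holds the number of corrected bitflips, and the value 15
 * marks an uncorrectable error.
 */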
/* Return the ECC status nibble for the given ECC sector (bufnum) */
static int check_read_ecc(struct mtd_info *mtd, struct fsl_ifc_ctrl *ctrl,
			  u32 eccstat, unsigned int bufnum)
{
	return (eccstat >> ((3 - bufnum % 4) * 8)) & 15;
}

/*
 * execute IFC NAND command and wait for it to complete
 */
static void fsl_ifc_run_command(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
	struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
	struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
	u32 eccstat;
	int i;

	/* set the chip select for NAND Transaction */
	ifc_out32(priv->bank << IFC_NAND_CSEL_SHIFT,
		  &ifc->ifc_nand.nand_csel);

	dev_vdbg(priv->dev,
		 "%s: fir0=%08x fcr0=%08x\n",
		 __func__,
		 ifc_in32(&ifc->ifc_nand.nand_fir0),
		 ifc_in32(&ifc->ifc_nand.nand_fcr0));

	ctrl->nand_stat = 0;

	/* start read/write seq */
	ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);

	/* wait for command complete flag or timeout */
	wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
			   msecs_to_jiffies(IFC_TIMEOUT_MSECS));

	/* ctrl->nand_stat will be updated from IRQ context */
	if (!ctrl->nand_stat)
		dev_err(priv->dev, "Controller is not responding\n");
	if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_FTOER)
		dev_err(priv->dev, "NAND Flash Timeout Error\n");
	if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_WPER)
		dev_err(priv->dev, "NAND Flash Write Protect Error\n");

	nctrl->max_bitflips = 0;

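	/*
	 * For a full-page ECC read, walk every ECC sector of the page and
	 * accumulate the per-sector correction counts reported in ECCSTAT;
	 * the page's SRAM buffer number selects which group of sector
	 * status fields belongs to this page.
	 */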
	if (nctrl->eccread) {
		int errors;
		int bufnum = nctrl->page & priv->bufnum_mask;
		int sector_start = bufnum * chip->ecc.steps;
		int sector_end = sector_start + chip->ecc.steps - 1;
		__be32 __iomem *eccstat_regs;

		eccstat_regs = ifc->ifc_nand.nand_eccstat;
		eccstat = ifc_in32(&eccstat_regs[sector_start / 4]);

		for (i = sector_start; i <= sector_end; i++) {
			if (i != sector_start && !(i % 4))
				eccstat = ifc_in32(&eccstat_regs[i / 4]);

			errors = check_read_ecc(mtd, ctrl, eccstat, i);

			if (errors == 15) {
				/*
				 * Uncorrectable error.
				 * We'll check for blank pages later.
				 *
				 * We disable ECCER reporting due to erratum
				 * IFC-A002770 -- so report it now if we
				 * see an uncorrectable error in ECCSTAT.
				 */
				ctrl->nand_stat |= IFC_NAND_EVTER_STAT_ECCER;
				continue;
			}

			mtd->ecc_stats.corrected += errors;
			nctrl->max_bitflips = max_t(unsigned int,
						    nctrl->max_bitflips,
						    errors);
		}

		nctrl->eccread = 0;
	}
}

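/*
 * The IFC NAND machine is driven by two register groups: the Flash
 * Instruction Registers (FIR0/FIR1) hold the sequence of controller
 * opcodes to execute, and the Flash Command Register (FCR0) holds the
 * raw NAND command bytes those opcodes issue on the bus.
 */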
static void fsl_ifc_do_read(struct nand_chip *chip,
			    int oob,
			    struct mtd_info *mtd)
{
	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
	struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;

	/* Program FIR/IFC_NAND_FCR0 for Small/Large page */
	if (mtd->writesize > 512) {
		ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
			  (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
			  (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
			  (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) |
			  (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT),
			  &ifc->ifc_nand.nand_fir0);
		ifc_out32(0x0, &ifc->ifc_nand.nand_fir1);

		ifc_out32((NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) |
			  (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT),
			  &ifc->ifc_nand.nand_fcr0);
	} else {
		ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
			  (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
			  (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
			  (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT),
			  &ifc->ifc_nand.nand_fir0);
		ifc_out32(0x0, &ifc->ifc_nand.nand_fir1);

		if (oob)
			ifc_out32(NAND_CMD_READOOB <<
				  IFC_NAND_FCR0_CMD0_SHIFT,
				  &ifc->ifc_nand.nand_fcr0);
		else
			ifc_out32(NAND_CMD_READ0 <<
				  IFC_NAND_FCR0_CMD0_SHIFT,
				  &ifc->ifc_nand.nand_fcr0);
	}
}

/* cmdfunc send commands to the IFC NAND Machine */
static void fsl_ifc_cmdfunc(struct nand_chip *chip, unsigned int command,
			    int column, int page_addr)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
	struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;

	/* clear the read buffer */
	ifc_nand_ctrl->read_bytes = 0;
	if (command != NAND_CMD_PAGEPROG)
		ifc_nand_ctrl->index = 0;

	switch (command) {
	/* READ0 read the entire buffer to use hardware ECC. */
	case NAND_CMD_READ0:
		ifc_out32(0, &ifc->ifc_nand.nand_fbcr);
		set_addr(mtd, 0, page_addr, 0);

		ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
		ifc_nand_ctrl->index += column;

		if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST)
			ifc_nand_ctrl->eccread = 1;

		fsl_ifc_do_read(chip, 0, mtd);
		fsl_ifc_run_command(mtd);
		return;

	/* READOOB reads only the OOB because no ECC is performed. */
	case NAND_CMD_READOOB:
		ifc_out32(mtd->oobsize - column, &ifc->ifc_nand.nand_fbcr);
		set_addr(mtd, column, page_addr, 1);

		ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;

		fsl_ifc_do_read(chip, 1, mtd);
		fsl_ifc_run_command(mtd);

		return;

	case NAND_CMD_READID:
	case NAND_CMD_PARAM: {
		/*
		 * For READID, read the 8 bytes that are currently used.
		 * For PARAM, read all 3 copies of the 256-byte parameter page.
		 */
		int len = 8;
		int timing = IFC_FIR_OP_RB;

		if (command == NAND_CMD_PARAM) {
			timing = IFC_FIR_OP_RBCD;
			len = 256 * 3;
		}

		ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
			  (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
			  (timing << IFC_NAND_FIR0_OP2_SHIFT),
			  &ifc->ifc_nand.nand_fir0);
		ifc_out32(command << IFC_NAND_FCR0_CMD0_SHIFT,
			  &ifc->ifc_nand.nand_fcr0);
		ifc_out32(column, &ifc->ifc_nand.row3);

		ifc_out32(len, &ifc->ifc_nand.nand_fbcr);
		ifc_nand_ctrl->read_bytes = len;

		set_addr(mtd, 0, 0, 0);
		fsl_ifc_run_command(mtd);
		return;
	}

	/* ERASE1 stores the block and page address */
	case NAND_CMD_ERASE1:
		set_addr(mtd, 0, page_addr, 0);
		return;

	/* ERASE2 uses the block and page address from ERASE1 */
	case NAND_CMD_ERASE2:
		ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
			  (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) |
			  (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT),
			  &ifc->ifc_nand.nand_fir0);

		ifc_out32((NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) |
			  (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT),
			  &ifc->ifc_nand.nand_fcr0);

		ifc_out32(0, &ifc->ifc_nand.nand_fbcr);
		ifc_nand_ctrl->read_bytes = 0;
		fsl_ifc_run_command(mtd);
		return;

	/* SEQIN sets up the addr buffer and all registers except the length */
	case NAND_CMD_SEQIN: {
		u32 nand_fcr0;

		ifc_nand_ctrl->column = column;
		ifc_nand_ctrl->oob = 0;

		if (mtd->writesize > 512) {
			nand_fcr0 =
				(NAND_CMD_SEQIN << IFC_NAND_FCR0_CMD0_SHIFT) |
				(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD1_SHIFT) |
				(NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD2_SHIFT);

			ifc_out32(
				(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
				(IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
				(IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
				(IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) |
				(IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP4_SHIFT),
				&ifc->ifc_nand.nand_fir0);
			ifc_out32(
				(IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT) |
				(IFC_FIR_OP_RDSTAT << IFC_NAND_FIR1_OP6_SHIFT) |
				(IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP7_SHIFT),
				&ifc->ifc_nand.nand_fir1);
		} else {
			nand_fcr0 = ((NAND_CMD_PAGEPROG <<
					IFC_NAND_FCR0_CMD1_SHIFT) |
				    (NAND_CMD_SEQIN <<
					IFC_NAND_FCR0_CMD2_SHIFT) |
				    (NAND_CMD_STATUS <<
					IFC_NAND_FCR0_CMD3_SHIFT));

			ifc_out32(
				(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
				(IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP1_SHIFT) |
				(IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP2_SHIFT) |
				(IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) |
				(IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT),
				&ifc->ifc_nand.nand_fir0);
			ifc_out32(
				(IFC_FIR_OP_CMD1 << IFC_NAND_FIR1_OP5_SHIFT) |
				(IFC_FIR_OP_CW3 << IFC_NAND_FIR1_OP6_SHIFT) |
				(IFC_FIR_OP_RDSTAT << IFC_NAND_FIR1_OP7_SHIFT) |
				(IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP8_SHIFT),
				&ifc->ifc_nand.nand_fir1);

			if (column >= mtd->writesize)
				nand_fcr0 |=
				NAND_CMD_READOOB << IFC_NAND_FCR0_CMD0_SHIFT;
			else
				nand_fcr0 |=
				NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT;
		}

		if (column >= mtd->writesize) {
			/* OOB area --> READOOB */
			column -= mtd->writesize;
			ifc_nand_ctrl->oob = 1;
		}
		ifc_out32(nand_fcr0, &ifc->ifc_nand.nand_fcr0);
		set_addr(mtd, column, page_addr, ifc_nand_ctrl->oob);
		return;
	}

	/* PAGEPROG reuses all of the setup from SEQIN and adds the length */
	case NAND_CMD_PAGEPROG: {
		if (ifc_nand_ctrl->oob) {
			ifc_out32(ifc_nand_ctrl->index -
				  ifc_nand_ctrl->column,
				  &ifc->ifc_nand.nand_fbcr);
		} else {
			ifc_out32(0, &ifc->ifc_nand.nand_fbcr);
		}

		fsl_ifc_run_command(mtd);
		return;
	}

	case NAND_CMD_STATUS: {
		void __iomem *addr;

		ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
			  (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT),
			  &ifc->ifc_nand.nand_fir0);
		ifc_out32(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
			  &ifc->ifc_nand.nand_fcr0);
		ifc_out32(1, &ifc->ifc_nand.nand_fbcr);
		set_addr(mtd, 0, 0, 0);
		ifc_nand_ctrl->read_bytes = 1;

		fsl_ifc_run_command(mtd);

		/*
		 * The chip always seems to report that it is
		 * write-protected, even when it is not.
		 */
		addr = ifc_nand_ctrl->addr;
		if (chip->options & NAND_BUSWIDTH_16)
			ifc_out16(ifc_in16(addr) | (NAND_STATUS_WP), addr);
		else
			ifc_out8(ifc_in8(addr) | (NAND_STATUS_WP), addr);
		return;
	}

	case NAND_CMD_RESET:
		ifc_out32(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT,
			  &ifc->ifc_nand.nand_fir0);
		ifc_out32(NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT,
			  &ifc->ifc_nand.nand_fcr0);
		fsl_ifc_run_command(mtd);
		return;

	default:
		dev_err(priv->dev, "%s: error, unsupported command 0x%x.\n",
			__func__, command);
	}
}

static void fsl_ifc_select_chip(struct nand_chip *chip, int cs)
{
	/* The hardware does not seem to support multiple
	 * chips per bank.
	 */
}

/*
 * Write buf to the IFC NAND Controller Data Buffer
 */
static void fsl_ifc_write_buf(struct nand_chip *chip, const u8 *buf, int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
	unsigned int bufsize = mtd->writesize + mtd->oobsize;

	if (len <= 0) {
		dev_err(priv->dev, "%s: len %d bytes", __func__, len);
		return;
	}

	if ((unsigned int)len > bufsize - ifc_nand_ctrl->index) {
		dev_err(priv->dev,
			"%s: beyond end of buffer (%d requested, %u available)\n",
			__func__, len, bufsize - ifc_nand_ctrl->index);
		len = bufsize - ifc_nand_ctrl->index;
	}

	memcpy_toio(ifc_nand_ctrl->addr + ifc_nand_ctrl->index, buf, len);
	ifc_nand_ctrl->index += len;
}

/*
 * Read a byte from the IFC hardware buffer
 * (read function for 8-bit bus width)
 */
static uint8_t fsl_ifc_read_byte(struct nand_chip *chip)
{
	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
	unsigned int offset;

	/*
	 * If there are still bytes in the IFC buffer, then use the
	 * next byte.
	 */
	if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) {
		offset = ifc_nand_ctrl->index++;
		return ifc_in8(ifc_nand_ctrl->addr + offset);
	}

	dev_err(priv->dev, "%s: beyond end of buffer\n", __func__);
	return ERR_BYTE;
}

/*
 * Read two bytes from the IFC hardware buffer
 * (read function for 16-bit bus width)
 */
static uint8_t fsl_ifc_read_byte16(struct nand_chip *chip)
{
	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
	uint16_t data;

	/*
	 * If there are still bytes in the IFC buffer, then use the
	 * next byte.
	 */
	if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) {
		data = ifc_in16(ifc_nand_ctrl->addr + ifc_nand_ctrl->index);
		ifc_nand_ctrl->index += 2;
		return (uint8_t) data;
	}

	dev_err(priv->dev, "%s: beyond end of buffer\n", __func__);
	return ERR_BYTE;
}

/*
 * Read from the IFC Controller Data Buffer
 */
static void fsl_ifc_read_buf(struct nand_chip *chip, u8 *buf, int len)
{
	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
	int avail;

	if (len < 0) {
		dev_err(priv->dev, "%s: len %d bytes", __func__, len);
		return;
	}

	avail = min((unsigned int)len,
		    ifc_nand_ctrl->read_bytes - ifc_nand_ctrl->index);
	memcpy_fromio(buf, ifc_nand_ctrl->addr + ifc_nand_ctrl->index, avail);
	ifc_nand_ctrl->index += avail;

	if (len > avail)
		dev_err(priv->dev,
			"%s: beyond end of buffer (%d requested, %d available)\n",
			__func__, len, avail);
}

/*
 * This function is called after Program and Erase Operations to
 * check for success or failure.
 */
static int fsl_ifc_wait(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
	struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
	u32 nand_fsr;
	int status;

	/* Use READ_STATUS command, but wait for the device to be ready */
	ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
		  (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT),
		  &ifc->ifc_nand.nand_fir0);
	ifc_out32(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
		  &ifc->ifc_nand.nand_fcr0);
	ifc_out32(1, &ifc->ifc_nand.nand_fbcr);
	set_addr(mtd, 0, 0, 0);
	ifc_nand_ctrl->read_bytes = 1;

	fsl_ifc_run_command(mtd);

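	/*
	 * The status byte returned by the READ STATUS command is latched
	 * by the controller in the most significant byte of the NAND
	 * Flash Status Register.
	 */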
	nand_fsr = ifc_in32(&ifc->ifc_nand.nand_fsr);
	status = nand_fsr >> 24;
	/*
	 * The chip always seems to report that it is
	 * write-protected, even when it is not.
	 */
	return status | NAND_STATUS_WP;
}

/*
 * The controller does not check for bitflips in erased pages,
 * therefore software must check instead.
 */
static int check_erased_page(struct nand_chip *chip, u8 *buf)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 *ecc = chip->oob_poi;
	const int ecc_size = chip->ecc.bytes;
	const int pkt_size = chip->ecc.size;
	int i, res, bitflips = 0;
	struct mtd_oob_region oobregion = { };

	mtd_ooblayout_ecc(mtd, 0, &oobregion);
	ecc += oobregion.offset;

	for (i = 0; i < chip->ecc.steps; ++i) {
		res = nand_check_erased_ecc_chunk(buf, pkt_size, ecc, ecc_size,
						  NULL, 0,
						  chip->ecc.strength);
		if (res < 0)
			mtd->ecc_stats.failed++;
		else
			mtd->ecc_stats.corrected += res;

		bitflips = max(res, bitflips);
		buf += pkt_size;
		ecc += ecc_size;
	}

	return bitflips;
}

static int fsl_ifc_read_page(struct nand_chip *chip, uint8_t *buf,
			     int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
	struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;

	nand_read_page_op(chip, page, 0, buf, mtd->writesize);
	if (oob_required)
		fsl_ifc_read_buf(chip, chip->oob_poi, mtd->oobsize);

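	/*
	 * On an uncorrectable-ECC report, read the OOB (if it has not been
	 * read already) and let check_erased_page() decide whether the page
	 * is simply erased or genuinely corrupted.
	 */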
	if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_ECCER) {
		if (!oob_required)
			fsl_ifc_read_buf(chip, chip->oob_poi, mtd->oobsize);

		return check_erased_page(chip, buf);
	}

	if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC)
		mtd->ecc_stats.failed++;

	return nctrl->max_bitflips;
}

/* ECC will be calculated automatically, and errors will be detected in
 * waitfunc.
 */
static int fsl_ifc_write_page(struct nand_chip *chip, const uint8_t *buf,
			      int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
	fsl_ifc_write_buf(chip, chip->oob_poi, mtd->oobsize);

	return nand_prog_page_end_op(chip);
}

static int fsl_ifc_attach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
	struct fsl_ifc_global __iomem *ifc_global = ctrl->gregs;
	u32 csor;

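	/*
	 * The ECC configuration follows whatever was programmed into this
	 * bank's CSOR register (typically by the boot firmware) rather than
	 * the device tree: hardware ECC is used only if the decoder is
	 * already enabled there.
	 */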
	csor = ifc_in32(&ifc_global->csor_cs[priv->bank].csor);

	/* Must also set CSOR_NAND_ECC_ENC_EN if DEC_EN set */
	if (csor & CSOR_NAND_ECC_DEC_EN) {
		chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
		mtd_set_ooblayout(mtd, &fsl_ifc_ooblayout_ops);

		/* Hardware generates ECC per 512 Bytes */
		chip->ecc.size = 512;
		if ((csor & CSOR_NAND_ECC_MODE_MASK) == CSOR_NAND_ECC_MODE_4) {
			chip->ecc.bytes = 8;
			chip->ecc.strength = 4;
		} else {
			chip->ecc.bytes = 16;
			chip->ecc.strength = 8;
		}
	} else {
		chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
		chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
	}

	dev_dbg(priv->dev, "%s: nand->numchips = %d\n", __func__,
		nanddev_ntargets(&chip->base));
	dev_dbg(priv->dev, "%s: nand->chipsize = %lld\n", __func__,
		nanddev_target_size(&chip->base));
	dev_dbg(priv->dev, "%s: nand->pagemask = %8x\n", __func__,
		chip->pagemask);
	dev_dbg(priv->dev, "%s: nand->legacy.chip_delay = %d\n", __func__,
		chip->legacy.chip_delay);
	dev_dbg(priv->dev, "%s: nand->badblockpos = %d\n", __func__,
		chip->badblockpos);
	dev_dbg(priv->dev, "%s: nand->chip_shift = %d\n", __func__,
		chip->chip_shift);
	dev_dbg(priv->dev, "%s: nand->page_shift = %d\n", __func__,
		chip->page_shift);
	dev_dbg(priv->dev, "%s: nand->phys_erase_shift = %d\n", __func__,
		chip->phys_erase_shift);
	dev_dbg(priv->dev, "%s: nand->ecc.engine_type = %d\n", __func__,
		chip->ecc.engine_type);
	dev_dbg(priv->dev, "%s: nand->ecc.steps = %d\n", __func__,
		chip->ecc.steps);
	dev_dbg(priv->dev, "%s: nand->ecc.bytes = %d\n", __func__,
		chip->ecc.bytes);
	dev_dbg(priv->dev, "%s: nand->ecc.total = %d\n", __func__,
		chip->ecc.total);
	dev_dbg(priv->dev, "%s: mtd->ooblayout = %p\n", __func__,
		mtd->ooblayout);
	dev_dbg(priv->dev, "%s: mtd->flags = %08x\n", __func__, mtd->flags);
	dev_dbg(priv->dev, "%s: mtd->size = %lld\n", __func__, mtd->size);
	dev_dbg(priv->dev, "%s: mtd->erasesize = %d\n", __func__,
		mtd->erasesize);
	dev_dbg(priv->dev, "%s: mtd->writesize = %d\n", __func__,
		mtd->writesize);
	dev_dbg(priv->dev, "%s: mtd->oobsize = %d\n", __func__,
		mtd->oobsize);

	return 0;
}

static const struct nand_controller_ops fsl_ifc_controller_ops = {
	.attach_chip = fsl_ifc_attach_chip,
};

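/*
 * Initialize the IFC internal SRAM that backs the NAND page buffers.
 * IFC versions newer than 1.1.0 provide an auto-initialization bit in
 * NCFGR; on version 1.1.0 the SRAM is initialized by issuing a dummy
 * READID with the page size temporarily programmed to 8K so that the
 * whole buffer RAM is written.
 */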
static int fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
{
	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
	struct fsl_ifc_runtime __iomem *ifc_runtime = ctrl->rregs;
	struct fsl_ifc_global __iomem *ifc_global = ctrl->gregs;
	uint32_t csor = 0, csor_8k = 0, csor_ext = 0;
	uint32_t cs = priv->bank;

	if (ctrl->version < FSL_IFC_VERSION_1_1_0)
		return 0;

	if (ctrl->version > FSL_IFC_VERSION_1_1_0) {
		u32 ncfgr, status;
		int ret;

		/* Trigger auto initialization */
		ncfgr = ifc_in32(&ifc_runtime->ifc_nand.ncfgr);
		ifc_out32(ncfgr | IFC_NAND_NCFGR_SRAM_INIT_EN,
			  &ifc_runtime->ifc_nand.ncfgr);

		/* Wait until done */
		ret = readx_poll_timeout(ifc_in32, &ifc_runtime->ifc_nand.ncfgr,
					 status,
					 !(status & IFC_NAND_NCFGR_SRAM_INIT_EN),
					 10, IFC_TIMEOUT_MSECS * 1000);
		if (ret)
			dev_err(priv->dev, "Failed to initialize SRAM!\n");

		return ret;
	}

	/* Save CSOR and CSOR_ext */
	csor = ifc_in32(&ifc_global->csor_cs[cs].csor);
	csor_ext = ifc_in32(&ifc_global->csor_cs[cs].csor_ext);

	/* Change PageSize to 8K and SpareSize to 1K */
	csor_8k = (csor & ~(CSOR_NAND_PGS_MASK)) | 0x0018C000;
	ifc_out32(csor_8k, &ifc_global->csor_cs[cs].csor);
	ifc_out32(0x0000400, &ifc_global->csor_cs[cs].csor_ext);

	/* READID */
	ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
		  (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
		  (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT),
		  &ifc_runtime->ifc_nand.nand_fir0);
	ifc_out32(NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT,
		  &ifc_runtime->ifc_nand.nand_fcr0);
	ifc_out32(0x0, &ifc_runtime->ifc_nand.row3);

	ifc_out32(0x0, &ifc_runtime->ifc_nand.nand_fbcr);

	/* Program ROW0/COL0 */
	ifc_out32(0x0, &ifc_runtime->ifc_nand.row0);
	ifc_out32(0x0, &ifc_runtime->ifc_nand.col0);

	/* set the chip select for NAND Transaction */
	ifc_out32(cs << IFC_NAND_CSEL_SHIFT,
		  &ifc_runtime->ifc_nand.nand_csel);

	/* start read seq */
	ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT,
		  &ifc_runtime->ifc_nand.nandseq_strt);

	/* wait for command complete flag or timeout */
	wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
			   msecs_to_jiffies(IFC_TIMEOUT_MSECS));

	if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC) {
		pr_err("fsl-ifc: Failed to Initialise SRAM\n");
		return -ETIMEDOUT;
	}

	/* Restore CSOR and CSOR_ext */
	ifc_out32(csor, &ifc_global->csor_cs[cs].csor);
	ifc_out32(csor_ext, &ifc_global->csor_cs[cs].csor_ext);

	return 0;
}

static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
{
	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
	struct fsl_ifc_global __iomem *ifc_global = ctrl->gregs;
	struct fsl_ifc_runtime __iomem *ifc_runtime = ctrl->rregs;
	struct nand_chip *chip = &priv->chip;
	struct mtd_info *mtd = nand_to_mtd(&priv->chip);
	u32 csor;
	int ret;

	/* Fill in fsl_ifc_mtd structure */
	mtd->dev.parent = priv->dev;
	nand_set_flash_node(chip, priv->dev->of_node);

	/* fill in nand_chip structure */
	/* set up function call table */
	if ((ifc_in32(&ifc_global->cspr_cs[priv->bank].cspr))
		& CSPR_PORT_SIZE_16)
		chip->legacy.read_byte = fsl_ifc_read_byte16;
	else
		chip->legacy.read_byte = fsl_ifc_read_byte;

	chip->legacy.write_buf = fsl_ifc_write_buf;
	chip->legacy.read_buf = fsl_ifc_read_buf;
	chip->legacy.select_chip = fsl_ifc_select_chip;
	chip->legacy.cmdfunc = fsl_ifc_cmdfunc;
	chip->legacy.waitfunc = fsl_ifc_wait;
	chip->legacy.set_features = nand_get_set_features_notsupp;
	chip->legacy.get_features = nand_get_set_features_notsupp;

	chip->bbt_td = &bbt_main_descr;
	chip->bbt_md = &bbt_mirror_descr;

	ifc_out32(0x0, &ifc_runtime->ifc_nand.ncfgr);

	/* set up nand options */
	chip->bbt_options = NAND_BBT_USE_FLASH;
	chip->options = NAND_NO_SUBPAGE_WRITE;

	if (ifc_in32(&ifc_global->cspr_cs[priv->bank].cspr)
		& CSPR_PORT_SIZE_16) {
		chip->legacy.read_byte = fsl_ifc_read_byte16;
		chip->options |= NAND_BUSWIDTH_16;
	} else {
		chip->legacy.read_byte = fsl_ifc_read_byte;
	}

	chip->controller = &ifc_nand_ctrl->controller;
	nand_set_controller_data(chip, priv);

	chip->ecc.read_page = fsl_ifc_read_page;
	chip->ecc.write_page = fsl_ifc_write_page;

	csor = ifc_in32(&ifc_global->csor_cs[priv->bank].csor);

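	/*
	 * The IFC buffer SRAM is carved into page buffers of
	 * 2 * writesize bytes each, so the number of buffers (and hence
	 * bufnum_mask) depends on the page size configured in CSOR.
	 */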
	switch (csor & CSOR_NAND_PGS_MASK) {
	case CSOR_NAND_PGS_512:
		if (!(chip->options & NAND_BUSWIDTH_16)) {
			/* Avoid conflict with bad block marker */
			bbt_main_descr.offs = 0;
			bbt_mirror_descr.offs = 0;
		}

		priv->bufnum_mask = 15;
		break;

	case CSOR_NAND_PGS_2K:
		priv->bufnum_mask = 3;
		break;

	case CSOR_NAND_PGS_4K:
		priv->bufnum_mask = 1;
		break;

	case CSOR_NAND_PGS_8K:
		priv->bufnum_mask = 0;
		break;

	default:
		dev_err(priv->dev, "bad csor %#x: bad page size\n", csor);
		return -ENODEV;
	}

	ret = fsl_ifc_sram_init(priv);
	if (ret)
		return ret;

	/*
	 * IFC version 2.0.0 and later have 16KB of internal SRAM, compared
	 * to 8KB on older versions, so the bufnum mask needs to be doubled
	 * accordingly.
	 */
	if (ctrl->version >= FSL_IFC_VERSION_2_0_0)
		priv->bufnum_mask = (priv->bufnum_mask * 2) + 1;

	return 0;
}

static int fsl_ifc_chip_remove(struct fsl_ifc_mtd *priv)
{
	struct mtd_info *mtd = nand_to_mtd(&priv->chip);

	kfree(mtd->name);

	if (priv->vbase)
		iounmap(priv->vbase);

	ifc_nand_ctrl->chips[priv->bank] = NULL;

	return 0;
}

static int match_bank(struct fsl_ifc_global __iomem *ifc_global, int bank,
		      phys_addr_t addr)
{
	u32 cspr = ifc_in32(&ifc_global->cspr_cs[bank].cspr);

	if (!(cspr & CSPR_V))
		return 0;
	if ((cspr & CSPR_MSEL) != CSPR_MSEL_NAND)
		return 0;

	return (cspr & CSPR_BA) == convert_ifc_address(addr);
}

static DEFINE_MUTEX(fsl_ifc_nand_mutex);

static int fsl_ifc_nand_probe(struct platform_device *dev)
{
	struct fsl_ifc_runtime __iomem *ifc;
	struct fsl_ifc_mtd *priv;
	struct resource res;
	static const char *part_probe_types[]
		= { "cmdlinepart", "RedBoot", "ofpart", NULL };
	int ret;
	int bank;
	struct device_node *node = dev->dev.of_node;
	struct mtd_info *mtd;

	if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->rregs)
		return -ENODEV;
	ifc = fsl_ifc_ctrl_dev->rregs;

	/* get, allocate and map the memory resource */
	ret = of_address_to_resource(node, 0, &res);
	if (ret) {
		dev_err(&dev->dev, "%s: failed to get resource\n", __func__);
		return ret;
	}

	/* find which chip select it is connected to */
	for (bank = 0; bank < fsl_ifc_ctrl_dev->banks; bank++) {
		if (match_bank(fsl_ifc_ctrl_dev->gregs, bank, res.start))
			break;
	}

	if (bank >= fsl_ifc_ctrl_dev->banks) {
		dev_err(&dev->dev, "%s: address did not match any chip selects\n",
			__func__);
		return -ENODEV;
	}

	priv = devm_kzalloc(&dev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

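	/*
	 * All banks share a single fsl_ifc_nand_ctrl instance; the first
	 * bank to probe allocates it, later banks reuse it.  The mutex
	 * guards against concurrent probes.
	 */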
	mutex_lock(&fsl_ifc_nand_mutex);
	if (!fsl_ifc_ctrl_dev->nand) {
		ifc_nand_ctrl = kzalloc(sizeof(*ifc_nand_ctrl), GFP_KERNEL);
		if (!ifc_nand_ctrl) {
			mutex_unlock(&fsl_ifc_nand_mutex);
			return -ENOMEM;
		}

		ifc_nand_ctrl->read_bytes = 0;
		ifc_nand_ctrl->index = 0;
		ifc_nand_ctrl->addr = NULL;
		fsl_ifc_ctrl_dev->nand = ifc_nand_ctrl;

		nand_controller_init(&ifc_nand_ctrl->controller);
	} else {
		ifc_nand_ctrl = fsl_ifc_ctrl_dev->nand;
	}
	mutex_unlock(&fsl_ifc_nand_mutex);

	ifc_nand_ctrl->chips[bank] = priv;
	priv->bank = bank;
	priv->ctrl = fsl_ifc_ctrl_dev;
	priv->dev = &dev->dev;

	priv->vbase = ioremap(res.start, resource_size(&res));
	if (!priv->vbase) {
		dev_err(priv->dev, "%s: failed to map chip region\n", __func__);
		ret = -ENOMEM;
		goto err;
	}

	dev_set_drvdata(priv->dev, priv);

	ifc_out32(IFC_NAND_EVTER_EN_OPC_EN |
		  IFC_NAND_EVTER_EN_FTOER_EN |
		  IFC_NAND_EVTER_EN_WPER_EN,
		  &ifc->ifc_nand.nand_evter_en);

	/* enable NAND Machine Interrupts */
	ifc_out32(IFC_NAND_EVTER_INTR_OPCIR_EN |
		  IFC_NAND_EVTER_INTR_FTOERIR_EN |
		  IFC_NAND_EVTER_INTR_WPERIR_EN,
		  &ifc->ifc_nand.nand_evter_intr_en);

	mtd = nand_to_mtd(&priv->chip);
	mtd->name = kasprintf(GFP_KERNEL, "%llx.flash", (u64)res.start);
	if (!mtd->name) {
		ret = -ENOMEM;
		goto err;
	}

	ret = fsl_ifc_chip_init(priv);
	if (ret)
		goto err;

	priv->chip.controller->ops = &fsl_ifc_controller_ops;
	ret = nand_scan(&priv->chip, 1);
	if (ret)
		goto err;

	/* First look for RedBoot table or partitions on the command
	 * line, these take precedence over device tree information */
	ret = mtd_device_parse_register(mtd, part_probe_types, NULL, NULL, 0);
	if (ret)
		goto cleanup_nand;

	dev_info(priv->dev, "IFC NAND device at 0x%llx, bank %d\n",
		 (unsigned long long)res.start, priv->bank);

	return 0;

cleanup_nand:
	nand_cleanup(&priv->chip);
err:
	fsl_ifc_chip_remove(priv);

	return ret;
}

static void fsl_ifc_nand_remove(struct platform_device *dev)
{
	struct fsl_ifc_mtd *priv = dev_get_drvdata(&dev->dev);
	struct nand_chip *chip = &priv->chip;
	int ret;

	ret = mtd_device_unregister(nand_to_mtd(chip));
	WARN_ON(ret);
	nand_cleanup(chip);

	fsl_ifc_chip_remove(priv);

	mutex_lock(&fsl_ifc_nand_mutex);
	ifc_nand_ctrl->counter--;
	if (!ifc_nand_ctrl->counter) {
		fsl_ifc_ctrl_dev->nand = NULL;
		kfree(ifc_nand_ctrl);
	}
	mutex_unlock(&fsl_ifc_nand_mutex);
}

static const struct of_device_id fsl_ifc_nand_match[] = {
	{
		.compatible = "fsl,ifc-nand",
	},
	{}
};
MODULE_DEVICE_TABLE(of, fsl_ifc_nand_match);

static struct platform_driver fsl_ifc_nand_driver = {
	.driver = {
		.name	= "fsl,ifc-nand",
		.of_match_table = fsl_ifc_nand_match,
	},
	.probe       = fsl_ifc_nand_probe,
	.remove_new  = fsl_ifc_nand_remove,
};

module_platform_driver(fsl_ifc_nand_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Freescale");
MODULE_DESCRIPTION("Freescale Integrated Flash Controller MTD NAND driver");