// SPDX-License-Identifier: GPL-2.0
/*
 * ARM PL35X NAND flash controller driver
 *
 * Copyright (C) 2017 Xilinx, Inc
 * Author:
 *   Miquel Raynal <miquel.raynal@bootlin.com>
 * Original work (rewritten):
 *   Punnaiah Choudary Kalluri <punnaia@xilinx.com>
 *   Naga Sureshkumar Relli <nagasure@xilinx.com>
 */

#include <linux/amba/bus.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/clk.h>

#define PL35X_NANDC_DRIVER_NAME "pl35x-nand-controller"

/* SMC controller status register (RO) */
#define PL35X_SMC_MEMC_STATUS 0x0
#define   PL35X_SMC_MEMC_STATUS_RAW_INT_STATUS1	BIT(6)
/* SMC clear config register (WO) */
#define PL35X_SMC_MEMC_CFG_CLR 0xC
#define   PL35X_SMC_MEMC_CFG_CLR_INT_DIS_1	BIT(1)
#define   PL35X_SMC_MEMC_CFG_CLR_INT_CLR_1	BIT(4)
#define   PL35X_SMC_MEMC_CFG_CLR_ECC_INT_DIS_1	BIT(6)
/* SMC direct command register (WO) */
#define PL35X_SMC_DIRECT_CMD 0x10
#define   PL35X_SMC_DIRECT_CMD_NAND_CS (0x4 << 23)
#define   PL35X_SMC_DIRECT_CMD_UPD_REGS (0x2 << 21)
/* SMC set cycles register (WO) */
#define PL35X_SMC_CYCLES 0x14
#define   PL35X_SMC_NAND_TRC_CYCLES(x) ((x) << 0)
#define   PL35X_SMC_NAND_TWC_CYCLES(x) ((x) << 4)
#define   PL35X_SMC_NAND_TREA_CYCLES(x) ((x) << 8)
#define   PL35X_SMC_NAND_TWP_CYCLES(x) ((x) << 11)
#define   PL35X_SMC_NAND_TCLR_CYCLES(x) ((x) << 14)
#define   PL35X_SMC_NAND_TAR_CYCLES(x) ((x) << 17)
#define   PL35X_SMC_NAND_TRR_CYCLES(x) ((x) << 20)
/* SMC set opmode register (WO) */
#define PL35X_SMC_OPMODE 0x18
#define   PL35X_SMC_OPMODE_BW_8 0
#define   PL35X_SMC_OPMODE_BW_16 1
/* SMC ECC status register (RO) */
#define PL35X_SMC_ECC_STATUS 0x400
#define   PL35X_SMC_ECC_STATUS_ECC_BUSY BIT(6)
/* SMC ECC configuration register */
#define PL35X_SMC_ECC_CFG 0x404
#define   PL35X_SMC_ECC_CFG_MODE_MASK 0xC
#define   PL35X_SMC_ECC_CFG_MODE_BYPASS 0
#define   PL35X_SMC_ECC_CFG_MODE_APB BIT(2)
#define   PL35X_SMC_ECC_CFG_MODE_MEM BIT(3)
#define   PL35X_SMC_ECC_CFG_PGSIZE_MASK	0x3
/* SMC ECC command 1 register */
#define PL35X_SMC_ECC_CMD1 0x408
#define   PL35X_SMC_ECC_CMD1_WRITE(x) ((x) << 0)
#define   PL35X_SMC_ECC_CMD1_READ(x) ((x) << 8)
#define   PL35X_SMC_ECC_CMD1_READ_END(x) ((x) << 16)
#define   PL35X_SMC_ECC_CMD1_READ_END_VALID(x) ((x) << 24)
/* SMC ECC command 2 register */
#define PL35X_SMC_ECC_CMD2 0x40C
#define   PL35X_SMC_ECC_CMD2_WRITE_COL_CHG(x) ((x) << 0)
#define   PL35X_SMC_ECC_CMD2_READ_COL_CHG(x) ((x) << 8)
#define   PL35X_SMC_ECC_CMD2_READ_COL_CHG_END(x) ((x) << 16)
#define   PL35X_SMC_ECC_CMD2_READ_COL_CHG_END_VALID(x) ((x) << 24)
/* SMC ECC value registers (RO) */
#define PL35X_SMC_ECC_VALUE(x) (0x418 + (4 * (x)))
#define   PL35X_SMC_ECC_VALUE_IS_CORRECTABLE(x) ((x) & BIT(27))
#define   PL35X_SMC_ECC_VALUE_HAS_FAILED(x) ((x) & BIT(28))
#define   PL35X_SMC_ECC_VALUE_IS_VALID(x) ((x) & BIT(30))

/* NAND AXI interface */
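/*
 * The SMC has no classical command/data registers for the NAND protocol:
 * the fields below are encoded into the *address* of the AXI access to
 * the NAND memory window. A command phase access carries the command
 * opcodes and the number of address cycles in its bus address, while the
 * address cycles themselves travel on the data bus (see
 * pl35x_nand_exec_op()). Data phase accesses use the flags below to tag
 * the last transfer.
 */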
#define PL35X_SMC_CMD_PHASE 0
#define PL35X_SMC_CMD_PHASE_CMD0(x) ((x) << 3)
#define PL35X_SMC_CMD_PHASE_CMD1(x) ((x) << 11)
#define PL35X_SMC_CMD_PHASE_CMD1_VALID BIT(20)
#define PL35X_SMC_CMD_PHASE_ADDR(pos, x) ((x) << (8 * (pos)))
#define PL35X_SMC_CMD_PHASE_NADDRS(x) ((x) << 21)
#define PL35X_SMC_DATA_PHASE BIT(19)
#define PL35X_SMC_DATA_PHASE_ECC_LAST BIT(10)
#define PL35X_SMC_DATA_PHASE_CLEAR_CS BIT(21)

#define PL35X_NAND_MAX_CS 1
#define PL35X_NAND_LAST_XFER_SZ 4
#define TO_CYCLES(ps, period_ns) (DIV_ROUND_UP((ps) / 1000, period_ns))
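/*
 * Example: with a 100 MHz memclk (10ns period), tRC_min = 30000ps
 * gives TO_CYCLES(30000, 10) = DIV_ROUND_UP(30, 10) = 3 cycles.
 */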

#define PL35X_NAND_ECC_BITS_MASK 0xFFF
#define PL35X_NAND_ECC_BYTE_OFF_MASK 0x1FF
#define PL35X_NAND_ECC_BIT_OFF_MASK 0x7

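/*
 * The bitfield widths below match the corresponding PL35X_SMC_CYCLES
 * register fields: pl35x_nfc_setup_interface() stores each computed
 * cycle count in this structure and reads it back to detect values that
 * would overflow the register field.
 */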
struct pl35x_nand_timings {
	unsigned int t_rc:4;
	unsigned int t_wc:4;
	unsigned int t_rea:3;
	unsigned int t_wp:3;
	unsigned int t_clr:3;
	unsigned int t_ar:3;
	unsigned int t_rr:4;
	unsigned int rsvd:8;
};

struct pl35x_nand {
	struct list_head node;
	struct nand_chip chip;
	unsigned int cs;
	unsigned int addr_cycles;
	u32 ecc_cfg;
	u32 timings;
};

/**
 * struct pl35x_nandc - NAND flash controller driver structure
 * @dev: Kernel device
 * @conf_regs: SMC configuration registers for command phase
 * @io_regs: NAND data registers for data phase
 * @controller: Core NAND controller structure
 * @chips: List of connected NAND chips
 * @selected_chip: NAND chip currently selected by the controller
 * @assigned_cs: Bitmask of the already assigned CS lines
 * @ecc_buf: Temporary buffer to extract ECC bytes
 */
struct pl35x_nandc {
	struct device *dev;
	void __iomem *conf_regs;
	void __iomem *io_regs;
	struct nand_controller controller;
	struct list_head chips;
	struct nand_chip *selected_chip;
	unsigned long assigned_cs;
	u8 *ecc_buf;
};

static inline struct pl35x_nandc *to_pl35x_nandc(struct nand_controller *ctrl)
{
	return container_of(ctrl, struct pl35x_nandc, controller);
}

static inline struct pl35x_nand *to_pl35x_nand(struct nand_chip *chip)
{
	return container_of(chip, struct pl35x_nand, chip);
}

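/*
 * Legacy Xilinx 16-byte OOB layout: the 3 ECC bytes of each 512-byte
 * step are packed at the start of the OOB area, bytes 8-15 are free.
 */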
static int pl35x_ecc_ooblayout16_ecc(struct mtd_info *mtd, int section,
				     struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section >= chip->ecc.steps)
		return -ERANGE;

	oobregion->offset = (section * chip->ecc.bytes);
	oobregion->length = chip->ecc.bytes;

	return 0;
}

static int pl35x_ecc_ooblayout16_free(struct mtd_info *mtd, int section,
				      struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section >= chip->ecc.steps)
		return -ERANGE;

	oobregion->offset = (section * chip->ecc.bytes) + 8;
	oobregion->length = 8;

	return 0;
}

static const struct mtd_ooblayout_ops pl35x_ecc_ooblayout16_ops = {
	.ecc = pl35x_ecc_ooblayout16_ecc,
	.free = pl35x_ecc_ooblayout16_free,
};

/* Generic flash bbt descriptors */
static u8 bbt_pattern[] = { 'B', 'b', 't', '0' };
static u8 mirror_pattern[] = { '1', 't', 'b', 'B' };

static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs = 4,
	.len = 4,
	.veroffs = 20,
	.maxblocks = 4,
	.pattern = bbt_pattern
};

static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs = 4,
	.len = 4,
	.veroffs = 20,
	.maxblocks = 4,
	.pattern = mirror_pattern
};

static void pl35x_smc_update_regs(struct pl35x_nandc *nfc)
{
	writel(PL35X_SMC_DIRECT_CMD_NAND_CS |
	       PL35X_SMC_DIRECT_CMD_UPD_REGS,
	       nfc->conf_regs + PL35X_SMC_DIRECT_CMD);
}

static int pl35x_smc_set_buswidth(struct pl35x_nandc *nfc, unsigned int bw)
{
	if (bw != PL35X_SMC_OPMODE_BW_8 && bw != PL35X_SMC_OPMODE_BW_16)
		return -EINVAL;

	writel(bw, nfc->conf_regs + PL35X_SMC_OPMODE);
	pl35x_smc_update_regs(nfc);

	return 0;
}

static void pl35x_smc_clear_irq(struct pl35x_nandc *nfc)
{
	writel(PL35X_SMC_MEMC_CFG_CLR_INT_CLR_1,
	       nfc->conf_regs + PL35X_SMC_MEMC_CFG_CLR);
}

static int pl35x_smc_wait_for_irq(struct pl35x_nandc *nfc)
{
	u32 reg;
	int ret;

	ret = readl_poll_timeout(nfc->conf_regs + PL35X_SMC_MEMC_STATUS, reg,
				 reg & PL35X_SMC_MEMC_STATUS_RAW_INT_STATUS1,
				 10, 1000000);
	if (ret)
		dev_err(nfc->dev,
			"Timeout polling on NAND controller interrupt (0x%x)\n",
			reg);

	pl35x_smc_clear_irq(nfc);

	return ret;
}

static int pl35x_smc_wait_for_ecc_done(struct pl35x_nandc *nfc)
{
	u32 reg;
	int ret;

	ret = readl_poll_timeout(nfc->conf_regs + PL35X_SMC_ECC_STATUS, reg,
				 !(reg & PL35X_SMC_ECC_STATUS_ECC_BUSY),
				 10, 1000000);
	if (ret)
		dev_err(nfc->dev,
			"Timeout polling on ECC controller interrupt\n");

	return ret;
}

static int pl35x_smc_set_ecc_mode(struct pl35x_nandc *nfc,
				  struct nand_chip *chip,
				  unsigned int mode)
{
	struct pl35x_nand *plnand;
	u32 ecc_cfg;

	ecc_cfg = readl(nfc->conf_regs + PL35X_SMC_ECC_CFG);
	ecc_cfg &= ~PL35X_SMC_ECC_CFG_MODE_MASK;
	ecc_cfg |= mode;
	writel(ecc_cfg, nfc->conf_regs + PL35X_SMC_ECC_CFG);

	if (chip) {
		plnand = to_pl35x_nand(chip);
		plnand->ecc_cfg = ecc_cfg;
	}

	if (mode != PL35X_SMC_ECC_CFG_MODE_BYPASS)
		return pl35x_smc_wait_for_ecc_done(nfc);

	return 0;
}

static void pl35x_smc_force_byte_access(struct nand_chip *chip,
					bool force_8bit)
{
	struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
	int ret;

	if (!(chip->options & NAND_BUSWIDTH_16))
		return;

	if (force_8bit)
		ret = pl35x_smc_set_buswidth(nfc, PL35X_SMC_OPMODE_BW_8);
	else
		ret = pl35x_smc_set_buswidth(nfc, PL35X_SMC_OPMODE_BW_16);

	if (ret)
		dev_err(nfc->dev, "Failed to change bus width\n");
}

static void pl35x_nand_select_target(struct nand_chip *chip,
				     unsigned int die_nr)
{
	struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
	struct pl35x_nand *plnand = to_pl35x_nand(chip);

	if (chip == nfc->selected_chip)
		return;

	/* Setup the timings */
	writel(plnand->timings, nfc->conf_regs + PL35X_SMC_CYCLES);
	pl35x_smc_update_regs(nfc);

	/* Configure the ECC engine */
	writel(plnand->ecc_cfg, nfc->conf_regs + PL35X_SMC_ECC_CFG);

	nfc->selected_chip = chip;
}

static void pl35x_nand_read_data_op(struct nand_chip *chip, u8 *in,
				    unsigned int len, bool force_8bit,
				    unsigned int flags, unsigned int last_flags)
{
	struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
	unsigned int buf_end = len / 4;
	unsigned int in_start = round_down(len, 4);
	unsigned int data_phase_addr;
	u32 *buf32 = (u32 *)in;
	u8 *buf8 = (u8 *)in;
	int i;

	if (force_8bit)
		pl35x_smc_force_byte_access(chip, true);

	for (i = 0; i < buf_end; i++) {
		data_phase_addr = PL35X_SMC_DATA_PHASE + flags;
		if (i + 1 == buf_end)
			data_phase_addr = PL35X_SMC_DATA_PHASE + last_flags;

		buf32[i] = readl(nfc->io_regs + data_phase_addr);
	}

	/* Extra flags are not supported on unaligned (byte) data accesses */
	for (i = in_start; i < len; i++)
		buf8[i] = readb(nfc->io_regs + PL35X_SMC_DATA_PHASE);

	if (force_8bit)
		pl35x_smc_force_byte_access(chip, false);
}

static void pl35x_nand_write_data_op(struct nand_chip *chip, const u8 *out,
				     int len, bool force_8bit,
				     unsigned int flags,
				     unsigned int last_flags)
{
	struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
	unsigned int buf_end = len / 4;
	unsigned int in_start = round_down(len, 4);
	const u32 *buf32 = (const u32 *)out;
	const u8 *buf8 = (const u8 *)out;
	unsigned int data_phase_addr;
	int i;

	if (force_8bit)
		pl35x_smc_force_byte_access(chip, true);

	for (i = 0; i < buf_end; i++) {
		data_phase_addr = PL35X_SMC_DATA_PHASE + flags;
		if (i + 1 == buf_end)
			data_phase_addr = PL35X_SMC_DATA_PHASE + last_flags;

		writel(buf32[i], nfc->io_regs + data_phase_addr);
	}

	/* Extra flags are not supported on unaligned (byte) data accesses */
	for (i = in_start; i < len; i++)
		writeb(buf8[i], nfc->io_regs + PL35X_SMC_DATA_PHASE);

	if (force_8bit)
		pl35x_smc_force_byte_access(chip, false);
}

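/*
 * The ECC engine produces, for each 512-byte chunk, a 24-bit value made
 * of two 12-bit parity words. XOR-ing the stored and recalculated words
 * classifies the error: both zero means no error; exactly complementary
 * words flag a single correctable bit in the data, whose position is
 * encoded in the "odd" word (bits [11:3]: byte offset, bits [2:0]: bit
 * offset); a single set bit overall means the error sits in the ECC
 * bytes themselves; anything else is uncorrectable.
 */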
static int pl35x_nand_correct_data(struct pl35x_nandc *nfc, unsigned char *buf,
				   unsigned char *read_ecc,
				   unsigned char *calc_ecc)
{
	unsigned short ecc_odd, ecc_even, read_ecc_lower, read_ecc_upper;
	unsigned short calc_ecc_lower, calc_ecc_upper;
	unsigned short byte_addr, bit_addr;

	read_ecc_lower = (read_ecc[0] | (read_ecc[1] << 8)) &
			 PL35X_NAND_ECC_BITS_MASK;
	read_ecc_upper = ((read_ecc[1] >> 4) | (read_ecc[2] << 4)) &
			 PL35X_NAND_ECC_BITS_MASK;

	calc_ecc_lower = (calc_ecc[0] | (calc_ecc[1] << 8)) &
			 PL35X_NAND_ECC_BITS_MASK;
	calc_ecc_upper = ((calc_ecc[1] >> 4) | (calc_ecc[2] << 4)) &
			 PL35X_NAND_ECC_BITS_MASK;

	ecc_odd = read_ecc_lower ^ calc_ecc_lower;
	ecc_even = read_ecc_upper ^ calc_ecc_upper;

	/* No error */
	if (likely(!ecc_odd && !ecc_even))
		return 0;

	/* One error in the main data; to be corrected */
	if (ecc_odd == (~ecc_even & PL35X_NAND_ECC_BITS_MASK)) {
		/* Bits [11:3] of error code give the byte offset */
		byte_addr = (ecc_odd >> 3) & PL35X_NAND_ECC_BYTE_OFF_MASK;
		/* Bits [2:0] of error code give the bit offset */
		bit_addr = ecc_odd & PL35X_NAND_ECC_BIT_OFF_MASK;
		/* Toggle the faulty bit */
		buf[byte_addr] ^= BIT(bit_addr);

		return 1;
	}

	/* One error in the ECC data; no action needed */
	if (hweight32(ecc_odd | ecc_even) == 1)
		return 1;

	return -EBADMSG;
}

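/*
 * ECC bytes are stored on the flash as the complement of the ECC value
 * register content: invert the register value and split it into
 * individual bytes, LSB first.
 */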
static void pl35x_nand_ecc_reg_to_array(struct nand_chip *chip, u32 ecc_reg,
					u8 *ecc_array)
{
	u32 ecc_value = ~ecc_reg;
	unsigned int ecc_byte;

	for (ecc_byte = 0; ecc_byte < chip->ecc.bytes; ecc_byte++)
		ecc_array[ecc_byte] = ecc_value >> (8 * ecc_byte);
}

static int pl35x_nand_read_eccbytes(struct pl35x_nandc *nfc,
				    struct nand_chip *chip, u8 *read_ecc)
{
	u32 ecc_value;
	int chunk;

	for (chunk = 0; chunk < chip->ecc.steps;
	     chunk++, read_ecc += chip->ecc.bytes) {
		ecc_value = readl(nfc->conf_regs + PL35X_SMC_ECC_VALUE(chunk));
		if (!PL35X_SMC_ECC_VALUE_IS_VALID(ecc_value))
			return -EINVAL;

		pl35x_nand_ecc_reg_to_array(chip, ecc_value, read_ecc);
	}

	return 0;
}

static int pl35x_nand_recover_data_hwecc(struct pl35x_nandc *nfc,
					 struct nand_chip *chip, u8 *data,
					 u8 *read_ecc)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int max_bitflips = 0, chunk;
	u8 calc_ecc[3];
	u32 ecc_value;
	int stats;

	for (chunk = 0; chunk < chip->ecc.steps;
	     chunk++, data += chip->ecc.size, read_ecc += chip->ecc.bytes) {
		/* Read ECC value for each chunk */
		ecc_value = readl(nfc->conf_regs + PL35X_SMC_ECC_VALUE(chunk));

		if (!PL35X_SMC_ECC_VALUE_IS_VALID(ecc_value))
			return -EINVAL;

		if (PL35X_SMC_ECC_VALUE_HAS_FAILED(ecc_value)) {
			mtd->ecc_stats.failed++;
			continue;
		}

		pl35x_nand_ecc_reg_to_array(chip, ecc_value, calc_ecc);
		stats = pl35x_nand_correct_data(nfc, data, read_ecc, calc_ecc);
		if (stats < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stats;
			max_bitflips = max_t(unsigned int, max_bitflips, stats);
		}
	}

	return max_bitflips;
}

static int pl35x_nand_write_page_hwecc(struct nand_chip *chip,
				       const u8 *buf, int oob_required,
				       int page)
{
	struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
	struct pl35x_nand *plnand = to_pl35x_nand(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int first_row = (mtd->writesize <= 512) ? 1 : 2;
	unsigned int nrows = plnand->addr_cycles;
	u32 addr1 = 0, addr2 = 0, row;
	u32 cmd_addr;
	int i, ret;

	ret = pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_APB);
	if (ret)
		return ret;

	cmd_addr = PL35X_SMC_CMD_PHASE |
		   PL35X_SMC_CMD_PHASE_NADDRS(plnand->addr_cycles) |
		   PL35X_SMC_CMD_PHASE_CMD0(NAND_CMD_SEQIN);

	for (i = 0, row = first_row; row < nrows; i++, row++) {
		u8 addr = (page >> (8 * i)) & 0xFF;

		if (row < 4)
			addr1 |= PL35X_SMC_CMD_PHASE_ADDR(row, addr);
		else
			addr2 |= PL35X_SMC_CMD_PHASE_ADDR(row - 4, addr);
	}

	/* Send the command and address cycles */
	writel(addr1, nfc->io_regs + cmd_addr);
	if (plnand->addr_cycles > 4)
		writel(addr2, nfc->io_regs + cmd_addr);

	/* Write the data with the engine enabled */
	pl35x_nand_write_data_op(chip, buf, mtd->writesize, false,
				 0, PL35X_SMC_DATA_PHASE_ECC_LAST);
	ret = pl35x_smc_wait_for_ecc_done(nfc);
	if (ret)
		goto disable_ecc_engine;

	/* Copy the HW calculated ECC bytes in the OOB buffer */
	ret = pl35x_nand_read_eccbytes(nfc, chip, nfc->ecc_buf);
	if (ret)
		goto disable_ecc_engine;

	if (!oob_required)
		memset(chip->oob_poi, 0xFF, mtd->oobsize);

	ret = mtd_ooblayout_set_eccbytes(mtd, nfc->ecc_buf, chip->oob_poi,
					 0, chip->ecc.total);
	if (ret)
		goto disable_ecc_engine;

	/* Write the spare area with ECC bytes */
	pl35x_nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false, 0,
				 PL35X_SMC_CMD_PHASE_CMD1(NAND_CMD_PAGEPROG) |
				 PL35X_SMC_CMD_PHASE_CMD1_VALID |
				 PL35X_SMC_DATA_PHASE_CLEAR_CS);
	ret = pl35x_smc_wait_for_irq(nfc);
	if (ret)
		goto disable_ecc_engine;

disable_ecc_engine:
	pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_BYPASS);

	return ret;
}

/*
 * This function reads a page and checks its integrity by comparing the
 * hardware-calculated ECC values with the ECC values read from the
 * spare area.
 *
 * There is a limitation with the SMC controller: ECC_LAST must be set
 * on the last data access to tell the ECC engine not to expect any
 * further data. In practice, this implies shrinking the last data
 * transfer by e.g. 4 bytes, and doing a final 4-byte transfer with the
 * additional bit set. The last block should be aligned with the end of
 * an ECC block. Because of this limitation, it is not possible to use
 * the core routines.
 */
static int pl35x_nand_read_page_hwecc(struct nand_chip *chip,
				      u8 *buf, int oob_required, int page)
{
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(nand_get_interface_config(chip));
	struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
	struct pl35x_nand *plnand = to_pl35x_nand(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int first_row = (mtd->writesize <= 512) ? 1 : 2;
	unsigned int nrows = plnand->addr_cycles;
	unsigned int addr1 = 0, addr2 = 0, row;
	u32 cmd_addr;
	int i, ret;

	ret = pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_APB);
	if (ret)
		return ret;

	cmd_addr = PL35X_SMC_CMD_PHASE |
		   PL35X_SMC_CMD_PHASE_NADDRS(plnand->addr_cycles) |
		   PL35X_SMC_CMD_PHASE_CMD0(NAND_CMD_READ0) |
		   PL35X_SMC_CMD_PHASE_CMD1(NAND_CMD_READSTART) |
		   PL35X_SMC_CMD_PHASE_CMD1_VALID;

	for (i = 0, row = first_row; row < nrows; i++, row++) {
		u8 addr = (page >> (8 * i)) & 0xFF;

		if (row < 4)
			addr1 |= PL35X_SMC_CMD_PHASE_ADDR(row, addr);
		else
			addr2 |= PL35X_SMC_CMD_PHASE_ADDR(row - 4, addr);
	}

	/* Send the command and address cycles */
	writel(addr1, nfc->io_regs + cmd_addr);
	if (plnand->addr_cycles > 4)
		writel(addr2, nfc->io_regs + cmd_addr);

	/* Wait for the data to be available in the NAND cache */
	ndelay(PSEC_TO_NSEC(sdr->tRR_min));
	ret = pl35x_smc_wait_for_irq(nfc);
	if (ret)
		goto disable_ecc_engine;

	/* Retrieve the raw data with the engine enabled */
	pl35x_nand_read_data_op(chip, buf, mtd->writesize, false,
				0, PL35X_SMC_DATA_PHASE_ECC_LAST);
	ret = pl35x_smc_wait_for_ecc_done(nfc);
	if (ret)
		goto disable_ecc_engine;

	/* Retrieve the stored ECC bytes */
	pl35x_nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false,
				0, PL35X_SMC_DATA_PHASE_CLEAR_CS);
	ret = mtd_ooblayout_get_eccbytes(mtd, nfc->ecc_buf, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		goto disable_ecc_engine;

	pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_BYPASS);

	/* Correct the data and report failures */
	return pl35x_nand_recover_data_hwecc(nfc, chip, buf, nfc->ecc_buf);

disable_ecc_engine:
	pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_BYPASS);

	return ret;
}

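/*
 * Execute a parsed sub-operation: CMD/ADDR instructions are folded into
 * a single command phase (one or two AXI writes whose bus address
 * encodes the opcodes and number of address cycles, and whose data
 * carries the address cycles themselves), followed by an optional data
 * phase. On program-like operations, the second command cycle is issued
 * together with the last data access instead of during the command
 * phase.
 */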
static int pl35x_nand_exec_op(struct nand_chip *chip,
			      const struct nand_subop *subop)
{
	struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
	const struct nand_op_instr *instr, *data_instr = NULL;
	unsigned int rdy_tim_ms = 0, naddrs = 0, cmds = 0, last_flags = 0;
	u32 addr1 = 0, addr2 = 0, cmd0 = 0, cmd1 = 0, cmd_addr = 0;
	unsigned int op_id, len, offset, rdy_del_ns;
	int last_instr_type = -1;
	bool cmd1_valid = false;
	const u8 *addrs;
	int i, ret;

	for (op_id = 0; op_id < subop->ninstrs; op_id++) {
		instr = &subop->instrs[op_id];

		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			if (!cmds) {
				cmd0 = PL35X_SMC_CMD_PHASE_CMD0(instr->ctx.cmd.opcode);
			} else {
				cmd1 = PL35X_SMC_CMD_PHASE_CMD1(instr->ctx.cmd.opcode);
				if (last_instr_type != NAND_OP_DATA_OUT_INSTR)
					cmd1_valid = true;
			}
			cmds++;
			break;

		case NAND_OP_ADDR_INSTR:
			offset = nand_subop_get_addr_start_off(subop, op_id);
			naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
			addrs = &instr->ctx.addr.addrs[offset];
			cmd_addr |= PL35X_SMC_CMD_PHASE_NADDRS(naddrs);

			for (i = offset; i < naddrs; i++) {
				if (i < 4)
					addr1 |= PL35X_SMC_CMD_PHASE_ADDR(i, addrs[i]);
				else
					addr2 |= PL35X_SMC_CMD_PHASE_ADDR(i - 4, addrs[i]);
			}
			break;

		case NAND_OP_DATA_IN_INSTR:
		case NAND_OP_DATA_OUT_INSTR:
			data_instr = instr;
			len = nand_subop_get_data_len(subop, op_id);
			break;

		case NAND_OP_WAITRDY_INSTR:
			rdy_tim_ms = instr->ctx.waitrdy.timeout_ms;
			rdy_del_ns = instr->delay_ns;
			break;
		}

		last_instr_type = instr->type;
	}

	/* Command phase */
	cmd_addr |= PL35X_SMC_CMD_PHASE | cmd0 | cmd1 |
		    (cmd1_valid ? PL35X_SMC_CMD_PHASE_CMD1_VALID : 0);
	writel(addr1, nfc->io_regs + cmd_addr);
	if (naddrs > 4)
		writel(addr2, nfc->io_regs + cmd_addr);

	/* Data phase */
	if (data_instr && data_instr->type == NAND_OP_DATA_OUT_INSTR) {
		last_flags = PL35X_SMC_DATA_PHASE_CLEAR_CS;
		if (cmds == 2)
			last_flags |= cmd1 | PL35X_SMC_CMD_PHASE_CMD1_VALID;

		pl35x_nand_write_data_op(chip, data_instr->ctx.data.buf.out,
					 len, data_instr->ctx.data.force_8bit,
					 0, last_flags);
	}

	if (rdy_tim_ms) {
		ndelay(rdy_del_ns);
		ret = pl35x_smc_wait_for_irq(nfc);
		if (ret)
			return ret;
	}

	if (data_instr && data_instr->type == NAND_OP_DATA_IN_INSTR)
		pl35x_nand_read_data_op(chip, data_instr->ctx.data.buf.in,
					len, data_instr->ctx.data.force_8bit,
					0, PL35X_SMC_DATA_PHASE_CLEAR_CS);

	return 0;
}

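/*
 * Three patterns are supported: a read-like sequence (two command
 * cycles around up to 7 address cycles, wait for R/B, then data input)
 * and two program-like sequences where the data output phase precedes
 * the closing command cycle. Data phases are limited to 2112 bytes,
 * i.e. a 2 kiB page plus its 64 OOB bytes.
 */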
static const struct nand_op_parser pl35x_nandc_op_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(pl35x_nand_exec_op,
			       NAND_OP_PARSER_PAT_CMD_ELEM(true),
			       NAND_OP_PARSER_PAT_ADDR_ELEM(true, 7),
			       NAND_OP_PARSER_PAT_CMD_ELEM(true),
			       NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
			       NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 2112)),
	NAND_OP_PARSER_PATTERN(pl35x_nand_exec_op,
			       NAND_OP_PARSER_PAT_CMD_ELEM(false),
			       NAND_OP_PARSER_PAT_ADDR_ELEM(false, 7),
			       NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 2112),
			       NAND_OP_PARSER_PAT_CMD_ELEM(false),
			       NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
	NAND_OP_PARSER_PATTERN(pl35x_nand_exec_op,
			       NAND_OP_PARSER_PAT_CMD_ELEM(false),
			       NAND_OP_PARSER_PAT_ADDR_ELEM(false, 7),
			       NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 2112),
			       NAND_OP_PARSER_PAT_CMD_ELEM(true),
			       NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
	);

static int pl35x_nfc_exec_op(struct nand_chip *chip,
			     const struct nand_operation *op,
			     bool check_only)
{
	if (!check_only)
		pl35x_nand_select_target(chip, op->cs);

	return nand_op_parser_exec_op(chip, &pl35x_nandc_op_parser,
				      op, check_only);
}

static int pl35x_nfc_setup_interface(struct nand_chip *chip, int cs,
				     const struct nand_interface_config *conf)
{
	struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
	struct pl35x_nand *plnand = to_pl35x_nand(chip);
	struct pl35x_nand_timings tmgs = {};
	const struct nand_sdr_timings *sdr;
	unsigned int period_ns, val;
	struct clk *mclk;

	sdr = nand_get_sdr_timings(conf);
	if (IS_ERR(sdr))
		return PTR_ERR(sdr);

	mclk = of_clk_get_by_name(nfc->dev->parent->of_node, "memclk");
	if (IS_ERR(mclk)) {
		dev_err(nfc->dev, "Failed to retrieve SMC memclk\n");
		return PTR_ERR(mclk);
	}

	/*
	 * SDR timings are given in pico-seconds while NFC timings must be
	 * expressed in NAND controller clock cycles. We use the TO_CYCLES()
	 * macro to convert from one to the other.
	 */
	period_ns = NSEC_PER_SEC / clk_get_rate(mclk);

	/*
	 * PL35X SMC needs one extra read cycle in SDR Mode 5. This is not
	 * written anywhere in the datasheet but is an empirical observation.
	 */
	val = TO_CYCLES(sdr->tRC_min, period_ns);
	if (sdr->tRC_min <= 20000)
		val++;

	tmgs.t_rc = val;
	if (tmgs.t_rc != val || tmgs.t_rc < 2)
		return -EINVAL;

	val = TO_CYCLES(sdr->tWC_min, period_ns);
	tmgs.t_wc = val;
	if (tmgs.t_wc != val || tmgs.t_wc < 2)
		return -EINVAL;

	/*
	 * For all SDR modes, the PL35X SMC needs the tREA_max value to be
	 * one cycle; this is also an empirical result.
	 */
	tmgs.t_rea = 1;

	val = TO_CYCLES(sdr->tWP_min, period_ns);
	tmgs.t_wp = val;
	if (tmgs.t_wp != val || tmgs.t_wp < 1)
		return -EINVAL;

	val = TO_CYCLES(sdr->tCLR_min, period_ns);
	tmgs.t_clr = val;
	if (tmgs.t_clr != val)
		return -EINVAL;

	val = TO_CYCLES(sdr->tAR_min, period_ns);
	tmgs.t_ar = val;
	if (tmgs.t_ar != val)
		return -EINVAL;

	val = TO_CYCLES(sdr->tRR_min, period_ns);
	tmgs.t_rr = val;
	if (tmgs.t_rr != val)
		return -EINVAL;

	if (cs == NAND_DATA_IFACE_CHECK_ONLY)
		return 0;

	plnand->timings = PL35X_SMC_NAND_TRC_CYCLES(tmgs.t_rc) |
			  PL35X_SMC_NAND_TWC_CYCLES(tmgs.t_wc) |
			  PL35X_SMC_NAND_TREA_CYCLES(tmgs.t_rea) |
			  PL35X_SMC_NAND_TWP_CYCLES(tmgs.t_wp) |
			  PL35X_SMC_NAND_TCLR_CYCLES(tmgs.t_clr) |
			  PL35X_SMC_NAND_TAR_CYCLES(tmgs.t_ar) |
			  PL35X_SMC_NAND_TRR_CYCLES(tmgs.t_rr);

	return 0;
}

static void pl35x_smc_set_ecc_pg_size(struct pl35x_nandc *nfc,
				      struct nand_chip *chip,
				      unsigned int pg_sz)
{
	struct pl35x_nand *plnand = to_pl35x_nand(chip);
	u32 sz;

	switch (pg_sz) {
	case SZ_512:
		sz = 1;
		break;
	case SZ_1K:
		sz = 2;
		break;
	case SZ_2K:
		sz = 3;
		break;
	default:
		sz = 0;
		break;
	}

	plnand->ecc_cfg = readl(nfc->conf_regs + PL35X_SMC_ECC_CFG);
	plnand->ecc_cfg &= ~PL35X_SMC_ECC_CFG_PGSIZE_MASK;
	plnand->ecc_cfg |= sz;
	writel(plnand->ecc_cfg, nfc->conf_regs + PL35X_SMC_ECC_CFG);
}

static int pl35x_nand_init_hw_ecc_controller(struct pl35x_nandc *nfc,
					     struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret = 0;

	if (mtd->writesize < SZ_512 || mtd->writesize > SZ_2K) {
		dev_err(nfc->dev,
			"The hardware ECC engine is limited to pages up to 2kiB\n");
		return -EOPNOTSUPP;
	}

	chip->ecc.strength = 1;
	chip->ecc.bytes = 3;
	chip->ecc.size = SZ_512;
	chip->ecc.steps = mtd->writesize / chip->ecc.size;
	chip->ecc.read_page = pl35x_nand_read_page_hwecc;
	chip->ecc.write_page = pl35x_nand_write_page_hwecc;
	chip->ecc.write_page_raw = nand_monolithic_write_page_raw;
	pl35x_smc_set_ecc_pg_size(nfc, chip, mtd->writesize);

	nfc->ecc_buf = devm_kmalloc(nfc->dev, chip->ecc.bytes * chip->ecc.steps,
				    GFP_KERNEL);
	if (!nfc->ecc_buf)
		return -ENOMEM;

	switch (mtd->oobsize) {
	case 16:
		/* Legacy Xilinx layout */
		mtd_set_ooblayout(mtd, &pl35x_ecc_ooblayout16_ops);
		chip->bbt_options |= NAND_BBT_NO_OOB_BBM;
		break;
	case 64:
		mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
		break;
	default:
		dev_err(nfc->dev, "Unsupported OOB size\n");
		return -EOPNOTSUPP;
	}

	return ret;
}

static int pl35x_nand_attach_chip(struct nand_chip *chip)
{
	const struct nand_ecc_props *requirements =
		nanddev_get_ecc_requirements(&chip->base);
	struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
	struct pl35x_nand *plnand = to_pl35x_nand(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_NONE &&
	    (!chip->ecc.size || !chip->ecc.strength)) {
		if (requirements->step_size && requirements->strength) {
			chip->ecc.size = requirements->step_size;
			chip->ecc.strength = requirements->strength;
		} else {
			dev_info(nfc->dev,
				 "No minimum ECC strength, using 1b/512B\n");
			chip->ecc.size = 512;
			chip->ecc.strength = 1;
		}
	}

	if (mtd->writesize <= SZ_512)
		plnand->addr_cycles = 1;
	else
		plnand->addr_cycles = 2;

	if (chip->options & NAND_ROW_ADDR_3)
		plnand->addr_cycles += 3;
	else
		plnand->addr_cycles += 2;

	switch (chip->ecc.engine_type) {
	case NAND_ECC_ENGINE_TYPE_ON_DIE:
		/* Keep these legacy BBT descriptors for ON_DIE situations */
		chip->bbt_td = &bbt_main_descr;
		chip->bbt_md = &bbt_mirror_descr;
		fallthrough;
	case NAND_ECC_ENGINE_TYPE_NONE:
	case NAND_ECC_ENGINE_TYPE_SOFT:
		break;
	case NAND_ECC_ENGINE_TYPE_ON_HOST:
		ret = pl35x_nand_init_hw_ecc_controller(nfc, chip);
		if (ret)
			return ret;
		break;
	default:
		dev_err(nfc->dev, "Unsupported ECC mode: %d\n",
			chip->ecc.engine_type);
		return -EINVAL;
	}

	return 0;
}

static const struct nand_controller_ops pl35x_nandc_ops = {
	.attach_chip = pl35x_nand_attach_chip,
	.exec_op = pl35x_nfc_exec_op,
	.setup_interface = pl35x_nfc_setup_interface,
};

static int pl35x_nand_reset_state(struct pl35x_nandc *nfc)
{
	int ret;

	/* Disable interrupts and clear their status */
	writel(PL35X_SMC_MEMC_CFG_CLR_INT_CLR_1 |
	       PL35X_SMC_MEMC_CFG_CLR_ECC_INT_DIS_1 |
	       PL35X_SMC_MEMC_CFG_CLR_INT_DIS_1,
	       nfc->conf_regs + PL35X_SMC_MEMC_CFG_CLR);

	/* Set default bus width to 8-bit */
	ret = pl35x_smc_set_buswidth(nfc, PL35X_SMC_OPMODE_BW_8);
	if (ret)
		return ret;

	/* Ensure the ECC controller is bypassed by default */
	ret = pl35x_smc_set_ecc_mode(nfc, NULL, PL35X_SMC_ECC_CFG_MODE_BYPASS);
	if (ret)
		return ret;

	/*
	 * Configure the commands that the ECC block uses to detect the
	 * operations it should start/end.
	 */
	writel(PL35X_SMC_ECC_CMD1_WRITE(NAND_CMD_SEQIN) |
	       PL35X_SMC_ECC_CMD1_READ(NAND_CMD_READ0) |
	       PL35X_SMC_ECC_CMD1_READ_END(NAND_CMD_READSTART) |
	       PL35X_SMC_ECC_CMD1_READ_END_VALID(NAND_CMD_READ1),
	       nfc->conf_regs + PL35X_SMC_ECC_CMD1);
	writel(PL35X_SMC_ECC_CMD2_WRITE_COL_CHG(NAND_CMD_RNDIN) |
	       PL35X_SMC_ECC_CMD2_READ_COL_CHG(NAND_CMD_RNDOUT) |
	       PL35X_SMC_ECC_CMD2_READ_COL_CHG_END(NAND_CMD_RNDOUTSTART) |
	       PL35X_SMC_ECC_CMD2_READ_COL_CHG_END_VALID(NAND_CMD_READ1),
	       nfc->conf_regs + PL35X_SMC_ECC_CMD2);

	return 0;
}

static int pl35x_nand_chip_init(struct pl35x_nandc *nfc,
				struct device_node *np)
{
	struct pl35x_nand *plnand;
	struct nand_chip *chip;
	struct mtd_info *mtd;
	u32 cs;
	int ret;

	plnand = devm_kzalloc(nfc->dev, sizeof(*plnand), GFP_KERNEL);
	if (!plnand)
		return -ENOMEM;

	ret = of_property_read_u32(np, "reg", &cs);
	if (ret)
		return ret;

	if (cs >= PL35X_NAND_MAX_CS) {
		dev_err(nfc->dev, "Wrong CS %u\n", cs);
		return -EINVAL;
	}

	if (test_and_set_bit(cs, &nfc->assigned_cs)) {
		dev_err(nfc->dev, "Already assigned CS %u\n", cs);
		return -EINVAL;
	}

	plnand->cs = cs;

	chip = &plnand->chip;
	chip->options = NAND_BUSWIDTH_AUTO | NAND_USES_DMA | NAND_NO_SUBPAGE_WRITE;
	chip->bbt_options = NAND_BBT_USE_FLASH;
	chip->controller = &nfc->controller;
	mtd = nand_to_mtd(chip);
	mtd->dev.parent = nfc->dev;
	nand_set_flash_node(chip, np);
	if (!mtd->name) {
		mtd->name = devm_kasprintf(nfc->dev, GFP_KERNEL,
					   "%s", PL35X_NANDC_DRIVER_NAME);
		if (!mtd->name) {
			dev_err(nfc->dev, "Failed to allocate mtd->name\n");
			return -ENOMEM;
		}
	}

	ret = nand_scan(chip, 1);
	if (ret)
		return ret;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		nand_cleanup(chip);
		return ret;
	}

	list_add_tail(&plnand->node, &nfc->chips);

	return ret;
}

static void pl35x_nand_chips_cleanup(struct pl35x_nandc *nfc)
{
	struct pl35x_nand *plnand, *tmp;
	struct nand_chip *chip;
	int ret;

	list_for_each_entry_safe(plnand, tmp, &nfc->chips, node) {
		chip = &plnand->chip;
		ret = mtd_device_unregister(nand_to_mtd(chip));
		WARN_ON(ret);
		nand_cleanup(chip);
		list_del(&plnand->node);
	}
}

static int pl35x_nand_chips_init(struct pl35x_nandc *nfc)
{
	struct device_node *np = nfc->dev->of_node, *nand_np;
	int nchips = of_get_child_count(np);
	int ret;

	if (!nchips || nchips > PL35X_NAND_MAX_CS) {
		dev_err(nfc->dev, "Incorrect number of NAND chips (%d)\n",
			nchips);
		return -EINVAL;
	}

	for_each_child_of_node(np, nand_np) {
		ret = pl35x_nand_chip_init(nfc, nand_np);
		if (ret) {
			of_node_put(nand_np);
			pl35x_nand_chips_cleanup(nfc);
			break;
		}
	}

	return ret;
}

static int pl35x_nand_probe(struct platform_device *pdev)
{
	struct device *smc_dev = pdev->dev.parent;
	struct amba_device *smc_amba = to_amba_device(smc_dev);
	struct pl35x_nandc *nfc;
	int ret;

	nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL);
	if (!nfc)
		return -ENOMEM;

	nfc->dev = &pdev->dev;
	nand_controller_init(&nfc->controller);
	nfc->controller.ops = &pl35x_nandc_ops;
	INIT_LIST_HEAD(&nfc->chips);

	nfc->conf_regs = devm_ioremap_resource(&smc_amba->dev, &smc_amba->res);
	if (IS_ERR(nfc->conf_regs))
		return PTR_ERR(nfc->conf_regs);

	nfc->io_regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(nfc->io_regs))
		return PTR_ERR(nfc->io_regs);

	ret = pl35x_nand_reset_state(nfc);
	if (ret)
		return ret;

	ret = pl35x_nand_chips_init(nfc);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, nfc);

	return 0;
}

static int pl35x_nand_remove(struct platform_device *pdev)
{
	struct pl35x_nandc *nfc = platform_get_drvdata(pdev);

	pl35x_nand_chips_cleanup(nfc);

	return 0;
}

static const struct of_device_id pl35x_nand_of_match[] = {
	{ .compatible = "arm,pl353-nand-r2p1" },
	{},
};
MODULE_DEVICE_TABLE(of, pl35x_nand_of_match);

static struct platform_driver pl35x_nandc_driver = {
	.probe = pl35x_nand_probe,
	.remove	= pl35x_nand_remove,
	.driver = {
		.name = PL35X_NANDC_DRIVER_NAME,
		.of_match_table = pl35x_nand_of_match,
	},
};
module_platform_driver(pl35x_nandc_driver);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_ALIAS("platform:" PL35X_NANDC_DRIVER_NAME);
MODULE_DESCRIPTION("ARM PL35X NAND controller driver");
MODULE_LICENSE("GPL");