xref: /linux/drivers/mtd/nand/raw/nuvoton-ma35d1-nand-controller.c (revision 08de7f9d4d39fd9aa5e747a13acc891214fa2d5f)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2024 Nuvoton Technology Corp.
4  */
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/rawnand.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

19 
/* NFI Registers */
/* DMA engine control/status. */
#define MA35_NFI_REG_DMACTL		0x400
#define   DMA_EN				BIT(0)
#define   DMA_RST				BIT(1)
#define   DMA_BUSY				BIT(9)

/* DMA start (bus) address and global control of the NFI block. */
#define MA35_NFI_REG_DMASA		0x408
#define MA35_NFI_REG_GCTL		0x800
#define   GRST					BIT(0)
#define   NAND_EN				BIT(3)

/* NAND engine control: reset, DMA direction, ECC/BCH and page size. */
#define MA35_NFI_REG_NANDCTL		0x8A0
#define   SWRST				BIT(0)
#define   DMA_R_EN				BIT(1)
#define   DMA_W_EN				BIT(2)
#define   ECC_CHK				BIT(7)
#define   PROT3BEN				BIT(8)
#define   PSIZE_2K				BIT(16)
#define   PSIZE_4K				BIT(17)
#define   PSIZE_8K				GENMASK(17, 16)
#define   PSIZE_MASK				GENMASK(17, 16)
#define   BCH_T24				BIT(18)
#define   BCH_T8				BIT(20)
#define   BCH_T12				BIT(21)
#define   BCH_NONE				(0x0)
#define   BCH_MASK				GENMASK(22, 18)
#define   ECC_EN				BIT(23)
#define   DISABLE_CS0				BIT(25)

/* Interrupt enable/status: DMA done, ECC event, CS0 ready/busy. */
#define MA35_NFI_REG_NANDINTEN	0x8A8
#define MA35_NFI_REG_NANDINTSTS	0x8AC
#define   INT_DMA				BIT(0)
#define   INT_ECC				BIT(2)
#define   INT_RB0				BIT(10)

/* Command/address/data ports; ENDADDR flags the last address cycle. */
#define MA35_NFI_REG_NANDCMD		0x8B0
#define MA35_NFI_REG_NANDADDR		0x8B4
#define   ENDADDR				BIT(31)

#define MA35_NFI_REG_NANDDATA		0x8B8
#define MA35_NFI_REG_NANDRACTL	0x8BC
#define MA35_NFI_REG_NANDECTL		0x8C0
#define   ENABLE_WP				0x0
#define   DISABLE_WP				BIT(0)

/* Per-field ECC status: bits [1:0] state, [6:2] error count per byte. */
#define MA35_NFI_REG_NANDECCES0	0x8D0
#define   ECC_STATUS_MASK			GENMASK(1, 0)
#define   ECC_ERR_CNT_MASK			GENMASK(4, 0)

/* Error addresses, error data, and redundant-area (spare) SRAM base. */
#define MA35_NFI_REG_NANDECCEA0	0x900
#define MA35_NFI_REG_NANDECCED0	0x960
#define MA35_NFI_REG_NANDRA0		0xA00

/* Define for the BCH hardware ECC engine */
/* define the total padding bytes for 512/1024 data segment */
#define MA35_BCH_PADDING_512	32
#define MA35_BCH_PADDING_1024	64
/* define the BCH parity code length for 512 bytes data pattern */
#define MA35_PARITY_BCH8	15
#define MA35_PARITY_BCH12	23
/* define the BCH parity code length for 1024 bytes data pattern */
#define MA35_PARITY_BCH24	45

/* Maximum number of chip selects the controller supports. */
#define MA35_MAX_NSELS		(2)
/* Truthy when RA bytes 2-3 are still erased (page never programmed). */
#define PREFIX_RA_IS_EMPTY(reg)	FIELD_GET(GENMASK(31, 16), (reg))
85 
/* Per-chip state: one instance per NAND device attached to the controller. */
struct ma35_nand_chip {
	struct list_head node;		/* entry in ma35_nand_info.chips */
	struct nand_chip chip;		/* core raw-NAND chip descriptor */

	u32 eccstatus;			/* number of NANDECCESx words to scan (max(1, ecc.steps / 4)) */
	u8 nsels;			/* number of chip selects used by this chip */
	u8 sels[] __counted_by(nsels);	/* chip-select numbers from the DT "reg" property */
};
94 
/* Controller-wide state, allocated once in probe. */
struct ma35_nand_info {
	struct nand_controller controller;	/* raw-NAND core controller object */
	struct device *dev;			/* platform device */
	void __iomem *regs;			/* mapped NFI register block */
	int irq;				/* controller interrupt line */
	struct clk *clk;			/* gate clock (devres-managed) */
	struct completion complete;		/* signalled by IRQ on DMA completion */
	struct list_head chips;			/* all ma35_nand_chip instances */

	u8 *buffer;				/* page-sized bounce buffer for subpage writes */
	unsigned long assigned_cs;		/* bitmap of chip selects already claimed */
};
107 
/* Map a core nand_chip back to its enclosing ma35_nand_chip. */
static inline struct ma35_nand_chip *to_ma35_nand(struct nand_chip *chip)
{
	return container_of(chip, struct ma35_nand_chip, chip);
}
112 
113 static int ma35_ooblayout_ecc(struct mtd_info *mtd, int section,
114 			      struct mtd_oob_region *oob_region)
115 {
116 	struct nand_chip *chip = mtd_to_nand(mtd);
117 
118 	if (section)
119 		return -ERANGE;
120 
121 	oob_region->length = chip->ecc.total;
122 	oob_region->offset = mtd->oobsize - oob_region->length;
123 
124 	return 0;
125 }
126 
127 static int ma35_ooblayout_free(struct mtd_info *mtd, int section,
128 			       struct mtd_oob_region *oob_region)
129 {
130 	struct nand_chip *chip = mtd_to_nand(mtd);
131 
132 	if (section)
133 		return -ERANGE;
134 
135 	oob_region->length = mtd->oobsize - chip->ecc.total - 2;
136 	oob_region->offset = 2;
137 
138 	return 0;
139 }
140 
/* OOB layout callbacks registered with the MTD core per chip. */
static const struct mtd_ooblayout_ops ma35_ooblayout_ops = {
	.free = ma35_ooblayout_free,
	.ecc = ma35_ooblayout_ecc,
};
145 
146 static inline void ma35_clear_spare(struct nand_chip *chip, int size)
147 {
148 	struct ma35_nand_info *nand = nand_get_controller_data(chip);
149 	int i;
150 
151 	for (i = 0; i < size / 4; i++)
152 		writel(0xff, nand->regs + MA35_NFI_REG_NANDRA0);
153 }
154 
/*
 * Extract up to 4 bytes of one redundant-area SRAM word into @buf.
 * @offset: byte offset of the (word-aligned) RA word to read.
 * @size:   number of bytes (0-4) to copy out.
 * @swap:   when set, bytes are taken from the most-significant end of
 *          the word first (used for the unaligned head of a transfer);
 *          otherwise from the least-significant end (aligned tail).
 */
static inline void read_remaining_bytes(struct ma35_nand_info *nand, u32 *buf,
					u32 offset, int size, int swap)
{
	u32 value = readl(nand->regs + MA35_NFI_REG_NANDRA0 + offset);
	u8 *ptr = (u8 *)buf;
	int i, shift;

	for (i = 0; i < size; i++) {
		shift = (swap ? 3 - i : i) * 8;
		ptr[i] = (value >> shift) & 0xff;
	}
}
167 
/*
 * Copy @size bytes from the controller redundant-area SRAM, starting at
 * byte @offset, into @buf: an unaligned head (if any), then whole 32-bit
 * words, then a partial tail word.
 *
 * NOTE(review): when @offset is unaligned, the head bytes land at the
 * start of @buf but @buf is not advanced before the word-copy loop, so
 * the loop appears to overwrite them — confirm intended behavior for
 * unaligned offsets (e.g. the parity read-back in write_subpage).
 */
static inline void ma35_read_spare(struct nand_chip *chip, int size, u32 *buf, u32 offset)
{
	struct ma35_nand_info *nand = nand_get_controller_data(chip);
	u32 off = round_down(offset, 4);
	int len = offset % 4;
	int i;

	if (len) {
		/* Unaligned head: pull the tail bytes of the first word. */
		read_remaining_bytes(nand, buf, off, 4 - len, 1);
		off += 4;
		size -= (4 - len);
	}

	/* Whole words. */
	for (i = 0; i < size / 4; i++)
		*buf++ = readl(nand->regs + MA35_NFI_REG_NANDRA0 + off + (i * 4));

	/* Partial tail word (size % 4 may be 0, making this a no-op). */
	read_remaining_bytes(nand, buf, off + (size & ~3), size % 4, 0);
}
186 
187 static inline void ma35_write_spare(struct nand_chip *chip, int size, u32 *buf)
188 {
189 	struct ma35_nand_info *nand = nand_get_controller_data(chip);
190 	u32 value;
191 	int i, j;
192 	u8 *ptr;
193 
194 	for (i = 0, j = 0; i < size / 4; i++, j += 4)
195 		writel(*buf++, nand->regs + MA35_NFI_REG_NANDRA0 + j);
196 
197 	ptr = (u8 *)buf;
198 	switch (size % 4) {
199 	case 1:
200 		writel(*ptr, nand->regs + MA35_NFI_REG_NANDRA0 + j);
201 		break;
202 	case 2:
203 		value = *ptr | (*(ptr + 1) << 8);
204 		writel(value, nand->regs + MA35_NFI_REG_NANDRA0 + j);
205 		break;
206 	case 3:
207 		value = *ptr | (*(ptr + 1) << 8) | (*(ptr + 2) << 16);
208 		writel(value, nand->regs + MA35_NFI_REG_NANDRA0 + j);
209 		break;
210 	default:
211 		break;
212 	}
213 }
214 
215 static void ma35_nand_target_enable(struct nand_chip *chip, unsigned int cs)
216 {
217 	struct ma35_nand_info *nand = nand_get_controller_data(chip);
218 	u32 reg;
219 
220 	switch (cs) {
221 	case 0:
222 		reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
223 		writel(reg & ~DISABLE_CS0, nand->regs + MA35_NFI_REG_NANDCTL);
224 
225 		reg = readl(nand->regs + MA35_NFI_REG_NANDINTSTS);
226 		reg |= INT_RB0;
227 		writel(reg, nand->regs + MA35_NFI_REG_NANDINTSTS);
228 		break;
229 	default:
230 		break;
231 	}
232 }
233 
/*
 * Configure the hardware BCH engine for @chip: allocate the subpage
 * bounce buffer, program the redundant-area size and select the BCH
 * strength, deriving ecc.steps/total/bytes for the core.
 *
 * Returns 0 on success, -ENOMEM on allocation failure or -EINVAL for an
 * unsupported ECC strength.
 */
static int ma35_nand_hwecc_init(struct nand_chip *chip, struct ma35_nand_info *nand)
{
	struct ma35_nand_chip *nvtnand = to_ma35_nand(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct device *dev = mtd->dev.parent;
	u32 reg;

	/* Page-sized bounce buffer used by ma35_nand_format_subpage(). */
	nand->buffer = devm_kzalloc(dev, mtd->writesize, GFP_KERNEL);
	if (!nand->buffer)
		return -ENOMEM;

	/* Redundant area size */
	writel(mtd->oobsize, nand->regs + MA35_NFI_REG_NANDRACTL);

	/* Protect redundant 3 bytes and disable ECC engine */
	reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
	reg |= (PROT3BEN | ECC_CHK);
	reg &= ~ECC_EN;

	if (chip->ecc.strength != 0) {
		chip->ecc.steps = mtd->writesize / chip->ecc.size;
		/* One NANDECCESx status word covers four ECC fields. */
		nvtnand->eccstatus = (chip->ecc.steps < 4) ? 1 : chip->ecc.steps / 4;
		/* Set BCH algorithm */
		reg &= ~BCH_MASK;
		switch (chip->ecc.strength) {
		case 8:
			chip->ecc.total = chip->ecc.steps * MA35_PARITY_BCH8;
			reg |= BCH_T8;
			break;
		case 12:
			chip->ecc.total = chip->ecc.steps * MA35_PARITY_BCH12;
			reg |= BCH_T12;
			break;
		case 24:
			chip->ecc.total = chip->ecc.steps * MA35_PARITY_BCH24;
			reg |= BCH_T24;
			break;
		default:
			dev_err(nand->dev, "ECC strength unsupported\n");
			return -EINVAL;
		}

		/* Parity bytes per ECC step. */
		chip->ecc.bytes = chip->ecc.total / chip->ecc.steps;
	}
	writel(reg, nand->regs + MA35_NFI_REG_NANDCTL);
	return 0;
}
281 
/* Correct data by BCH algorithm */
/*
 * Repair the errors the BCH engine reported for ECC field @index of the
 * current page.  @err_cnt error byte-values come from NANDECCEDx and
 * their byte addresses from NANDECCEAx; depending on the address, the
 * flip is applied to the page data (@addr), to the first 3 redundant
 * bytes, or to the parity bytes held in the RA SRAM.
 */
static void ma35_nfi_correct(struct nand_chip *chip, u8 index,
			     u8 err_cnt, u8 *addr)
{
	struct ma35_nand_info *nand = nand_get_controller_data(chip);
	u32 temp_data[24], temp_addr[24];
	u32 padding_len, parity_len;
	u32 value, offset, remain;
	u32 err_data[6];
	u8  i, j;

	/* Configurations */
	if (chip->ecc.strength <= 8) {
		parity_len = MA35_PARITY_BCH8;
		padding_len = MA35_BCH_PADDING_512;
	} else if (chip->ecc.strength <= 12) {
		parity_len = MA35_PARITY_BCH12;
		padding_len = MA35_BCH_PADDING_512;
	} else if (chip->ecc.strength <= 24) {
		parity_len = MA35_PARITY_BCH24;
		padding_len = MA35_BCH_PADDING_1024;
	} else {
		dev_err(nand->dev, "Invalid BCH_TSEL = 0x%lx\n",
			readl(nand->regs + MA35_NFI_REG_NANDCTL) & BCH_MASK);
		return;
	}

	/*
	 * got valid BCH_ECC_DATAx and parse them to temp_data[]
	 * got the valid register number of BCH_ECC_DATAx since
	 * one register include 4 error bytes
	 */
	j = (err_cnt + 3) / 4;
	j = (j > 6) ? 6 : j;
	for (i = 0; i < j; i++)
		err_data[i] = readl(nand->regs + MA35_NFI_REG_NANDECCED0 + i * 4);

	for (i = 0; i < j; i++) {
		temp_data[i * 4 + 0] = err_data[i] & 0xff;
		temp_data[i * 4 + 1] = (err_data[i] >> 8) & 0xff;
		temp_data[i * 4 + 2] = (err_data[i] >> 16) & 0xff;
		temp_data[i * 4 + 3] = (err_data[i] >> 24) & 0xff;
	}

	/*
	 * got valid REG_BCH_ECC_ADDRx and parse them to temp_addr[]
	 * got the valid register number of REG_BCH_ECC_ADDRx since
	 * one register include 2 error addresses
	 */
	j = (err_cnt + 1) / 2;
	j = (j > 12) ? 12 : j;
	for (i = 0; i < j; i++) {
		temp_addr[i * 2 + 0] = readl(nand->regs + MA35_NFI_REG_NANDECCEA0 + i * 4)
					& 0x07ff;
		temp_addr[i * 2 + 1] = (readl(nand->regs + MA35_NFI_REG_NANDECCEA0 + i * 4)
					>> 16) & 0x07ff;
	}

	/* pointer to begin address of field that with data error */
	addr += index * chip->ecc.size;

	/* correct each error bytes */
	for (i = 0; i < err_cnt; i++) {
		u32 corrected_index = temp_addr[i];

		if (corrected_index < chip->ecc.size) {
			/* for wrong data in field */
			*(addr + corrected_index) ^= temp_data[i];
		} else if (corrected_index < (chip->ecc.size + 3)) {
			/* for wrong first-3-bytes in redundancy area */
			corrected_index -= chip->ecc.size;
			temp_addr[i] += (parity_len * index);	/* field offset */

			value = readl(nand->regs + MA35_NFI_REG_NANDRA0);
			value ^= temp_data[i] << (8 * corrected_index);
			writel(value, nand->regs + MA35_NFI_REG_NANDRA0);
		} else {
			/*
			 * for wrong parity code in redundancy area
			 * ERR_ADDRx = [data in field] + [3 bytes] + [xx] + [parity code]
			 *                               |<--     padding bytes      -->|
			 * The ERR_ADDRx for last parity code always = field size + padding size.
			 * The first parity code = field size + padding size - parity code length.
			 * For example, for BCH T12, the first parity code = 512 + 32 - 23 = 521.
			 * That is, error byte address offset within field is
			 */
			corrected_index -= (chip->ecc.size + padding_len - parity_len);

			/*
			 * final address = first parity code of first field +
			 *                 offset of fields +
			 *                 offset within field
			 */
			offset = (readl(nand->regs + MA35_NFI_REG_NANDRACTL) & 0x1ff) -
				(parity_len * chip->ecc.steps) +
				(parity_len * index) + corrected_index;

			/* RA SRAM is word-addressed: read-modify-write the word. */
			remain = offset % 4;
			value = readl(nand->regs + MA35_NFI_REG_NANDRA0 + offset - remain);
			value ^= temp_data[i] << (8 * remain);
			writel(value, nand->regs + MA35_NFI_REG_NANDRA0 + offset - remain);
		}
	}
}
386 
387 static int ma35_nfi_ecc_check(struct nand_chip *chip, u8 *addr)
388 {
389 	struct ma35_nand_info *nand = nand_get_controller_data(chip);
390 	struct ma35_nand_chip *nvtnand = to_ma35_nand(chip);
391 	struct mtd_info *mtd = nand_to_mtd(chip);
392 	int maxbitflips = 0;
393 	int cnt = 0;
394 	u32 status;
395 	int i, j;
396 
397 	for (j = 0; j < nvtnand->eccstatus; j++) {
398 		status = readl(nand->regs + MA35_NFI_REG_NANDECCES0 + j * 4);
399 		if (!status)
400 			continue;
401 
402 		for (i = 0; i < 4; i++) {
403 			if ((status & ECC_STATUS_MASK) == 0x01) {
404 				/* Correctable error */
405 				cnt = (status >> 2) & ECC_ERR_CNT_MASK;
406 				ma35_nfi_correct(chip, j * 4 + i, cnt, addr);
407 				maxbitflips = max_t(u32, maxbitflips, cnt);
408 				mtd->ecc_stats.corrected += cnt;
409 			} else {
410 				/* Uncorrectable error */
411 				mtd->ecc_stats.failed++;
412 				dev_err(nand->dev, "uncorrectable error! 0x%4x\n", status);
413 				return -EBADMSG;
414 			}
415 			status >>= 8;
416 		}
417 	}
418 	return maxbitflips;
419 }
420 
/*
 * Prepare the DMA engine for one transfer: reset + enable it, clear any
 * stale DMA/ECC interrupt status, and enable the DMA-done interrupt
 * that signals nand->complete.
 */
static void ma35_nand_dmac_init(struct ma35_nand_info *nand)
{
	/* DMAC reset and enable */
	writel(DMA_RST | DMA_EN, nand->regs + MA35_NFI_REG_DMACTL);
	writel(DMA_EN, nand->regs + MA35_NFI_REG_DMACTL);

	/* Clear DMA finished flag and enable */
	writel(INT_DMA | INT_ECC, nand->regs + MA35_NFI_REG_NANDINTSTS);
	writel(INT_DMA, nand->regs + MA35_NFI_REG_NANDINTEN);
}
431 
432 static int ma35_nand_do_write(struct nand_chip *chip, const u8 *addr, u32 len)
433 {
434 	struct ma35_nand_info *nand = nand_get_controller_data(chip);
435 	struct mtd_info *mtd = nand_to_mtd(chip);
436 	dma_addr_t dma_addr;
437 	int ret = 0, i;
438 	u32 reg;
439 
440 	if (len != mtd->writesize) {
441 		for (i = 0; i < len; i++)
442 			writel(addr[i], nand->regs + MA35_NFI_REG_NANDDATA);
443 		return 0;
444 	}
445 
446 	ma35_nand_dmac_init(nand);
447 
448 	/* To mark this page as dirty. */
449 	reg = readl(nand->regs + MA35_NFI_REG_NANDRA0);
450 	if (reg & 0xffff0000)
451 		writel(reg & 0xffff, nand->regs + MA35_NFI_REG_NANDRA0);
452 
453 	dma_addr = dma_map_single(nand->dev, (void *)addr, len, DMA_TO_DEVICE);
454 	ret = dma_mapping_error(nand->dev, dma_addr);
455 	if (ret) {
456 		dev_err(nand->dev, "dma mapping error\n");
457 		return -EINVAL;
458 	}
459 	dma_sync_single_for_device(nand->dev, dma_addr, len, DMA_TO_DEVICE);
460 
461 	reinit_completion(&nand->complete);
462 	writel(dma_addr, nand->regs + MA35_NFI_REG_DMASA);
463 	writel(readl(nand->regs + MA35_NFI_REG_NANDCTL) | DMA_W_EN,
464 	       nand->regs + MA35_NFI_REG_NANDCTL);
465 	ret = wait_for_completion_timeout(&nand->complete, msecs_to_jiffies(1000));
466 	if (!ret) {
467 		dev_err(nand->dev, "write timeout\n");
468 		ret = -ETIMEDOUT;
469 	}
470 
471 	dma_unmap_single(nand->dev, dma_addr, len, DMA_TO_DEVICE);
472 
473 	return ret;
474 }
475 
/*
 * Read @len bytes from the device into @addr.  Non page-sized lengths
 * use byte-wide PIO from the data port; a full page is transferred by
 * DMA, after which any ECC errors the controller flagged are corrected
 * in place.
 *
 * Returns a negative error code, or the maximum bitflip count from
 * ma35_nfi_ecc_check() (0 when ECC was clean, disabled, or PIO).
 */
static int ma35_nand_do_read(struct nand_chip *chip, u8 *addr, u32 len)
{
	struct ma35_nand_info *nand = nand_get_controller_data(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret = 0, cnt = 0, i;
	dma_addr_t dma_addr;
	u32 reg;

	if (len != mtd->writesize) {
		/* PIO path for short transfers (IDs, status, features). */
		for (i = 0; i < len; i++)
			addr[i] = readb(nand->regs + MA35_NFI_REG_NANDDATA);
		return 0;
	}

	ma35_nand_dmac_init(nand);

	/* Setup and start DMA using dma_addr */
	dma_addr = dma_map_single(nand->dev, (void *)addr, len, DMA_FROM_DEVICE);
	ret = dma_mapping_error(nand->dev, dma_addr);
	if (ret) {
		dev_err(nand->dev, "dma mapping error\n");
		return -EINVAL;
	}

	reinit_completion(&nand->complete);
	writel(dma_addr, nand->regs + MA35_NFI_REG_DMASA);
	writel(readl(nand->regs + MA35_NFI_REG_NANDCTL) | DMA_R_EN,
	       nand->regs + MA35_NFI_REG_NANDCTL);
	ret = wait_for_completion_timeout(&nand->complete, msecs_to_jiffies(1000));
	if (!ret) {
		dev_err(nand->dev, "read timeout\n");
		ret = -ETIMEDOUT;
	}

	dma_unmap_single(nand->dev, dma_addr, len, DMA_FROM_DEVICE);

	/*
	 * On an ECC event, repair correctable errors; on an uncorrectable
	 * page, reset the DMA engine and NAND state machine, then ack.
	 */
	reg = readl(nand->regs + MA35_NFI_REG_NANDINTSTS);
	if (reg & INT_ECC) {
		cnt = ma35_nfi_ecc_check(chip, addr);
		if (cnt < 0) {
			writel(DMA_RST | DMA_EN, nand->regs + MA35_NFI_REG_DMACTL);
			writel(readl(nand->regs + MA35_NFI_REG_NANDCTL) | SWRST,
			       nand->regs + MA35_NFI_REG_NANDCTL);
		}
		writel(INT_ECC, nand->regs + MA35_NFI_REG_NANDINTSTS);
	}

	/* A positive wait result (jiffies left) is replaced by cnt here. */
	ret = ret < 0 ? ret : cnt;
	return ret;
}
526 
/*
 * Build a full-page image in nand->buffer for a subpage program: the
 * fields covered by [offset, offset+len) are copied from @buf, the rest
 * stays 0xff, and the per-field skip bits (NANDRACTL[31:16]) are set
 * for every untouched ECC field so the engine leaves them alone.
 *
 * NOTE(review): @end is derived from page_off + len; for an @offset not
 * aligned to ecc.size this can miss the last partial field — confirm
 * the core only passes field-aligned subpage ranges.
 *
 * Always returns 0.
 */
static int ma35_nand_format_subpage(struct nand_chip *chip, u32 offset,
				    u32 len, const u8 *buf)
{
	struct ma35_nand_info *nand = nand_get_controller_data(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	u32 page_off = round_down(offset, chip->ecc.size);
	u32 end = DIV_ROUND_UP(page_off + len, chip->ecc.size);
	u32 start = page_off / chip->ecc.size;
	u32 reg;
	int i;

	/* Start with all fields marked "skip", then unmask the written ones. */
	reg = readl(nand->regs + MA35_NFI_REG_NANDRACTL) | 0xffff0000;
	memset(nand->buffer, 0xff, mtd->writesize);
	for (i = start; i < end; i++) {
		memcpy(nand->buffer + i * chip->ecc.size,
		       buf + i * chip->ecc.size, chip->ecc.size);
		reg &= ~(1 << (i + 16));
	}
	writel(reg, nand->regs + MA35_NFI_REG_NANDRACTL);

	return 0;
}
549 
/*
 * Program a subpage with hardware ECC: the input is padded to a full
 * page by ma35_nand_format_subpage() (untouched fields masked via
 * NANDRACTL), and the parity generated for the programmed fields is
 * read back from the RA SRAM into oob_poi afterwards.
 */
static int ma35_nand_write_subpage_hwecc(struct nand_chip *chip, u32 offset,
					 u32 data_len, const u8 *buf,
					 int oob_required, int page)
{
	struct ma35_nand_info *nand = nand_get_controller_data(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	u32 reg, oobpoi, index;
	int i;

	/* Enable HW ECC engine */
	reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
	writel(reg | ECC_EN, nand->regs + MA35_NFI_REG_NANDCTL);

	ma35_nand_target_enable(chip, chip->cur_cs);

	/* Preload the RA SRAM with the free OOB bytes to be programmed. */
	ma35_clear_spare(chip, mtd->oobsize);
	ma35_write_spare(chip, mtd->oobsize - chip->ecc.total,
			 (u32 *)chip->oob_poi);

	ma35_nand_format_subpage(chip, offset, data_len, buf);
	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	ma35_nand_do_write(chip, nand->buffer, mtd->writesize);
	nand_prog_page_end_op(chip);

	/* Copy back the parity of each field that was actually written. */
	oobpoi = mtd->oobsize - chip->ecc.total;
	reg = readl(nand->regs + MA35_NFI_REG_NANDRACTL);
	for (i = 0; i < chip->ecc.steps; i++) {
		index = i * chip->ecc.bytes;
		if (!(reg & (1 << (i + 16)))) {
			ma35_read_spare(chip, chip->ecc.bytes,
					(u32 *)(chip->oob_poi + oobpoi + index),
					oobpoi + index);
		}
	}

	/* Restore the plain redundant-area size (clears the skip bits). */
	writel(mtd->oobsize, nand->regs + MA35_NFI_REG_NANDRACTL);
	/* Disable HW ECC engine */
	reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
	writel(reg & ~ECC_EN, nand->regs + MA35_NFI_REG_NANDCTL);

	return 0;
}
592 
/*
 * Program a full page with hardware ECC: the free OOB bytes are staged
 * in the RA SRAM, the page data is written via DMA (parity generated by
 * the engine), and the generated parity is read back into oob_poi.
 */
static int ma35_nand_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
				      int oob_required, int page)
{
	struct ma35_nand_info *nand = nand_get_controller_data(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	u32 reg;

	/* Enable HW ECC engine */
	reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
	writel(reg | ECC_EN, nand->regs + MA35_NFI_REG_NANDCTL);

	ma35_nand_target_enable(chip, chip->cur_cs);

	/* Stage the free OOB bytes in the RA SRAM before programming. */
	ma35_clear_spare(chip, mtd->oobsize);
	ma35_write_spare(chip, mtd->oobsize - chip->ecc.total,
			 (u32 *)chip->oob_poi);

	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	ma35_nand_do_write(chip, buf, mtd->writesize);
	nand_prog_page_end_op(chip);

	/* Copy the engine-generated parity back into the OOB buffer. */
	ma35_read_spare(chip, chip->ecc.total,
			(u32 *)(chip->oob_poi + (mtd->oobsize - chip->ecc.total)),
			mtd->oobsize - chip->ecc.total);

	/* Disable HW ECC engine (reg was read before ECC_EN was set). */
	writel(reg & ~ECC_EN, nand->regs + MA35_NFI_REG_NANDCTL);

	return 0;
}
623 
/*
 * Read a subpage with hardware ECC.  The OOB is fetched first and
 * mirrored into the RA SRAM; if the 16-bit RA prefix is still nonzero
 * (erased pages read 0xffff — ma35_nand_do_write() clears it when a
 * page is programmed), the page is treated as blank, otherwise the data
 * window is DMA-read and corrected.
 *
 * Returns max bitflips, or a negative error from the read path.
 */
static int ma35_nand_read_subpage_hwecc(struct nand_chip *chip, u32 offset,
					u32 data_len, u8 *buf, int page)
{
	struct ma35_nand_info *nand = nand_get_controller_data(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	int bitflips = 0;
	u32 reg;

	/* Enable HW ECC engine */
	reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
	writel(reg | ECC_EN, nand->regs + MA35_NFI_REG_NANDCTL);

	ma35_nand_target_enable(chip, chip->cur_cs);
	nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
	ma35_write_spare(chip, mtd->oobsize, (u32 *)chip->oob_poi);

	reg = readl(nand->regs + MA35_NFI_REG_NANDRA0);
	if (PREFIX_RA_IS_EMPTY(reg)) {
		memset((void *)buf, 0xff, mtd->writesize);
	} else {
		nand_read_page_op(chip, page, offset, NULL, 0);
		bitflips = ma35_nand_do_read(chip, buf + offset, data_len);
		/* Re-read the (possibly corrected) spare data. */
		ma35_read_spare(chip, mtd->oobsize, (u32 *)chip->oob_poi, 0);
	}

	/* Disable HW ECC engine */
	reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
	writel(reg & ~ECC_EN, nand->regs + MA35_NFI_REG_NANDCTL);

	return bitflips;
}
655 
/*
 * Read a full page with hardware ECC.  Works like the subpage variant:
 * OOB first (loaded into the RA SRAM), erased-page short-circuit via
 * the RA prefix, then a DMA read with in-place correction.
 *
 * Returns max bitflips, or a negative error from the read path.
 */
static int ma35_nand_read_page_hwecc(struct nand_chip *chip, u8 *buf,
				     int oob_required, int page)
{
	struct ma35_nand_info *nand = nand_get_controller_data(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	int bitflips = 0;
	u32 reg;

	/* Enable HW ECC engine */
	reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
	writel(reg | ECC_EN, nand->regs + MA35_NFI_REG_NANDCTL);

	ma35_nand_target_enable(chip, chip->cur_cs);
	nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
	ma35_write_spare(chip, mtd->oobsize, (u32 *)chip->oob_poi);

	reg = readl(nand->regs + MA35_NFI_REG_NANDRA0);
	if (PREFIX_RA_IS_EMPTY(reg)) {
		/* Never-programmed page: report all 0xff. */
		memset((void *)buf, 0xff, mtd->writesize);
	} else {
		nand_read_page_op(chip, page, 0, NULL, 0);
		bitflips = ma35_nand_do_read(chip, buf, mtd->writesize);
		/* Re-read the (possibly corrected) spare data. */
		ma35_read_spare(chip, mtd->oobsize, (u32 *)chip->oob_poi, 0);
	}

	/* Disable HW ECC engine */
	reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
	writel(reg & ~ECC_EN, nand->regs + MA35_NFI_REG_NANDCTL);

	return bitflips;
}
687 
/*
 * Read the OOB area into oob_poi and mirror it into the RA SRAM; if the
 * RA prefix shows the page was never programmed, report the OOB as
 * all-0xff instead of raw flash contents.
 */
static int ma35_nand_read_oob_hwecc(struct nand_chip *chip, int page)
{
	struct ma35_nand_info *nand = nand_get_controller_data(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	u32 reg;

	ma35_nand_target_enable(chip, chip->cur_cs);
	nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);

	/* copy OOB data to controller redundant area for page read */
	ma35_write_spare(chip, mtd->oobsize, (u32 *)chip->oob_poi);

	reg = readl(nand->regs + MA35_NFI_REG_NANDRA0);
	if (PREFIX_RA_IS_EMPTY(reg))
		memset((void *)chip->oob_poi, 0xff, mtd->oobsize);

	return 0;
}
706 
707 static inline void ma35_hw_init(struct ma35_nand_info *nand)
708 {
709 	u32 reg;
710 
711 	/* Disable flash wp. */
712 	writel(DISABLE_WP, nand->regs + MA35_NFI_REG_NANDECTL);
713 
714 	/* resets the internal state machine and counters */
715 	reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
716 	reg |= SWRST;
717 	writel(reg, nand->regs + MA35_NFI_REG_NANDCTL);
718 }
719 
720 static irqreturn_t ma35_nand_irq(int irq, void *id)
721 {
722 	struct ma35_nand_info *nand = (struct ma35_nand_info *)id;
723 	u32 isr;
724 
725 	isr = readl(nand->regs + MA35_NFI_REG_NANDINTSTS);
726 	if (isr & INT_DMA) {
727 		writel(INT_DMA, nand->regs + MA35_NFI_REG_NANDINTSTS);
728 		complete(&nand->complete);
729 		return IRQ_HANDLED;
730 	}
731 
732 	return IRQ_NONE;
733 }
734 
/*
 * attach_chip hook: validate bus width, program the controller page
 * size, and wire up the hardware-ECC callbacks (or accept none/soft/
 * on-die engines as-is).
 *
 * Returns 0 on success or -EINVAL for unsupported configurations.
 */
static int ma35_nand_attach_chip(struct nand_chip *chip)
{
	struct ma35_nand_info *nand = nand_get_controller_data(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct device *dev = mtd->dev.parent;
	u32 reg;

	if (chip->options & NAND_BUSWIDTH_16) {
		dev_err(dev, "16 bits bus width not supported");
		return -EINVAL;
	}

	/* Program the page size field (PSIZE) to match the chip. */
	reg = readl(nand->regs + MA35_NFI_REG_NANDCTL) & (~PSIZE_MASK);
	switch (mtd->writesize) {
	case SZ_2K:
		writel(reg | PSIZE_2K, nand->regs + MA35_NFI_REG_NANDCTL);
		break;
	case SZ_4K:
		writel(reg | PSIZE_4K, nand->regs + MA35_NFI_REG_NANDCTL);
		break;
	case SZ_8K:
		writel(reg | PSIZE_8K, nand->regs + MA35_NFI_REG_NANDCTL);
		break;
	default:
		dev_err(dev, "Unsupported page size");
		return -EINVAL;
	}

	switch (chip->ecc.engine_type) {
	case NAND_ECC_ENGINE_TYPE_ON_HOST:
		/* Do not store BBT bits in the OOB section as it is not protected */
		if (chip->bbt_options & NAND_BBT_USE_FLASH)
			chip->bbt_options |= NAND_BBT_NO_OOB;
		chip->options |= NAND_USES_DMA | NAND_SUBPAGE_READ;
		chip->ecc.write_subpage = ma35_nand_write_subpage_hwecc;
		chip->ecc.write_page = ma35_nand_write_page_hwecc;
		chip->ecc.read_subpage = ma35_nand_read_subpage_hwecc;
		chip->ecc.read_page  = ma35_nand_read_page_hwecc;
		chip->ecc.read_oob   = ma35_nand_read_oob_hwecc;
		return ma35_nand_hwecc_init(chip, nand);
	case NAND_ECC_ENGINE_TYPE_NONE:
	case NAND_ECC_ENGINE_TYPE_SOFT:
	case NAND_ECC_ENGINE_TYPE_ON_DIE:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
785 
786 static int ma35_nfc_exec_instr(struct nand_chip *chip,
787 			       const struct nand_op_instr *instr)
788 {
789 	struct ma35_nand_info *nand = nand_get_controller_data(chip);
790 	unsigned int i;
791 	int ret = 0;
792 	u32 status;
793 
794 	switch (instr->type) {
795 	case NAND_OP_CMD_INSTR:
796 		writel(instr->ctx.cmd.opcode, nand->regs + MA35_NFI_REG_NANDCMD);
797 		break;
798 	case NAND_OP_ADDR_INSTR:
799 		for (i = 0; i < instr->ctx.addr.naddrs; i++) {
800 			if (i == (instr->ctx.addr.naddrs - 1))
801 				writel(instr->ctx.addr.addrs[i] | ENDADDR,
802 				       nand->regs + MA35_NFI_REG_NANDADDR);
803 			else
804 				writel(instr->ctx.addr.addrs[i],
805 				       nand->regs + MA35_NFI_REG_NANDADDR);
806 		}
807 		break;
808 	case NAND_OP_DATA_IN_INSTR:
809 		ret = ma35_nand_do_read(chip, instr->ctx.data.buf.in, instr->ctx.data.len);
810 		break;
811 	case NAND_OP_DATA_OUT_INSTR:
812 		ret = ma35_nand_do_write(chip, instr->ctx.data.buf.out, instr->ctx.data.len);
813 		break;
814 	case NAND_OP_WAITRDY_INSTR:
815 		return readl_poll_timeout(nand->regs + MA35_NFI_REG_NANDINTSTS, status,
816 					  status & INT_RB0, 20,
817 					  instr->ctx.waitrdy.timeout_ms * MSEC_PER_SEC);
818 	default:
819 		ret = -EINVAL;
820 		break;
821 	}
822 
823 	return ret;
824 }
825 
826 static int ma35_nfc_exec_op(struct nand_chip *chip,
827 			    const struct nand_operation *op,
828 			    bool check_only)
829 {
830 	int ret = 0;
831 	u32 i;
832 
833 	if (check_only)
834 		return 0;
835 
836 	ma35_nand_target_enable(chip, op->cs);
837 
838 	for (i = 0; i < op->ninstrs; i++) {
839 		ret = ma35_nfc_exec_instr(chip, &op->instrs[i]);
840 		if (ret)
841 			break;
842 	}
843 
844 	return ret;
845 }
846 
/* Controller hooks registered with the raw-NAND core. */
static const struct nand_controller_ops ma35_nfc_ops = {
	.attach_chip = ma35_nand_attach_chip,
	.exec_op = ma35_nfc_exec_op,
};
851 
852 static int ma35_nand_chip_init(struct device *dev, struct ma35_nand_info *nand,
853 			       struct device_node *np)
854 {
855 	struct ma35_nand_chip *nvtnand;
856 	struct nand_chip *chip;
857 	struct mtd_info *mtd;
858 	int nsels;
859 	int ret;
860 	u32 cs;
861 	int i;
862 
863 	nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
864 	if (!nsels || nsels > MA35_MAX_NSELS) {
865 		dev_err(dev, "invalid reg property size %d\n", nsels);
866 		return -EINVAL;
867 	}
868 
869 	nvtnand = devm_kzalloc(dev, struct_size(nvtnand, sels, nsels),
870 			       GFP_KERNEL);
871 	if (!nvtnand)
872 		return -ENOMEM;
873 
874 	nvtnand->nsels = nsels;
875 	for (i = 0; i < nsels; i++) {
876 		ret = of_property_read_u32_index(np, "reg", i, &cs);
877 		if (ret) {
878 			dev_err(dev, "reg property failure : %d\n", ret);
879 			return ret;
880 		}
881 
882 		if (cs >= MA35_MAX_NSELS) {
883 			dev_err(dev, "invalid CS: %u\n", cs);
884 			return -EINVAL;
885 		}
886 
887 		if (test_and_set_bit(cs, &nand->assigned_cs)) {
888 			dev_err(dev, "CS %u already assigned\n", cs);
889 			return -EINVAL;
890 		}
891 
892 		nvtnand->sels[i] = cs;
893 	}
894 
895 	chip = &nvtnand->chip;
896 	chip->controller = &nand->controller;
897 
898 	nand_set_flash_node(chip, np);
899 	nand_set_controller_data(chip, nand);
900 
901 	mtd = nand_to_mtd(chip);
902 	mtd->owner = THIS_MODULE;
903 	mtd->dev.parent = dev;
904 
905 	mtd_set_ooblayout(mtd, &ma35_ooblayout_ops);
906 	ret = nand_scan(chip, nsels);
907 	if (ret)
908 		return ret;
909 
910 	ret = mtd_device_register(mtd, NULL, 0);
911 	if (ret) {
912 		nand_cleanup(chip);
913 		return ret;
914 	}
915 
916 	list_add_tail(&nvtnand->node, &nand->chips);
917 
918 	return 0;
919 }
920 
921 static void ma35_chips_cleanup(struct ma35_nand_info *nand)
922 {
923 	struct ma35_nand_chip *nvtnand, *tmp;
924 	struct nand_chip *chip;
925 	int ret;
926 
927 	list_for_each_entry_safe(nvtnand, tmp, &nand->chips, node) {
928 		chip = &nvtnand->chip;
929 		ret = mtd_device_unregister(nand_to_mtd(chip));
930 		WARN_ON(ret);
931 		nand_cleanup(chip);
932 		list_del(&nvtnand->node);
933 	}
934 }
935 
936 static int ma35_nand_chips_init(struct device *dev, struct ma35_nand_info *nand)
937 {
938 	struct device_node *np = dev->of_node, *nand_np;
939 	int ret;
940 
941 	for_each_child_of_node(np, nand_np) {
942 		ret = ma35_nand_chip_init(dev, nand, nand_np);
943 		if (ret) {
944 			ma35_chips_cleanup(nand);
945 			return ret;
946 		}
947 	}
948 	return 0;
949 }
950 
951 static int ma35_nand_probe(struct platform_device *pdev)
952 {
953 	struct ma35_nand_info *nand;
954 	int ret = 0;
955 
956 	nand = devm_kzalloc(&pdev->dev, sizeof(*nand), GFP_KERNEL);
957 	if (!nand)
958 		return -ENOMEM;
959 
960 	nand_controller_init(&nand->controller);
961 	INIT_LIST_HEAD(&nand->chips);
962 	nand->controller.ops = &ma35_nfc_ops;
963 
964 	init_completion(&nand->complete);
965 
966 	nand->regs = devm_platform_ioremap_resource(pdev, 0);
967 	if (IS_ERR(nand->regs))
968 		return PTR_ERR(nand->regs);
969 
970 	nand->dev = &pdev->dev;
971 
972 	nand->clk = devm_clk_get_enabled(&pdev->dev, "nand_gate");
973 	if (IS_ERR(nand->clk))
974 		return dev_err_probe(&pdev->dev, PTR_ERR(nand->clk),
975 				     "failed to find NAND clock\n");
976 
977 	nand->irq = platform_get_irq(pdev, 0);
978 	if (nand->irq < 0)
979 		return dev_err_probe(&pdev->dev, nand->irq,
980 				     "failed to get platform irq\n");
981 
982 	ret = devm_request_irq(&pdev->dev, nand->irq, ma35_nand_irq,
983 			       IRQF_TRIGGER_HIGH, "ma35d1-nand-controller", nand);
984 	if (ret) {
985 		dev_err(&pdev->dev, "failed to request NAND irq\n");
986 		return -ENXIO;
987 	}
988 
989 	platform_set_drvdata(pdev, nand);
990 
991 	writel(GRST | NAND_EN, nand->regs + MA35_NFI_REG_GCTL);
992 	ma35_hw_init(nand);
993 	ret = ma35_nand_chips_init(&pdev->dev, nand);
994 	if (ret) {
995 		dev_err(&pdev->dev, "failed to init NAND chips\n");
996 		clk_disable(nand->clk);
997 		return ret;
998 	}
999 
1000 	return ret;
1001 }
1002 
/* Driver detach: unregister and clean up all chips (devres frees the rest). */
static void ma35_nand_remove(struct platform_device *pdev)
{
	struct ma35_nand_info *nand = platform_get_drvdata(pdev);

	ma35_chips_cleanup(nand);
}
1009 
/* Device-tree match table. */
static const struct of_device_id ma35_nand_of_match[] = {
	{ .compatible = "nuvoton,ma35d1-nand-controller" },
	{},
};
MODULE_DEVICE_TABLE(of, ma35_nand_of_match);

/* Platform driver glue. */
static struct platform_driver ma35_nand_driver = {
	.driver = {
		.name = "ma35d1-nand-controller",
		.of_match_table = ma35_nand_of_match,
	},
	.probe = ma35_nand_probe,
	.remove = ma35_nand_remove,
};

module_platform_driver(ma35_nand_driver);

MODULE_DESCRIPTION("Nuvoton ma35 NAND driver");
MODULE_AUTHOR("Hui-Ping Chen <hpchen0nvt@gmail.com>");
MODULE_LICENSE("GPL");
1030