// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * davinci_nand.c - NAND Flash Driver for DaVinci family chips
 *
 * Copyright © 2006 Texas Instruments.
 *
 * Port to 2.6.23 Copyright © 2008 by:
 *   Sander Huijsen <Shuijsen@optelecom-nkf.com>
 *   Troy Kisky <troy.kisky@boundarydevices.com>
 *   Dirk Behme <Dirk.Behme@gmail.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/iopoll.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/slab.h>
#include <linux/of.h>

#include <linux/platform_data/mtd-davinci.h>
#include <linux/platform_data/mtd-davinci-aemif.h>

/*
 * This is a device driver for the NAND flash controller found on the
 * various DaVinci family chips.  It handles up to four SoC chipselects,
 * and some flavors of secondary chipselect (e.g. based on A12) as used
 * with multichip packages.
 *
 * The 1-bit ECC hardware is supported, as well as the newer 4-bit ECC
 * available on chips like the DM355 and OMAP-L137 and needed with the
 * more error-prone MLC NAND chips.
 *
 * This driver assumes EM_WAIT connects all the NAND devices' RDY/nBUSY
 * outputs in a "wire-AND" configuration, with no per-chip signals.
 */
struct davinci_nand_info {
	struct nand_controller	controller;
	struct nand_chip	chip;

	struct platform_device	*pdev;

	bool			is_readmode;

	void __iomem		*base;
	void __iomem		*vaddr;

	void __iomem		*current_cs;

	uint32_t		mask_chipsel;
	uint32_t		mask_ale;
	uint32_t		mask_cle;

	uint32_t		core_chipsel;

	struct davinci_aemif_timing	*timing;
};

static DEFINE_SPINLOCK(davinci_nand_lock);
static bool ecc4_busy;

static inline struct davinci_nand_info *to_davinci_nand(struct mtd_info *mtd)
{
	return container_of(mtd_to_nand(mtd), struct davinci_nand_info, chip);
}

static inline unsigned int davinci_nand_readl(struct davinci_nand_info *info,
		int offset)
{
	return __raw_readl(info->base + offset);
}

static inline void davinci_nand_writel(struct davinci_nand_info *info,
		int offset, unsigned long value)
{
	__raw_writel(value, info->base + offset);
}

/*----------------------------------------------------------------------*/

/*
 * 1-bit hardware ECC ... context maintained for each core chipselect
 */

static inline uint32_t nand_davinci_readecc_1bit(struct mtd_info *mtd)
{
	struct davinci_nand_info *info = to_davinci_nand(mtd);

	return davinci_nand_readl(info, NANDF1ECC_OFFSET
			+ 4 * info->core_chipsel);
}

static void nand_davinci_hwctl_1bit(struct nand_chip *chip, int mode)
{
	struct davinci_nand_info *info;
	uint32_t nandcfr;
	unsigned long flags;

	info = to_davinci_nand(nand_to_mtd(chip));

	/* Reset ECC hardware */
	nand_davinci_readecc_1bit(nand_to_mtd(chip));

	spin_lock_irqsave(&davinci_nand_lock, flags);

	/* Restart ECC hardware */
	nandcfr = davinci_nand_readl(info, NANDFCR_OFFSET);
	nandcfr |= BIT(8 + info->core_chipsel);
	davinci_nand_writel(info, NANDFCR_OFFSET, nandcfr);

	spin_unlock_irqrestore(&davinci_nand_lock, flags);
}

/*
 * Read hardware ECC value and pack into three bytes
 */
static int nand_davinci_calculate_1bit(struct nand_chip *chip,
				       const u_char *dat, u_char *ecc_code)
{
	unsigned int ecc_val = nand_davinci_readecc_1bit(nand_to_mtd(chip));
	unsigned int ecc24 = (ecc_val & 0x0fff) | ((ecc_val & 0x0fff0000) >> 4);

	/* invert so that erased block ecc is correct */
	ecc24 = ~ecc24;
	ecc_code[0] = (u_char)(ecc24);
	ecc_code[1] = (u_char)(ecc24 >> 8);
	ecc_code[2] = (u_char)(ecc24 >> 16);

	return 0;
}
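
/*
 * Editor's note, worked example (not part of the original source): with a
 * hypothetical register value ecc_val = 0x0ABC0DEF, the low 12 bits (0xDEF)
 * stay in place and bits 16..27 (0xABC) shift down by four, giving
 * ecc24 = 0x00ABCDEF.  Inversion yields 0xFF543210, so the stored ECC bytes
 * become 0x10, 0x32, 0x54.  As the comment above notes, the inversion makes
 * an erased page (data and ECC bytes all 0xFF) check out as error-free.
 */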

static int nand_davinci_correct_1bit(struct nand_chip *chip, u_char *dat,
				     u_char *read_ecc, u_char *calc_ecc)
{
	uint32_t eccNand = read_ecc[0] | (read_ecc[1] << 8) |
					  (read_ecc[2] << 16);
	uint32_t eccCalc = calc_ecc[0] | (calc_ecc[1] << 8) |
					  (calc_ecc[2] << 16);
	uint32_t diff = eccCalc ^ eccNand;

	if (diff) {
		if ((((diff >> 12) ^ diff) & 0xfff) == 0xfff) {
			/* Correctable error */
			if ((diff >> (12 + 3)) < chip->ecc.size) {
				dat[diff >> (12 + 3)] ^= BIT((diff >> 12) & 7);
				return 1;
			} else {
				return -EBADMSG;
			}
		} else if (!(diff & (diff - 1))) {
			/* Single bit ECC error in the ECC itself,
			 * nothing to fix */
			return 1;
		} else {
			/* Uncorrectable error */
			return -EBADMSG;
		}

	}
	return 0;
}
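
/*
 * Editor's note, illustrative example (not from the original source): a
 * single flipped bit in the data makes the two 12-bit halves of 'diff'
 * exact complements of each other; the faulty byte offset is diff >> 15 and
 * the faulty bit is (diff >> 12) & 7.  For instance, a flip of bit 3 in
 * byte 100 yields diff >> 12 == 100 * 8 + 3 == 0x323, with its complement
 * 0xcdc in the low half (diff == 0x323cdc), and the code above undoes it
 * with dat[100] ^= BIT(3).
 */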

/*----------------------------------------------------------------------*/

/*
 * 4-bit hardware ECC ... context maintained over entire AEMIF
 *
 * This is a syndrome engine, but we avoid NAND_ECC_PLACEMENT_INTERLEAVED
 * since that forces use of a problematic "infix OOB" layout.
 * Among other things, it trashes manufacturer bad block markers.
 * Also, and specific to this hardware, it ECC-protects the "prepad"
 * in the OOB ... while having ECC protection for parts of OOB would
 * seem useful, the current MTD stack sometimes wants to update the
 * OOB without recomputing ECC.
 */

static void nand_davinci_hwctl_4bit(struct nand_chip *chip, int mode)
{
	struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
	unsigned long flags;
	u32 val;

	/* Reset ECC hardware */
	davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);

	spin_lock_irqsave(&davinci_nand_lock, flags);

	/* Start 4-bit ECC calculation for read/write */
	val = davinci_nand_readl(info, NANDFCR_OFFSET);
	val &= ~(0x03 << 4);
	val |= (info->core_chipsel << 4) | BIT(12);
	davinci_nand_writel(info, NANDFCR_OFFSET, val);

	info->is_readmode = (mode == NAND_ECC_READ);

	spin_unlock_irqrestore(&davinci_nand_lock, flags);
}

/* Read raw ECC code after writing to NAND. */
static void
nand_davinci_readecc_4bit(struct davinci_nand_info *info, u32 code[4])
{
	const u32 mask = 0x03ff03ff;

	code[0] = davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET) & mask;
	code[1] = davinci_nand_readl(info, NAND_4BIT_ECC2_OFFSET) & mask;
	code[2] = davinci_nand_readl(info, NAND_4BIT_ECC3_OFFSET) & mask;
	code[3] = davinci_nand_readl(info, NAND_4BIT_ECC4_OFFSET) & mask;
}

/* Terminate read ECC; or return ECC (as bytes) of data written to NAND. */
static int nand_davinci_calculate_4bit(struct nand_chip *chip,
				       const u_char *dat, u_char *ecc_code)
{
	struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
	u32 raw_ecc[4], *p;
	unsigned i;

	/* After a read, terminate ECC calculation by a dummy read
	 * of some 4-bit ECC register.  ECC covers everything that
	 * was read; correct() just uses the hardware state, so
	 * ecc_code is not needed.
	 */
	if (info->is_readmode) {
		davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
		return 0;
	}

	/* Pack eight raw 10-bit ecc values into ten bytes, making
	 * two passes which each convert four values (in upper and
	 * lower halves of two 32-bit words) into five bytes.  The
	 * ROM boot loader uses this same packing scheme.
	 */
	nand_davinci_readecc_4bit(info, raw_ecc);
	for (i = 0, p = raw_ecc; i < 2; i++, p += 2) {
		*ecc_code++ =   p[0]        & 0xff;
		*ecc_code++ = ((p[0] >>  8) & 0x03) | ((p[0] >> 14) & 0xfc);
		*ecc_code++ = ((p[0] >> 22) & 0x0f) | ((p[1] <<  4) & 0xf0);
		*ecc_code++ = ((p[1] >>  4) & 0x3f) | ((p[1] >> 10) & 0xc0);
		*ecc_code++ =  (p[1] >> 18) & 0xff;
	}

	return 0;
}
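
/*
 * Editor's note (illustration, not part of the original source): each ECC
 * register supplies two 10-bit values, one in bits 9:0 and one in bits
 * 25:16.  The loop above lays the eight values out as one contiguous
 * 80-bit little-endian bitstream: value 0 occupies bits 9:0 of the byte
 * stream, value 1 bits 19:10, and so on up to value 7 in bits 79:70.  The
 * unpacking in nand_davinci_correct_4bit() below reverses exactly this
 * layout.
 */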

/* Correct up to 4 bits in data we just read, using state left in the
 * hardware plus the ecc_code computed when it was first written.
 */
static int nand_davinci_correct_4bit(struct nand_chip *chip, u_char *data,
				     u_char *ecc_code, u_char *null)
{
	int i;
	struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
	unsigned short ecc10[8];
	unsigned short *ecc16;
	u32 syndrome[4];
	u32 ecc_state;
	unsigned num_errors, corrected;
	unsigned long timeo;

	/* Unpack ten bytes into eight 10 bit values.  We know we're
	 * little-endian, and use type punning for less shifting/masking.
	 */
	if (WARN_ON(0x01 & (uintptr_t)ecc_code))
		return -EINVAL;
	ecc16 = (unsigned short *)ecc_code;

	ecc10[0] =  (ecc16[0] >>  0) & 0x3ff;
	ecc10[1] = ((ecc16[0] >> 10) & 0x3f) | ((ecc16[1] << 6) & 0x3c0);
	ecc10[2] =  (ecc16[1] >>  4) & 0x3ff;
	ecc10[3] = ((ecc16[1] >> 14) & 0x3)  | ((ecc16[2] << 2) & 0x3fc);
	ecc10[4] =  (ecc16[2] >>  8)         | ((ecc16[3] << 8) & 0x300);
	ecc10[5] =  (ecc16[3] >>  2) & 0x3ff;
	ecc10[6] = ((ecc16[3] >> 12) & 0xf)  | ((ecc16[4] << 4) & 0x3f0);
	ecc10[7] =  (ecc16[4] >>  6) & 0x3ff;

	/* Tell ECC controller about the expected ECC codes. */
	for (i = 7; i >= 0; i--)
		davinci_nand_writel(info, NAND_4BIT_ECC_LOAD_OFFSET, ecc10[i]);

	/* Allow time for syndrome calculation ... then read it.
	 * A syndrome of all zeroes means no detected errors.
	 */
	davinci_nand_readl(info, NANDFSR_OFFSET);
	nand_davinci_readecc_4bit(info, syndrome);
	if (!(syndrome[0] | syndrome[1] | syndrome[2] | syndrome[3]))
		return 0;

	/*
	 * Clear any previous address calculation by doing a dummy read of an
	 * error address register.
	 */
	davinci_nand_readl(info, NAND_ERR_ADD1_OFFSET);

	/* Start address calculation, and wait for it to complete.
	 * We _could_ start reading more data while this is working,
	 * to speed up the overall page read.
	 */
	davinci_nand_writel(info, NANDFCR_OFFSET,
			davinci_nand_readl(info, NANDFCR_OFFSET) | BIT(13));

	/*
	 * ECC_STATE field reads 0x3 (Error correction complete) immediately
	 * after setting the 4BITECC_ADD_CALC_START bit. So if you immediately
	 * begin trying to poll for the state, you may fall right out of your
	 * loop without any of the correction calculations having taken place.
	 * The recommendation from the hardware team is to initially delay as
	 * long as ECC_STATE reads less than 4. After that, ECC HW has entered
	 * correction state.
	 */
	timeo = jiffies + usecs_to_jiffies(100);
	do {
		ecc_state = (davinci_nand_readl(info,
				NANDFSR_OFFSET) >> 8) & 0x0f;
		cpu_relax();
	} while ((ecc_state < 4) && time_before(jiffies, timeo));

	for (;;) {
		u32	fsr = davinci_nand_readl(info, NANDFSR_OFFSET);

		switch ((fsr >> 8) & 0x0f) {
		case 0:		/* no error, should not happen */
			davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
			return 0;
		case 1:		/* five or more errors detected */
			davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
			return -EBADMSG;
		case 2:		/* error addresses computed */
		case 3:
			num_errors = 1 + ((fsr >> 16) & 0x03);
			goto correct;
		default:	/* still working on it */
			cpu_relax();
			continue;
		}
	}

correct:
	/* correct each error */
	for (i = 0, corrected = 0; i < num_errors; i++) {
		int error_address, error_value;

		if (i > 1) {
			error_address = davinci_nand_readl(info,
						NAND_ERR_ADD2_OFFSET);
			error_value = davinci_nand_readl(info,
						NAND_ERR_ERRVAL2_OFFSET);
		} else {
			error_address = davinci_nand_readl(info,
						NAND_ERR_ADD1_OFFSET);
			error_value = davinci_nand_readl(info,
						NAND_ERR_ERRVAL1_OFFSET);
		}

		if (i & 1) {
			error_address >>= 16;
			error_value >>= 16;
		}
		error_address &= 0x3ff;
		error_address = (512 + 7) - error_address;

		if (error_address < 512) {
			data[error_address] ^= error_value;
			corrected++;
		}
	}

	return corrected;
}
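
/*
 * Editor's note, illustrative reading of the remapping above (an
 * interpretation, not from the original source): the hardware appears to
 * report error positions counted back from the end of the protected
 * region, so a reported offset of 519 maps to data byte 0 and a reported
 * offset of 8 maps to data byte 511.  Remapped offsets of 512 or more fall
 * outside the 512 data bytes (i.e. in the ECC bytes themselves) and are
 * skipped rather than corrected.
 */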

/*----------------------------------------------------------------------*/

/* An ECC layout for using 4-bit ECC with small-page flash, storing
 * ten ECC bytes plus the manufacturer's bad block marker byte, and
 * not overlapping the default BBT markers.
 */
static int hwecc4_ooblayout_small_ecc(struct mtd_info *mtd, int section,
				      struct mtd_oob_region *oobregion)
{
	if (section > 2)
		return -ERANGE;

	if (!section) {
		oobregion->offset = 0;
		oobregion->length = 5;
	} else if (section == 1) {
		oobregion->offset = 6;
		oobregion->length = 2;
	} else {
		oobregion->offset = 13;
		oobregion->length = 3;
	}

	return 0;
}

static int hwecc4_ooblayout_small_free(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *oobregion)
{
	if (section > 1)
		return -ERANGE;

	if (!section) {
		oobregion->offset = 8;
		oobregion->length = 5;
	} else {
		oobregion->offset = 16;
		oobregion->length = mtd->oobsize - 16;
	}

	return 0;
}

static const struct mtd_ooblayout_ops hwecc4_small_ooblayout_ops = {
	.ecc = hwecc4_ooblayout_small_ecc,
	.free = hwecc4_ooblayout_small_free,
};
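
/*
 * Editor's note (summary added for clarity, not part of the original
 * source): for a 16-byte small-page OOB the layout above works out to
 *
 *	bytes 0-4, 6-7, 13-15	the ten ECC bytes
 *	byte  5			manufacturer bad block marker
 *	bytes 8-12		free (room for a flash-based BBT marker)
 *
 * Larger OOB areas expose everything from byte 16 onward as extra free
 * space.
 */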

#if defined(CONFIG_OF)
static const struct of_device_id davinci_nand_of_match[] = {
	{.compatible = "ti,davinci-nand", },
	{.compatible = "ti,keystone-nand", },
	{},
};
MODULE_DEVICE_TABLE(of, davinci_nand_of_match);

static struct davinci_nand_pdata
	*nand_davinci_get_pdata(struct platform_device *pdev)
{
	if (!dev_get_platdata(&pdev->dev) && pdev->dev.of_node) {
		struct davinci_nand_pdata *pdata;
		const char *mode;
		u32 prop;

		pdata =  devm_kzalloc(&pdev->dev,
				sizeof(struct davinci_nand_pdata),
				GFP_KERNEL);
		pdev->dev.platform_data = pdata;
		if (!pdata)
			return ERR_PTR(-ENOMEM);
		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-chipselect", &prop))
			pdata->core_chipsel = prop;
		else
			return ERR_PTR(-EINVAL);

		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-mask-ale", &prop))
			pdata->mask_ale = prop;
		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-mask-cle", &prop))
			pdata->mask_cle = prop;
		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-mask-chipsel", &prop))
			pdata->mask_chipsel = prop;
		if (!of_property_read_string(pdev->dev.of_node,
			"ti,davinci-ecc-mode", &mode)) {
			if (!strncmp("none", mode, 4))
				pdata->engine_type = NAND_ECC_ENGINE_TYPE_NONE;
			if (!strncmp("soft", mode, 4))
				pdata->engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
			if (!strncmp("hw", mode, 2))
				pdata->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
		}
		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-ecc-bits", &prop))
			pdata->ecc_bits = prop;

		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-nand-buswidth", &prop) && prop == 16)
			pdata->options |= NAND_BUSWIDTH_16;

		if (of_property_read_bool(pdev->dev.of_node,
			"ti,davinci-nand-use-bbt"))
			pdata->bbt_options = NAND_BBT_USE_FLASH;

		/*
		 * Since kernel v4.8, this driver has been fixed to enable
		 * use of 4-bit hardware ECC with subpages and verified on
		 * TI's keystone EVMs (K2L, K2HK and K2E).
		 * However, in the interest of not breaking systems using
		 * existing UBI partitions, sub-page writes are not being
		 * (re)enabled. If you want to use subpage writes on Keystone
		 * platforms (i.e. do not have any existing UBI partitions),
		 * then use "ti,davinci-nand" as the compatible in your
		 * device-tree file.
		 */
		if (of_device_is_compatible(pdev->dev.of_node,
					    "ti,keystone-nand")) {
			pdata->options |= NAND_NO_SUBPAGE_WRITE;
		}
	}

	return dev_get_platdata(&pdev->dev);
}
#else
static struct davinci_nand_pdata
	*nand_davinci_get_pdata(struct platform_device *pdev)
{
	return dev_get_platdata(&pdev->dev);
}
#endif
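
/*
 * Editor's note: an illustrative, non-authoritative device-tree fragment
 * using the properties parsed above; all values are placeholders and must
 * match the board's actual AEMIF wiring and ECC requirements.
 *
 *	nand {
 *		compatible = "ti,davinci-nand";
 *		ti,davinci-chipselect = <1>;
 *		ti,davinci-mask-ale = <0>;
 *		ti,davinci-mask-cle = <0>;
 *		ti,davinci-mask-chipsel = <0>;
 *		ti,davinci-ecc-mode = "hw";
 *		ti,davinci-ecc-bits = <4>;
 *		ti,davinci-nand-use-bbt;
 *	};
 */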

static int davinci_nand_attach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct davinci_nand_info *info = to_davinci_nand(mtd);
	struct davinci_nand_pdata *pdata = nand_davinci_get_pdata(info->pdev);
	int ret = 0;

	if (IS_ERR(pdata))
		return PTR_ERR(pdata);

	/* Use board-specific ECC config */
	chip->ecc.engine_type = pdata->engine_type;
	chip->ecc.placement = pdata->ecc_placement;

	switch (chip->ecc.engine_type) {
	case NAND_ECC_ENGINE_TYPE_NONE:
		pdata->ecc_bits = 0;
		break;
	case NAND_ECC_ENGINE_TYPE_SOFT:
		pdata->ecc_bits = 0;
		/*
		 * This driver expects Hamming based ECC when engine_type is set
		 * to NAND_ECC_ENGINE_TYPE_SOFT. Force ecc.algo to
		 * NAND_ECC_ALGO_HAMMING to avoid adding an extra ->ecc_algo
		 * field to davinci_nand_pdata.
		 */
		chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
		break;
	case NAND_ECC_ENGINE_TYPE_ON_HOST:
		if (pdata->ecc_bits == 4) {
			int chunks = mtd->writesize / 512;

			if (!chunks || mtd->oobsize < 16) {
				dev_dbg(&info->pdev->dev, "too small\n");
				return -EINVAL;
			}

			/*
			 * No sanity checks:  CPUs must support this,
			 * and the chips may not use NAND_BUSWIDTH_16.
			 */

			/* No sharing 4-bit hardware between chipselects yet */
			spin_lock_irq(&davinci_nand_lock);
			if (ecc4_busy)
				ret = -EBUSY;
			else
				ecc4_busy = true;
			spin_unlock_irq(&davinci_nand_lock);

			if (ret == -EBUSY)
				return ret;

			chip->ecc.calculate = nand_davinci_calculate_4bit;
			chip->ecc.correct = nand_davinci_correct_4bit;
			chip->ecc.hwctl = nand_davinci_hwctl_4bit;
			chip->ecc.bytes = 10;
			chip->ecc.options = NAND_ECC_GENERIC_ERASED_CHECK;
			chip->ecc.algo = NAND_ECC_ALGO_BCH;

			/*
			 * Update ECC layout if needed ... for 1-bit HW ECC, the
			 * default is OK, but it allocates 6 bytes when only 3
			 * are needed (for each 512 bytes). For 4-bit HW ECC,
			 * the default is not usable: 10 bytes needed, not 6.
			 *
			 * For small page chips, preserve the manufacturer's
			 * badblock marking data ... and make sure a flash BBT
			 * table marker fits in the free bytes.
			 */
			if (chunks == 1) {
				mtd_set_ooblayout(mtd,
						  &hwecc4_small_ooblayout_ops);
			} else if (chunks == 4 || chunks == 8) {
				mtd_set_ooblayout(mtd,
						  nand_get_large_page_ooblayout());
				chip->ecc.read_page = nand_read_page_hwecc_oob_first;
			} else {
				return -EIO;
			}
		} else {
			/* 1bit ecc hamming */
			chip->ecc.calculate = nand_davinci_calculate_1bit;
			chip->ecc.correct = nand_davinci_correct_1bit;
			chip->ecc.hwctl = nand_davinci_hwctl_1bit;
			chip->ecc.bytes = 3;
			chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
		}
		chip->ecc.size = 512;
		chip->ecc.strength = pdata->ecc_bits;
		break;
	default:
		return -EINVAL;
	}

	return ret;
}
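
/*
 * Editor's note, worked example (not from the original source): with 4-bit
 * hardware ECC on a 2048-byte page (chunks == 4), the geometry chosen above
 * is four 512-byte ECC steps of 10 ECC bytes each, i.e. 40 ECC bytes per
 * page with a correction strength of 4 bits per step; a 512-byte page uses
 * a single step and the hwecc4_small_ooblayout_ops layout.
 */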

static void nand_davinci_data_in(struct davinci_nand_info *info, void *buf,
				 unsigned int len, bool force_8bit)
{
	u32 alignment = ((uintptr_t)buf | len) & 3;

	if (force_8bit || (alignment & 1))
		ioread8_rep(info->current_cs, buf, len);
	else if (alignment & 3)
		ioread16_rep(info->current_cs, buf, len >> 1);
	else
		ioread32_rep(info->current_cs, buf, len >> 2);
}

static void nand_davinci_data_out(struct davinci_nand_info *info,
				  const void *buf, unsigned int len,
				  bool force_8bit)
{
	u32 alignment = ((uintptr_t)buf | len) & 3;

	if (force_8bit || (alignment & 1))
		iowrite8_rep(info->current_cs, buf, len);
	else if (alignment & 3)
		iowrite16_rep(info->current_cs, buf, len >> 1);
	else
		iowrite32_rep(info->current_cs, buf, len >> 2);
}
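
/*
 * Editor's note (clarification, not part of the original source): the
 * access width is chosen from the combined alignment of the buffer address
 * and the length.  For example, a 2048-byte transfer to a 4-byte-aligned
 * buffer uses 512 32-bit accesses, a 2-byte-aligned buffer falls back to
 * 16-bit accesses, and any odd address or length (or a force_8bit request
 * from the NAND core) degrades to byte-at-a-time I/O.
 */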

static int davinci_nand_exec_instr(struct davinci_nand_info *info,
				   const struct nand_op_instr *instr)
{
	unsigned int i, timeout_us;
	u32 status;
	int ret;

	switch (instr->type) {
	case NAND_OP_CMD_INSTR:
		iowrite8(instr->ctx.cmd.opcode,
			 info->current_cs + info->mask_cle);
		break;

	case NAND_OP_ADDR_INSTR:
		for (i = 0; i < instr->ctx.addr.naddrs; i++) {
			iowrite8(instr->ctx.addr.addrs[i],
				 info->current_cs + info->mask_ale);
		}
		break;

	case NAND_OP_DATA_IN_INSTR:
		nand_davinci_data_in(info, instr->ctx.data.buf.in,
				     instr->ctx.data.len,
				     instr->ctx.data.force_8bit);
		break;

	case NAND_OP_DATA_OUT_INSTR:
		nand_davinci_data_out(info, instr->ctx.data.buf.out,
				      instr->ctx.data.len,
				      instr->ctx.data.force_8bit);
		break;

	case NAND_OP_WAITRDY_INSTR:
		timeout_us = instr->ctx.waitrdy.timeout_ms * 1000;
		ret = readl_relaxed_poll_timeout(info->base + NANDFSR_OFFSET,
						 status, status & BIT(0), 100,
						 timeout_us);
		if (ret)
			return ret;

		break;
	}

	if (instr->delay_ns) {
		/* Dummy read to be sure that command is sent before ndelay starts */
		davinci_nand_readl(info, 0);
		ndelay(instr->delay_ns);
	}

	return 0;
}

static int davinci_nand_exec_op(struct nand_chip *chip,
				const struct nand_operation *op,
				bool check_only)
{
	struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
	unsigned int i;

	if (check_only)
		return 0;

	info->current_cs = info->vaddr + (op->cs * info->mask_chipsel);

	for (i = 0; i < op->ninstrs; i++) {
		int ret;

		ret = davinci_nand_exec_instr(info, &op->instrs[i]);
		if (ret)
			return ret;
	}

	return 0;
}
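
/*
 * Editor's note, illustrative walk-through (not part of the original
 * source): a generic large-page PAGE READ built by the rawnand core arrives
 * here roughly as
 *
 *	CMD 0x00 -> ADDR (column + row bytes) -> CMD 0x30 -> WAITRDY -> DATA_IN
 *
 * Command bytes are written to the chip-select window offset by mask_cle,
 * address bytes to the window offset by mask_ale, data cycles use the
 * window itself, and WAITRDY polls the ready bit in NANDFSR.
 */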

static const struct nand_controller_ops davinci_nand_controller_ops = {
	.attach_chip = davinci_nand_attach_chip,
	.exec_op = davinci_nand_exec_op,
};

static int nand_davinci_probe(struct platform_device *pdev)
{
	struct davinci_nand_pdata	*pdata;
	struct davinci_nand_info	*info;
	struct resource			*res1;
	struct resource			*res2;
	void __iomem			*vaddr;
	void __iomem			*base;
	int				ret;
	uint32_t			val;
	struct mtd_info			*mtd;

	pdata = nand_davinci_get_pdata(pdev);
	if (IS_ERR(pdata))
		return PTR_ERR(pdata);

	/* insist on board-specific configuration */
	if (!pdata)
		return -ENODEV;

	/* which external chipselect will we be managing? */
	if (pdata->core_chipsel > 3)
		return -ENODEV;

	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	platform_set_drvdata(pdev, info);

	res1 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	res2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res1 || !res2) {
		dev_err(&pdev->dev, "resource missing\n");
		return -EINVAL;
	}

	vaddr = devm_ioremap_resource(&pdev->dev, res1);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	/*
	 * This register range is used to set up NAND settings. When the TI
	 * AEMIF driver is in use, the same memory address range has already
	 * been requested by AEMIF, so we cannot request it twice; just
	 * ioremap it. The AEMIF and NAND drivers do not use the same
	 * registers in this range.
	 */
	base = devm_ioremap(&pdev->dev, res2->start, resource_size(res2));
	if (!base) {
		dev_err(&pdev->dev, "ioremap failed for resource %pR\n", res2);
		return -EADDRNOTAVAIL;
	}

	info->pdev		= pdev;
	info->base		= base;
	info->vaddr		= vaddr;

	mtd			= nand_to_mtd(&info->chip);
	mtd->dev.parent		= &pdev->dev;
	nand_set_flash_node(&info->chip, pdev->dev.of_node);

	/* options such as NAND_BBT_USE_FLASH */
	info->chip.bbt_options	= pdata->bbt_options;
	/* options such as 16-bit widths */
	info->chip.options	= pdata->options;
	info->chip.bbt_td	= pdata->bbt_td;
	info->chip.bbt_md	= pdata->bbt_md;
	info->timing		= pdata->timing;

	info->current_cs	= info->vaddr;
	info->core_chipsel	= pdata->core_chipsel;
	info->mask_chipsel	= pdata->mask_chipsel;

	/* use nandboot-capable ALE/CLE masks by default */
	info->mask_ale		= pdata->mask_ale ? : MASK_ALE;
	info->mask_cle		= pdata->mask_cle ? : MASK_CLE;

	spin_lock_irq(&davinci_nand_lock);

	/* put CSxNAND into NAND mode */
	val = davinci_nand_readl(info, NANDFCR_OFFSET);
	val |= BIT(info->core_chipsel);
	davinci_nand_writel(info, NANDFCR_OFFSET, val);

	spin_unlock_irq(&davinci_nand_lock);

	/* Scan to find existence of the device(s) */
	nand_controller_init(&info->controller);
	info->controller.ops = &davinci_nand_controller_ops;
	info->chip.controller = &info->controller;
	ret = nand_scan(&info->chip, pdata->mask_chipsel ? 2 : 1);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
		return ret;
	}

	if (pdata->parts)
		ret = mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
	else
		ret = mtd_device_register(mtd, NULL, 0);
	if (ret < 0)
		goto err_cleanup_nand;

	val = davinci_nand_readl(info, NRCSR_OFFSET);
	dev_info(&pdev->dev, "controller rev. %d.%d\n",
	       (val >> 8) & 0xff, val & 0xff);

	return 0;

err_cleanup_nand:
	nand_cleanup(&info->chip);

	return ret;
}

static void nand_davinci_remove(struct platform_device *pdev)
{
	struct davinci_nand_info *info = platform_get_drvdata(pdev);
	struct nand_chip *chip = &info->chip;
	int ret;

	spin_lock_irq(&davinci_nand_lock);
	if (chip->ecc.placement == NAND_ECC_PLACEMENT_INTERLEAVED)
		ecc4_busy = false;
	spin_unlock_irq(&davinci_nand_lock);

	ret = mtd_device_unregister(nand_to_mtd(chip));
	WARN_ON(ret);
	nand_cleanup(chip);
}

static struct platform_driver nand_davinci_driver = {
	.probe		= nand_davinci_probe,
	.remove_new	= nand_davinci_remove,
	.driver		= {
		.name	= "davinci_nand",
		.of_match_table = of_match_ptr(davinci_nand_of_match),
	},
};
MODULE_ALIAS("platform:davinci_nand");

module_platform_driver(nand_davinci_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("Davinci NAND flash driver");