// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * davinci_nand.c - NAND Flash Driver for DaVinci family chips
 *
 * Copyright © 2006 Texas Instruments.
 *
 * Port to 2.6.23 Copyright © 2008 by:
 *   Sander Huijsen <Shuijsen@optelecom-nkf.com>
 *   Troy Kisky <troy.kisky@boundarydevices.com>
 *   Dirk Behme <Dirk.Behme@gmail.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/iopoll.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/slab.h>
#include <linux/of.h>

#define NRCSR_OFFSET		0x00
#define NANDFCR_OFFSET		0x60
#define NANDFSR_OFFSET		0x64
#define NANDF1ECC_OFFSET	0x70

/* 4-bit ECC syndrome registers */
#define NAND_4BIT_ECC_LOAD_OFFSET	0xbc
#define NAND_4BIT_ECC1_OFFSET		0xc0
#define NAND_4BIT_ECC2_OFFSET		0xc4
#define NAND_4BIT_ECC3_OFFSET		0xc8
#define NAND_4BIT_ECC4_OFFSET		0xcc
#define NAND_ERR_ADD1_OFFSET		0xd0
#define NAND_ERR_ADD2_OFFSET		0xd4
#define NAND_ERR_ERRVAL1_OFFSET		0xd8
#define NAND_ERR_ERRVAL2_OFFSET		0xdc

/* NOTE:  boards don't need to use these address bits
 * for ALE/CLE unless they support booting from NAND.
 * They're used unless platform data overrides them.
 */
#define	MASK_ALE		0x08
#define	MASK_CLE		0x10

struct davinci_nand_pdata {
	uint32_t		mask_ale;
	uint32_t		mask_cle;

	/*
	 * 0-indexed chip-select number of the asynchronous
	 * interface to which the NAND device has been connected.
	 *
	 * For example, with NAND connected to CS3 of DA850 you
	 * would pass '1' here, since the asynchronous interface
	 * on DA850 starts from CS2.
	 */
	uint32_t		core_chipsel;

	/* for packages using two chipselects */
	uint32_t		mask_chipsel;

	/* board's default static partition info */
	struct mtd_partition	*parts;
	unsigned int		nr_parts;

	/* none  == NAND_ECC_ENGINE_TYPE_NONE (strongly *not* advised!!)
	 * soft  == NAND_ECC_ENGINE_TYPE_SOFT
	 * else  == NAND_ECC_ENGINE_TYPE_ON_HOST, according to ecc_bits
	 *
	 * All DaVinci-family chips support 1-bit hardware ECC.
	 * Newer ones also support 4-bit ECC, but it is awkward
	 * to use with large page chips.
	 */
	enum nand_ecc_engine_type engine_type;
	enum nand_ecc_placement ecc_placement;
	u8			ecc_bits;

	/* e.g. NAND_BUSWIDTH_16 */
	unsigned int		options;
	/* e.g. NAND_BBT_USE_FLASH */
	unsigned int		bbt_options;

	/* Main and mirror bbt descriptor overrides */
	struct nand_bbt_descr	*bbt_td;
	struct nand_bbt_descr	*bbt_md;
};

/*
 * This is a device driver for the NAND flash controller found on the
 * various DaVinci family chips.  It handles up to four SoC chipselects,
 * and some flavors of secondary chipselect (e.g. based on A12) as used
 * with multichip packages.
 *
 * The 1-bit ECC hardware is supported, as well as the newer 4-bit ECC
 * available on chips like the DM355 and OMAP-L137 and needed with the
 * more error-prone MLC NAND chips.
 *
 * This driver assumes EM_WAIT connects all the NAND devices' RDY/nBUSY
 * outputs in a "wire-AND" configuration, with no per-chip signals.
 */
struct davinci_nand_info {
	struct nand_controller	controller;
	struct nand_chip	chip;

	struct platform_device	*pdev;

	bool			is_readmode;

	void __iomem		*base;
	void __iomem		*vaddr;

	void __iomem		*current_cs;

	uint32_t		mask_chipsel;
	uint32_t		mask_ale;
	uint32_t		mask_cle;

	uint32_t		core_chipsel;
};

static DEFINE_SPINLOCK(davinci_nand_lock);
static bool ecc4_busy;

static inline struct davinci_nand_info *to_davinci_nand(struct mtd_info *mtd)
{
	return container_of(mtd_to_nand(mtd), struct davinci_nand_info, chip);
}

static inline unsigned int davinci_nand_readl(struct davinci_nand_info *info,
		int offset)
{
	return __raw_readl(info->base + offset);
}

static inline void davinci_nand_writel(struct davinci_nand_info *info,
		int offset, unsigned long value)
{
	__raw_writel(value, info->base + offset);
}

/*----------------------------------------------------------------------*/

/*
 * 1-bit hardware ECC ... context maintained for each core chipselect
 */

static inline uint32_t nand_davinci_readecc_1bit(struct mtd_info *mtd)
{
	struct davinci_nand_info *info = to_davinci_nand(mtd);

	return davinci_nand_readl(info, NANDF1ECC_OFFSET
			+ 4 * info->core_chipsel);
}

static void nand_davinci_hwctl_1bit(struct nand_chip *chip, int mode)
{
	struct davinci_nand_info *info;
	uint32_t nandcfr;
	unsigned long flags;

	info = to_davinci_nand(nand_to_mtd(chip));

	/* Reset ECC hardware */
	nand_davinci_readecc_1bit(nand_to_mtd(chip));

	spin_lock_irqsave(&davinci_nand_lock, flags);

	/* Restart ECC hardware */
	nandcfr = davinci_nand_readl(info, NANDFCR_OFFSET);
	nandcfr |= BIT(8 + info->core_chipsel);
	davinci_nand_writel(info, NANDFCR_OFFSET, nandcfr);

	spin_unlock_irqrestore(&davinci_nand_lock, flags);
}

/*
 * Read hardware ECC value and pack into three bytes
 */
static int nand_davinci_calculate_1bit(struct nand_chip *chip,
				       const u_char *dat, u_char *ecc_code)
{
	unsigned int ecc_val = nand_davinci_readecc_1bit(nand_to_mtd(chip));
	unsigned int ecc24 = (ecc_val & 0x0fff) | ((ecc_val & 0x0fff0000) >> 4);

	/* invert so that erased block ecc is correct */
	ecc24 = ~ecc24;
	ecc_code[0] = (u_char)(ecc24);
	ecc_code[1] = (u_char)(ecc24 >> 8);
	ecc_code[2] = (u_char)(ecc24 >> 16);

	return 0;
}

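/*
 * Informal note on the correction scheme implemented below: the packed
 * 24-bit code is made of two 12-bit parity halves.  If the two halves of
 * (calculated XOR stored) are bit-wise complements, exactly one data bit
 * flipped, and the upper half gives its location (byte offset in bits
 * 11:3, bit-in-byte in bits 2:0, after the shift by 12).  A single set
 * bit in the XOR means the error hit the ECC bytes themselves; any other
 * non-zero pattern is uncorrectable.
 */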
static int nand_davinci_correct_1bit(struct nand_chip *chip, u_char *dat,
				     u_char *read_ecc, u_char *calc_ecc)
{
	uint32_t eccNand = read_ecc[0] | (read_ecc[1] << 8) |
					  (read_ecc[2] << 16);
	uint32_t eccCalc = calc_ecc[0] | (calc_ecc[1] << 8) |
					  (calc_ecc[2] << 16);
	uint32_t diff = eccCalc ^ eccNand;

	if (diff) {
		if ((((diff >> 12) ^ diff) & 0xfff) == 0xfff) {
			/* Correctable error */
			if ((diff >> (12 + 3)) < chip->ecc.size) {
				dat[diff >> (12 + 3)] ^= BIT((diff >> 12) & 7);
				return 1;
			} else {
				return -EBADMSG;
			}
		} else if (!(diff & (diff - 1))) {
			/* Single bit ECC error in the ECC itself,
			 * nothing to fix */
			return 1;
		} else {
			/* Uncorrectable error */
			return -EBADMSG;
		}

	}
	return 0;
}

/*----------------------------------------------------------------------*/

/*
 * 4-bit hardware ECC ... context maintained over entire AEMIF
 *
 * This is a syndrome engine, but we avoid NAND_ECC_PLACEMENT_INTERLEAVED
 * since that forces use of a problematic "infix OOB" layout.
 * Among other things, it trashes manufacturer bad block markers.
 * Also, and specific to this hardware, it ECC-protects the "prepad"
 * in the OOB ... while having ECC protection for parts of OOB would
 * seem useful, the current MTD stack sometimes wants to update the
 * OOB without recomputing ECC.
 */

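/*
 * The NANDFCR accesses below assume the usual AEMIF layout: bits 5:4
 * select which chip select feeds the 4-bit ECC engine, bit 12 starts a
 * new ECC calculation, and bit 13 (used in the correct path further down)
 * starts the error-address calculation.  Field names vary between SoC
 * datasheets, so only the bit positions used by this driver are noted.
 */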
static void nand_davinci_hwctl_4bit(struct nand_chip *chip, int mode)
{
	struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
	unsigned long flags;
	u32 val;

	/* Reset ECC hardware */
	davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);

	spin_lock_irqsave(&davinci_nand_lock, flags);

	/* Start 4-bit ECC calculation for read/write */
	val = davinci_nand_readl(info, NANDFCR_OFFSET);
	val &= ~(0x03 << 4);
	val |= (info->core_chipsel << 4) | BIT(12);
	davinci_nand_writel(info, NANDFCR_OFFSET, val);

	info->is_readmode = (mode == NAND_ECC_READ);

	spin_unlock_irqrestore(&davinci_nand_lock, flags);
}

/* Read raw ECC code after writing to NAND. */
static void
nand_davinci_readecc_4bit(struct davinci_nand_info *info, u32 code[4])
{
	const u32 mask = 0x03ff03ff;

	code[0] = davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET) & mask;
	code[1] = davinci_nand_readl(info, NAND_4BIT_ECC2_OFFSET) & mask;
	code[2] = davinci_nand_readl(info, NAND_4BIT_ECC3_OFFSET) & mask;
	code[3] = davinci_nand_readl(info, NAND_4BIT_ECC4_OFFSET) & mask;
}

/* Terminate read ECC; or return ECC (as bytes) of data written to NAND. */
static int nand_davinci_calculate_4bit(struct nand_chip *chip,
				       const u_char *dat, u_char *ecc_code)
{
	struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
	u32 raw_ecc[4], *p;
	unsigned i;

	/* After a read, terminate ECC calculation by a dummy read
	 * of some 4-bit ECC register.  ECC covers everything that
	 * was read; correct() just uses the hardware state, so
	 * ecc_code is not needed.
	 */
	if (info->is_readmode) {
		davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
		return 0;
	}

	/* Pack eight raw 10-bit ecc values into ten bytes, making
	 * two passes which each convert four values (in upper and
	 * lower halves of two 32-bit words) into five bytes.  The
	 * ROM boot loader uses this same packing scheme.
	 */
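	/*
	 * Byte layout sketch, with e0..e7 denoting the eight 10-bit values
	 * (each masked register holds two of them, in bits 9:0 and 25:16):
	 *   byte0 = e0[7:0]
	 *   byte1 = e0[9:8] | e1[5:0] << 2
	 *   byte2 = e1[9:6] | e2[3:0] << 4
	 *   byte3 = e2[9:4] | e3[1:0] << 6
	 *   byte4 = e3[9:2]
	 * and likewise for e4..e7 in the second pass.
	 */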
	nand_davinci_readecc_4bit(info, raw_ecc);
	for (i = 0, p = raw_ecc; i < 2; i++, p += 2) {
		*ecc_code++ =   p[0]        & 0xff;
		*ecc_code++ = ((p[0] >>  8) & 0x03) | ((p[0] >> 14) & 0xfc);
		*ecc_code++ = ((p[0] >> 22) & 0x0f) | ((p[1] <<  4) & 0xf0);
		*ecc_code++ = ((p[1] >>  4) & 0x3f) | ((p[1] >> 10) & 0xc0);
		*ecc_code++ =  (p[1] >> 18) & 0xff;
	}

	return 0;
}

/* Correct up to 4 bits in data we just read, using state left in the
 * hardware plus the ecc_code computed when it was first written.
 */
static int nand_davinci_correct_4bit(struct nand_chip *chip, u_char *data,
				     u_char *ecc_code, u_char *null)
{
	int i;
	struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
	unsigned short ecc10[8];
	unsigned short *ecc16;
	u32 syndrome[4];
	u32 ecc_state;
	unsigned num_errors, corrected;
	unsigned long timeo;

	/* Unpack ten bytes into eight 10 bit values.  We know we're
	 * little-endian, and use type punning for less shifting/masking.
	 */
	if (WARN_ON(0x01 & (uintptr_t)ecc_code))
		return -EINVAL;
	ecc16 = (unsigned short *)ecc_code;

	ecc10[0] =  (ecc16[0] >>  0) & 0x3ff;
	ecc10[1] = ((ecc16[0] >> 10) & 0x3f) | ((ecc16[1] << 6) & 0x3c0);
	ecc10[2] =  (ecc16[1] >>  4) & 0x3ff;
	ecc10[3] = ((ecc16[1] >> 14) & 0x3)  | ((ecc16[2] << 2) & 0x3fc);
	ecc10[4] =  (ecc16[2] >>  8)         | ((ecc16[3] << 8) & 0x300);
	ecc10[5] =  (ecc16[3] >>  2) & 0x3ff;
	ecc10[6] = ((ecc16[3] >> 12) & 0xf)  | ((ecc16[4] << 4) & 0x3f0);
	ecc10[7] =  (ecc16[4] >>  6) & 0x3ff;

	/* Tell ECC controller about the expected ECC codes. */
	for (i = 7; i >= 0; i--)
		davinci_nand_writel(info, NAND_4BIT_ECC_LOAD_OFFSET, ecc10[i]);

	/* Allow time for syndrome calculation ... then read it.
	 * A syndrome of all zeroes means no detected errors.
	 */
	davinci_nand_readl(info, NANDFSR_OFFSET);
	nand_davinci_readecc_4bit(info, syndrome);
	if (!(syndrome[0] | syndrome[1] | syndrome[2] | syndrome[3]))
		return 0;

	/*
	 * Clear any previous address calculation by doing a dummy read of an
	 * error address register.
	 */
	davinci_nand_readl(info, NAND_ERR_ADD1_OFFSET);

	/* Start address calculation, and wait for it to complete.
	 * We _could_ start reading more data while this is working,
	 * to speed up the overall page read.
	 */
	davinci_nand_writel(info, NANDFCR_OFFSET,
			davinci_nand_readl(info, NANDFCR_OFFSET) | BIT(13));

	/*
	 * ECC_STATE field reads 0x3 (Error correction complete) immediately
	 * after setting the 4BITECC_ADD_CALC_START bit. So if you immediately
	 * begin trying to poll for the state, you may fall right out of your
	 * loop without any of the correction calculations having taken place.
	 * The recommendation from the hardware team is to initially delay as
	 * long as ECC_STATE reads less than 4. After that, ECC HW has entered
	 * correction state.
	 */
	timeo = jiffies + usecs_to_jiffies(100);
	do {
		ecc_state = (davinci_nand_readl(info,
				NANDFSR_OFFSET) >> 8) & 0x0f;
		cpu_relax();
	} while ((ecc_state < 4) && time_before(jiffies, timeo));

	for (;;) {
		u32	fsr = davinci_nand_readl(info, NANDFSR_OFFSET);

		switch ((fsr >> 8) & 0x0f) {
		case 0:		/* no error, should not happen */
			davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
			return 0;
		case 1:		/* five or more errors detected */
			davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
			return -EBADMSG;
		case 2:		/* error addresses computed */
		case 3:
			num_errors = 1 + ((fsr >> 16) & 0x03);
			goto correct;
		default:	/* still working on it */
			cpu_relax();
			continue;
		}
	}

correct:
	/* correct each error */
	for (i = 0, corrected = 0; i < num_errors; i++) {
		int error_address, error_value;

		if (i > 1) {
			error_address = davinci_nand_readl(info,
						NAND_ERR_ADD2_OFFSET);
			error_value = davinci_nand_readl(info,
						NAND_ERR_ERRVAL2_OFFSET);
		} else {
			error_address = davinci_nand_readl(info,
						NAND_ERR_ADD1_OFFSET);
			error_value = davinci_nand_readl(info,
						NAND_ERR_ERRVAL1_OFFSET);
		}

		if (i & 1) {
			error_address >>= 16;
			error_value >>= 16;
		}
		error_address &= 0x3ff;
		error_address = (512 + 7) - error_address;

		if (error_address < 512) {
			data[error_address] ^= error_value;
			corrected++;
		}
	}

	return corrected;
}

/*----------------------------------------------------------------------*/

/* An ECC layout for using 4-bit ECC with small-page flash, storing
 * ten ECC bytes plus the manufacturer's bad block marker byte,
 * and not overlapping the default BBT markers.
 */
static int hwecc4_ooblayout_small_ecc(struct mtd_info *mtd, int section,
				      struct mtd_oob_region *oobregion)
{
	if (section > 2)
		return -ERANGE;

	if (!section) {
		oobregion->offset = 0;
		oobregion->length = 5;
	} else if (section == 1) {
		oobregion->offset = 6;
		oobregion->length = 2;
	} else {
		oobregion->offset = 13;
		oobregion->length = 3;
	}

	return 0;
}

static int hwecc4_ooblayout_small_free(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *oobregion)
{
	if (section > 1)
		return -ERANGE;

	if (!section) {
		oobregion->offset = 8;
		oobregion->length = 5;
	} else {
		oobregion->offset = 16;
		oobregion->length = mtd->oobsize - 16;
	}

	return 0;
}

static const struct mtd_ooblayout_ops hwecc4_small_ooblayout_ops = {
	.ecc = hwecc4_ooblayout_small_ecc,
	.free = hwecc4_ooblayout_small_free,
};

#if defined(CONFIG_OF)
static const struct of_device_id davinci_nand_of_match[] = {
	{.compatible = "ti,davinci-nand", },
	{.compatible = "ti,keystone-nand", },
	{},
};
MODULE_DEVICE_TABLE(of, davinci_nand_of_match);

static struct davinci_nand_pdata
	*nand_davinci_get_pdata(struct platform_device *pdev)
{
	if (!dev_get_platdata(&pdev->dev) && pdev->dev.of_node) {
		struct davinci_nand_pdata *pdata;
		const char *mode;
		u32 prop;

		pdata =  devm_kzalloc(&pdev->dev,
				sizeof(struct davinci_nand_pdata),
				GFP_KERNEL);
		pdev->dev.platform_data = pdata;
		if (!pdata)
			return ERR_PTR(-ENOMEM);
		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-chipselect", &prop))
			pdata->core_chipsel = prop;
		else
			return ERR_PTR(-EINVAL);

		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-mask-ale", &prop))
			pdata->mask_ale = prop;
		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-mask-cle", &prop))
			pdata->mask_cle = prop;
		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-mask-chipsel", &prop))
			pdata->mask_chipsel = prop;
		if (!of_property_read_string(pdev->dev.of_node,
			"ti,davinci-ecc-mode", &mode)) {
			if (!strncmp("none", mode, 4))
				pdata->engine_type = NAND_ECC_ENGINE_TYPE_NONE;
			if (!strncmp("soft", mode, 4))
				pdata->engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
			if (!strncmp("hw", mode, 2))
				pdata->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
		}
		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-ecc-bits", &prop))
			pdata->ecc_bits = prop;

		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-nand-buswidth", &prop) && prop == 16)
			pdata->options |= NAND_BUSWIDTH_16;

		if (of_property_read_bool(pdev->dev.of_node,
			"ti,davinci-nand-use-bbt"))
			pdata->bbt_options = NAND_BBT_USE_FLASH;

		/*
		 * Since kernel v4.8, this driver has been fixed to enable
		 * use of 4-bit hardware ECC with subpages and verified on
		 * TI's keystone EVMs (K2L, K2HK and K2E).
		 * However, in the interest of not breaking systems using
		 * existing UBI partitions, sub-page writes are not being
		 * (re)enabled. If you want to use subpage writes on Keystone
		 * platforms (i.e. do not have any existing UBI partitions),
		 * then use "ti,davinci-nand" as the compatible in your
		 * device-tree file.
		 */
		if (of_device_is_compatible(pdev->dev.of_node,
					    "ti,keystone-nand")) {
			pdata->options |= NAND_NO_SUBPAGE_WRITE;
		}
	}

	return dev_get_platdata(&pdev->dev);
}
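
/*
 * Illustrative device-tree fragment matching the properties parsed above.
 * Node name, unit address and the concrete values are made up; only the
 * property names are taken from the parser:
 *
 *	nand@0,0 {
 *		compatible = "ti,davinci-nand";
 *		ti,davinci-chipselect = <1>;
 *		ti,davinci-mask-ale = <0>;
 *		ti,davinci-mask-cle = <0>;
 *		ti,davinci-mask-chipsel = <0>;
 *		ti,davinci-ecc-mode = "hw";
 *		ti,davinci-ecc-bits = <4>;
 *		ti,davinci-nand-use-bbt;
 *	};
 */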
#else
static struct davinci_nand_pdata
	*nand_davinci_get_pdata(struct platform_device *pdev)
{
	return dev_get_platdata(&pdev->dev);
}
#endif

static int davinci_nand_attach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct davinci_nand_info *info = to_davinci_nand(mtd);
	struct davinci_nand_pdata *pdata = nand_davinci_get_pdata(info->pdev);
	int ret = 0;

	if (IS_ERR(pdata))
		return PTR_ERR(pdata);

	/* Use board-specific ECC config */
	chip->ecc.engine_type = pdata->engine_type;
	chip->ecc.placement = pdata->ecc_placement;

	switch (chip->ecc.engine_type) {
	case NAND_ECC_ENGINE_TYPE_NONE:
		pdata->ecc_bits = 0;
		break;
	case NAND_ECC_ENGINE_TYPE_SOFT:
		pdata->ecc_bits = 0;
		/*
		 * This driver expects Hamming based ECC when engine_type is set
		 * to NAND_ECC_ENGINE_TYPE_SOFT. Force ecc.algo to
		 * NAND_ECC_ALGO_HAMMING to avoid adding an extra ->ecc_algo
		 * field to davinci_nand_pdata.
		 */
		chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
		break;
	case NAND_ECC_ENGINE_TYPE_ON_HOST:
		if (pdata->ecc_bits == 4) {
			int chunks = mtd->writesize / 512;

			if (!chunks || mtd->oobsize < 16) {
				dev_dbg(&info->pdev->dev, "too small\n");
				return -EINVAL;
			}

			/*
			 * No sanity checks:  CPUs must support this,
			 * and the chips may not use NAND_BUSWIDTH_16.
			 */

			/* No sharing 4-bit hardware between chipselects yet */
			spin_lock_irq(&davinci_nand_lock);
			if (ecc4_busy)
				ret = -EBUSY;
			else
				ecc4_busy = true;
			spin_unlock_irq(&davinci_nand_lock);

			if (ret == -EBUSY)
				return ret;

			chip->ecc.calculate = nand_davinci_calculate_4bit;
			chip->ecc.correct = nand_davinci_correct_4bit;
			chip->ecc.hwctl = nand_davinci_hwctl_4bit;
			chip->ecc.bytes = 10;
			chip->ecc.options = NAND_ECC_GENERIC_ERASED_CHECK;
			chip->ecc.algo = NAND_ECC_ALGO_BCH;

			/*
			 * Update ECC layout if needed ... for 1-bit HW ECC, the
			 * default is OK, but it allocates 6 bytes when only 3
			 * are needed (for each 512 bytes). For 4-bit HW ECC,
			 * the default is not usable: 10 bytes needed, not 6.
			 *
			 * For small page chips, preserve the manufacturer's
			 * badblock marking data ... and make sure a flash BBT
			 * table marker fits in the free bytes.
			 */
			if (chunks == 1) {
				mtd_set_ooblayout(mtd,
						  &hwecc4_small_ooblayout_ops);
			} else if (chunks == 4 || chunks == 8) {
				mtd_set_ooblayout(mtd,
						  nand_get_large_page_ooblayout());
				chip->ecc.read_page = nand_read_page_hwecc_oob_first;
			} else {
				return -EIO;
			}
		} else {
			/* 1bit ecc hamming */
			chip->ecc.calculate = nand_davinci_calculate_1bit;
			chip->ecc.correct = nand_davinci_correct_1bit;
			chip->ecc.hwctl = nand_davinci_hwctl_1bit;
			chip->ecc.bytes = 3;
			chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
		}
		chip->ecc.size = 512;
		chip->ecc.strength = pdata->ecc_bits;
		break;
	default:
		return -EINVAL;
	}

	return ret;
}

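/*
 * Pick the widest MMIO access both the buffer address and the length allow:
 * byte accesses when 8-bit transfers are forced or either value is odd,
 * 16-bit accesses when they are only 2-byte aligned, 32-bit otherwise.
 */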
static void nand_davinci_data_in(struct davinci_nand_info *info, void *buf,
				 unsigned int len, bool force_8bit)
{
	u32 alignment = ((uintptr_t)buf | len) & 3;

	if (force_8bit || (alignment & 1))
		ioread8_rep(info->current_cs, buf, len);
	else if (alignment & 3)
		ioread16_rep(info->current_cs, buf, len >> 1);
	else
		ioread32_rep(info->current_cs, buf, len >> 2);
}

static void nand_davinci_data_out(struct davinci_nand_info *info,
				  const void *buf, unsigned int len,
				  bool force_8bit)
{
	u32 alignment = ((uintptr_t)buf | len) & 3;

	if (force_8bit || (alignment & 1))
		iowrite8_rep(info->current_cs, buf, len);
	else if (alignment & 3)
		iowrite16_rep(info->current_cs, buf, len >> 1);
	else
		iowrite32_rep(info->current_cs, buf, len >> 2);
}

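/*
 * Command and address cycles are generated purely by address decoding:
 * writing a byte at current_cs + mask_cle asserts CLE, and writing at
 * current_cs + mask_ale asserts ALE, since those masks correspond to the
 * address lines wired to the NAND latch-enable pins (see MASK_ALE /
 * MASK_CLE above).
 */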
static int davinci_nand_exec_instr(struct davinci_nand_info *info,
				   const struct nand_op_instr *instr)
{
	unsigned int i, timeout_us;
	u32 status;
	int ret;

	switch (instr->type) {
	case NAND_OP_CMD_INSTR:
		iowrite8(instr->ctx.cmd.opcode,
			 info->current_cs + info->mask_cle);
		break;

	case NAND_OP_ADDR_INSTR:
		for (i = 0; i < instr->ctx.addr.naddrs; i++) {
			iowrite8(instr->ctx.addr.addrs[i],
				 info->current_cs + info->mask_ale);
		}
		break;

	case NAND_OP_DATA_IN_INSTR:
		nand_davinci_data_in(info, instr->ctx.data.buf.in,
				     instr->ctx.data.len,
				     instr->ctx.data.force_8bit);
		break;

	case NAND_OP_DATA_OUT_INSTR:
		nand_davinci_data_out(info, instr->ctx.data.buf.out,
				      instr->ctx.data.len,
				      instr->ctx.data.force_8bit);
		break;

	case NAND_OP_WAITRDY_INSTR:
		timeout_us = instr->ctx.waitrdy.timeout_ms * 1000;
		ret = readl_relaxed_poll_timeout(info->base + NANDFSR_OFFSET,
						 status, status & BIT(0), 100,
						 timeout_us);
		if (ret)
			return ret;

		break;
	}

	if (instr->delay_ns) {
		/* Dummy read to be sure that command is sent before ndelay starts */
		davinci_nand_readl(info, 0);
		ndelay(instr->delay_ns);
	}

	return 0;
}

static int davinci_nand_exec_op(struct nand_chip *chip,
				const struct nand_operation *op,
				bool check_only)
{
	struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
	unsigned int i;

	if (check_only)
		return 0;

	info->current_cs = info->vaddr + (op->cs * info->mask_chipsel);

	for (i = 0; i < op->ninstrs; i++) {
		int ret;

		ret = davinci_nand_exec_instr(info, &op->instrs[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct nand_controller_ops davinci_nand_controller_ops = {
	.attach_chip = davinci_nand_attach_chip,
	.exec_op = davinci_nand_exec_op,
};

static int nand_davinci_probe(struct platform_device *pdev)
{
	struct davinci_nand_pdata	*pdata;
	struct davinci_nand_info	*info;
	struct resource			*res1;
	struct resource			*res2;
	void __iomem			*vaddr;
	void __iomem			*base;
	int				ret;
	uint32_t			val;
	struct mtd_info			*mtd;

	pdata = nand_davinci_get_pdata(pdev);
	if (IS_ERR(pdata))
		return PTR_ERR(pdata);

	/* insist on board-specific configuration */
	if (!pdata)
		return -ENODEV;

	/* which external chipselect will we be managing? */
	if (pdata->core_chipsel > 3)
		return -ENODEV;

	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	platform_set_drvdata(pdev, info);

	res1 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	res2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res1 || !res2) {
		dev_err(&pdev->dev, "resource missing\n");
		return -EINVAL;
	}

	vaddr = devm_ioremap_resource(&pdev->dev, res1);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	/*
	 * This register range is used to set up NAND settings. When the
	 * TI AEMIF driver is used, the same memory address range has already
	 * been requested by AEMIF, so we cannot request it twice; just ioremap.
	 * The AEMIF and NAND drivers do not use the same registers in this range.
	 */
	base = devm_ioremap(&pdev->dev, res2->start, resource_size(res2));
	if (!base) {
		dev_err(&pdev->dev, "ioremap failed for resource %pR\n", res2);
		return -EADDRNOTAVAIL;
	}

	info->pdev		= pdev;
	info->base		= base;
	info->vaddr		= vaddr;

	mtd			= nand_to_mtd(&info->chip);
	mtd->dev.parent		= &pdev->dev;
	nand_set_flash_node(&info->chip, pdev->dev.of_node);

	/* options such as NAND_BBT_USE_FLASH */
	info->chip.bbt_options	= pdata->bbt_options;
	/* options such as 16-bit widths */
	info->chip.options	= pdata->options;
	info->chip.bbt_td	= pdata->bbt_td;
	info->chip.bbt_md	= pdata->bbt_md;

	info->current_cs	= info->vaddr;
	info->core_chipsel	= pdata->core_chipsel;
	info->mask_chipsel	= pdata->mask_chipsel;

	/* use nandboot-capable ALE/CLE masks by default */
	info->mask_ale		= pdata->mask_ale ? : MASK_ALE;
	info->mask_cle		= pdata->mask_cle ? : MASK_CLE;

	spin_lock_irq(&davinci_nand_lock);

	/* put CSxNAND into NAND mode */
	val = davinci_nand_readl(info, NANDFCR_OFFSET);
	val |= BIT(info->core_chipsel);
	davinci_nand_writel(info, NANDFCR_OFFSET, val);

	spin_unlock_irq(&davinci_nand_lock);

	/* Scan to find existence of the device(s) */
	nand_controller_init(&info->controller);
	info->controller.ops = &davinci_nand_controller_ops;
	info->chip.controller = &info->controller;
	ret = nand_scan(&info->chip, pdata->mask_chipsel ? 2 : 1);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
		return ret;
	}

	if (pdata->parts)
		ret = mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
	else
		ret = mtd_device_register(mtd, NULL, 0);
	if (ret < 0)
		goto err_cleanup_nand;

	val = davinci_nand_readl(info, NRCSR_OFFSET);
	dev_info(&pdev->dev, "controller rev. %d.%d\n",
	       (val >> 8) & 0xff, val & 0xff);

	return 0;

err_cleanup_nand:
	nand_cleanup(&info->chip);

	return ret;
}

static void nand_davinci_remove(struct platform_device *pdev)
{
	struct davinci_nand_info *info = platform_get_drvdata(pdev);
	struct nand_chip *chip = &info->chip;
	int ret;

	spin_lock_irq(&davinci_nand_lock);
	if (chip->ecc.placement == NAND_ECC_PLACEMENT_INTERLEAVED)
		ecc4_busy = false;
	spin_unlock_irq(&davinci_nand_lock);

	ret = mtd_device_unregister(nand_to_mtd(chip));
	WARN_ON(ret);
	nand_cleanup(chip);
}

static struct platform_driver nand_davinci_driver = {
	.probe		= nand_davinci_probe,
	.remove_new	= nand_davinci_remove,
	.driver		= {
		.name	= "davinci_nand",
		.of_match_table = of_match_ptr(davinci_nand_of_match),
	},
};
MODULE_ALIAS("platform:davinci_nand");

module_platform_driver(nand_davinci_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("Davinci NAND flash driver");