// SPDX-License-Identifier: GPL-2.0-only
/*
 *  OneNAND driver for OMAP2 / OMAP3
 *
 *  Copyright © 2005-2006 Nokia Corporation
 *
 *  Author: Jarkko Lavinen <jarkko.lavinen@nokia.com> and Juha Yrjölä
 *  IRQ and DMA support written by Timo Teras
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/omap-gpmc.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/gpio/consumer.h>

#include <asm/mach/flash.h>

#define DRIVER_NAME "omap2-onenand"

#define ONENAND_BUFRAM_SIZE	(1024 * 5)

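/*
 * Per-device state: the GPMC chip select and physical base of the mapped
 * window, the optional interrupt GPIO, the MTD/OneNAND core structures,
 * the completions used by the IRQ and DMA paths, and the memcpy DMA
 * channel (if one could be requested).
 */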
struct omap2_onenand {
	struct platform_device *pdev;
	int gpmc_cs;
	unsigned long phys_base;
	struct gpio_desc *int_gpiod;
	struct mtd_info mtd;
	struct onenand_chip onenand;
	struct completion irq_done;
	struct completion dma_done;
	struct dma_chan *dma_chan;
};

static void omap2_onenand_dma_complete_func(void *completion)
{
	complete(completion);
}

static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id)
{
	struct omap2_onenand *c = dev_id;

	complete(&c->irq_done);

	return IRQ_HANDLED;
}

static inline unsigned short read_reg(struct omap2_onenand *c, int reg)
{
	return readw(c->onenand.base + reg);
}

static inline void write_reg(struct omap2_onenand *c, unsigned short value,
			     int reg)
{
	writew(value, c->onenand.base + reg);
}

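/*
 * Program the OneNAND SYS_CFG1 register: burst read latency, burst length
 * (0 selects continuous burst) and the synchronous read/write enables.
 * The HF/VHF bits are set for latencies above 5 and 7 respectively.
 */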
static int omap2_onenand_set_cfg(struct omap2_onenand *c,
				 bool sr, bool sw,
				 int latency, int burst_len)
{
	unsigned short reg = ONENAND_SYS_CFG1_RDY | ONENAND_SYS_CFG1_INT;

	reg |= latency << ONENAND_SYS_CFG1_BRL_SHIFT;

	switch (burst_len) {
	case 0:		/* continuous */
		break;
	case 4:
		reg |= ONENAND_SYS_CFG1_BL_4;
		break;
	case 8:
		reg |= ONENAND_SYS_CFG1_BL_8;
		break;
	case 16:
		reg |= ONENAND_SYS_CFG1_BL_16;
		break;
	case 32:
		reg |= ONENAND_SYS_CFG1_BL_32;
		break;
	default:
		return -EINVAL;
	}

	if (latency > 5)
		reg |= ONENAND_SYS_CFG1_HF;
	if (latency > 7)
		reg |= ONENAND_SYS_CFG1_VHF;
	if (sr)
		reg |= ONENAND_SYS_CFG1_SYNC_READ;
	if (sw)
		reg |= ONENAND_SYS_CFG1_SYNC_WRITE;

	write_reg(c, reg, ONENAND_REG_SYS_CFG1);

	return 0;
}

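/*
 * Map the frequency field of the OneNAND version ID (bits 7:4) to the
 * clock rate in MHz used for the synchronous timings; unknown values
 * yield -EINVAL so the caller keeps asynchronous mode.
 */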
static int omap2_onenand_get_freq(int ver)
{
	switch ((ver >> 4) & 0xf) {
	case 0:
		return 40;
	case 1:
		return 54;
	case 2:
		return 66;
	case 3:
		return 83;
	case 4:
		return 104;
	}

	return -EINVAL;
}

static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr)
{
	printk(KERN_ERR "onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
	       msg, state, ctrl, intr);
}

static void wait_warn(char *msg, int state, unsigned int ctrl,
		      unsigned int intr)
{
	printk(KERN_WARNING "onenand_wait: %s! state %d ctrl 0x%04x "
	       "intr 0x%04x\n", msg, state, ctrl, intr);
}

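/*
 * Wait for the current OneNAND operation to finish.  Reset and erase
 * preparation/verification are polled with short udelay() loops; other
 * non-read states enable the chip's interrupt output and, unless the INT
 * GPIO is already asserted, sleep on the IRQ completion (20 ms timeout
 * with a few retries while the controller still reports the operation as
 * ongoing); reads disable the interrupt output and poll the interrupt
 * register directly.  Afterwards the interrupt, controller status and ECC
 * status registers are checked and any errors reported.
 */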
static int omap2_onenand_wait(struct mtd_info *mtd, int state)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	unsigned int intr = 0;
	unsigned int ctrl, ctrl_mask;
	unsigned long timeout;
	u32 syscfg;

	if (state == FL_RESETTING || state == FL_PREPARING_ERASE ||
	    state == FL_VERIFYING_ERASE) {
		int i = 21;
		unsigned int intr_flags = ONENAND_INT_MASTER;

		switch (state) {
		case FL_RESETTING:
			intr_flags |= ONENAND_INT_RESET;
			break;
		case FL_PREPARING_ERASE:
			intr_flags |= ONENAND_INT_ERASE;
			break;
		case FL_VERIFYING_ERASE:
			i = 101;
			break;
		}

		while (--i) {
			udelay(1);
			intr = read_reg(c, ONENAND_REG_INTERRUPT);
			if (intr & ONENAND_INT_MASTER)
				break;
		}
		ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
		if (ctrl & ONENAND_CTRL_ERROR) {
			wait_err("controller error", state, ctrl, intr);
			return -EIO;
		}
		if ((intr & intr_flags) == intr_flags)
			return 0;
		/* Continue in wait for interrupt branch */
	}

	if (state != FL_READING) {
		int result;

		/* Turn interrupts on */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
			syscfg |= ONENAND_SYS_CFG1_IOBE;
			write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
			/* Add a delay to let GPIO settle */
			syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		}

		reinit_completion(&c->irq_done);
		result = gpiod_get_value(c->int_gpiod);
		if (result < 0) {
			ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
			intr = read_reg(c, ONENAND_REG_INTERRUPT);
			wait_err("gpio error", state, ctrl, intr);
			return result;
		} else if (result == 0) {
			int retry_cnt = 0;
retry:
			if (!wait_for_completion_io_timeout(&c->irq_done,
						msecs_to_jiffies(20))) {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO &&
				    !this->ongoing) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3)
						goto retry;
					intr = read_reg(c,
							ONENAND_REG_INTERRUPT);
					wait_err("timeout", state, ctrl, intr);
					return -EIO;
				}
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if ((intr & ONENAND_INT_MASTER) == 0)
					wait_warn("timeout", state, ctrl, intr);
			}
		}
	} else {
		int retry_cnt = 0;

		/* Turn interrupts off */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		syscfg &= ~ONENAND_SYS_CFG1_IOBE;
		write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);

		timeout = jiffies + msecs_to_jiffies(20);
		while (1) {
			if (time_before(jiffies, timeout)) {
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if (intr & ONENAND_INT_MASTER)
					break;
			} else {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3) {
						timeout = jiffies +
							  msecs_to_jiffies(20);
						continue;
					}
				}
				break;
			}
		}
	}

	intr = read_reg(c, ONENAND_REG_INTERRUPT);
	ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);

	if (intr & ONENAND_INT_READ) {
		int ecc = read_reg(c, ONENAND_REG_ECC_STATUS);

		if (ecc) {
			unsigned int addr1, addr8;

			addr1 = read_reg(c, ONENAND_REG_START_ADDRESS1);
			addr8 = read_reg(c, ONENAND_REG_START_ADDRESS8);
			if (ecc & ONENAND_ECC_2BIT_ALL) {
				printk(KERN_ERR "onenand_wait: ECC error = "
				       "0x%04x, addr1 %#x, addr8 %#x\n",
				       ecc, addr1, addr8);
				mtd->ecc_stats.failed++;
				return -EBADMSG;
			} else if (ecc & ONENAND_ECC_1BIT_ALL) {
				printk(KERN_NOTICE "onenand_wait: correctable "
				       "ECC error = 0x%04x, addr1 %#x, "
				       "addr8 %#x\n", ecc, addr1, addr8);
				mtd->ecc_stats.corrected++;
			}
		}
	} else if (state == FL_READING) {
		wait_err("timeout", state, ctrl, intr);
		return -EIO;
	}

	if (ctrl & ONENAND_CTRL_ERROR) {
		wait_err("controller error", state, ctrl, intr);
		if (ctrl & ONENAND_CTRL_LOCK)
			printk(KERN_ERR "onenand_wait: "
					"Device is write protected!!!\n");
		return -EIO;
	}

	ctrl_mask = 0xFE9F;
	if (this->ongoing)
		ctrl_mask &= ~0x8000;

	if (ctrl & ctrl_mask)
		wait_warn("unexpected controller status", state, ctrl, intr);

	return 0;
}

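/*
 * The chip has two BufferRAM banks; when the second one is currently
 * selected, the data and spare areas sit one writesize/oobsize further
 * into the mapped BufferRAM, so return that extra offset.
 */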
static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
{
	struct onenand_chip *this = mtd->priv;

	if (ONENAND_CURRENT_BUFFERRAM(this)) {
		if (area == ONENAND_DATARAM)
			return this->writesize;
		if (area == ONENAND_SPARERAM)
			return mtd->oobsize;
	}

	return 0;
}

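/*
 * Issue a single dmaengine memcpy between the OneNAND BufferRAM and
 * system memory and wait (up to 20 ms) for its completion callback.
 * On timeout the channel is terminated and -ETIMEDOUT is returned so
 * that the caller can fall back to PIO.
 */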
static inline int omap2_onenand_dma_transfer(struct omap2_onenand *c,
					     dma_addr_t src, dma_addr_t dst,
					     size_t count)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = dmaengine_prep_dma_memcpy(c->dma_chan, dst, src, count,
				       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(&c->pdev->dev, "Failed to prepare DMA memcpy\n");
		return -EIO;
	}

	reinit_completion(&c->dma_done);

	tx->callback = omap2_onenand_dma_complete_func;
	tx->callback_param = &c->dma_done;

	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(&c->pdev->dev, "Failed to do DMA tx_submit\n");
		return -EIO;
	}

	dma_async_issue_pending(c->dma_chan);

	if (!wait_for_completion_io_timeout(&c->dma_done,
					    msecs_to_jiffies(20))) {
		dmaengine_terminate_sync(c->dma_chan);
		return -ETIMEDOUT;
	}

	return 0;
}

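/*
 * Read from the chip's BufferRAM.  Small, misaligned or panic-time
 * requests use a plain memcpy; otherwise the buffer is DMA-mapped and
 * copied with the memcpy channel, with the unaligned tail bytes (and any
 * DMA failure) handled by the CPU.
 */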
static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	struct device *dev = &c->pdev->dev;
	void *buf = (void *)buffer;
	dma_addr_t dma_src, dma_dst;
	int bram_offset, err;
	size_t xtra;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/*
	 * Fall back to PIO if the buffer address is not DMA-able, the
	 * length is too short to make a DMA transfer worthwhile, or we
	 * were invoked from panic_write().
	 */
	if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
	    count < 384 || mtd->oops_panic_write)
		goto out_copy;

	xtra = count & 3;
	if (xtra) {
		count -= xtra;
		memcpy(buf + count, this->base + bram_offset + count, xtra);
	}

	dma_dst = dma_map_single(dev, buf, count, DMA_FROM_DEVICE);
	dma_src = c->phys_base + bram_offset;

	if (dma_mapping_error(dev, dma_dst)) {
		dev_err(dev, "Couldn't DMA map a %zu byte buffer\n", count);
		goto out_copy;
	}

	err = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
	dma_unmap_single(dev, dma_dst, count, DMA_FROM_DEVICE);
	if (!err)
		return 0;

	dev_err(dev, "timeout waiting for DMA\n");

out_copy:
	memcpy(buf, this->base + bram_offset, count);
	return 0;
}

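/*
 * Write to the chip's BufferRAM: the mirror image of the read path above,
 * again falling back to a plain memcpy whenever DMA is not worthwhile or
 * fails.
 */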
static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	struct device *dev = &c->pdev->dev;
	void *buf = (void *)buffer;
	dma_addr_t dma_src, dma_dst;
	int bram_offset, err;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/*
	 * Fall back to PIO if the buffer address is not DMA-able, the
	 * length is too short to make a DMA transfer worthwhile, or we
	 * were invoked from panic_write().
	 */
	if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
	    count < 384 || mtd->oops_panic_write)
		goto out_copy;

	dma_src = dma_map_single(dev, buf, count, DMA_TO_DEVICE);
	dma_dst = c->phys_base + bram_offset;
	if (dma_mapping_error(dev, dma_src)) {
		dev_err(dev, "Couldn't DMA map a %zu byte buffer\n", count);
		goto out_copy;
	}

	err = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
	dma_unmap_single(dev, dma_src, count, DMA_TO_DEVICE);
	if (!err)
		return 0;

	dev_err(dev, "timeout waiting for DMA\n");

out_copy:
	memcpy(this->base + bram_offset, buf, count);
	return 0;
}

static void omap2_onenand_shutdown(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	/* With certain content in the buffer RAM, the OMAP boot ROM code
	 * can recognize the flash chip incorrectly. Zero it out before
	 * soft reset.
	 */
	memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
}

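/*
 * Probe: read the GPMC chip-select number from the "reg" property, map
 * the controller window, request the optional interrupt GPIO and a
 * memcpy-capable DMA channel, scan the chip, and, if the version ID
 * advertises a synchronous frequency, ask the GPMC layer for optimized
 * timings before registering the MTD device.
 */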
static int omap2_onenand_probe(struct platform_device *pdev)
{
	u32 val;
	dma_cap_mask_t mask;
	int freq, latency, r;
	struct resource *res;
	struct omap2_onenand *c;
	struct gpmc_onenand_info info;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;

	r = of_property_read_u32(np, "reg", &val);
	if (r) {
		dev_err(dev, "reg not found in DT\n");
		return r;
	}

	c = devm_kzalloc(dev, sizeof(struct omap2_onenand), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	init_completion(&c->irq_done);
	init_completion(&c->dma_done);
	c->gpmc_cs = val;

	c->onenand.base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(c->onenand.base))
		return PTR_ERR(c->onenand.base);
	c->phys_base = res->start;

	c->int_gpiod = devm_gpiod_get_optional(dev, "int", GPIOD_IN);
	if (IS_ERR(c->int_gpiod)) {
		/* Just try again if this happens */
		return dev_err_probe(dev, PTR_ERR(c->int_gpiod), "error getting gpio\n");
	}

	if (c->int_gpiod) {
		r = devm_request_irq(dev, gpiod_to_irq(c->int_gpiod),
				     omap2_onenand_interrupt,
				     IRQF_TRIGGER_RISING, "onenand", c);
		if (r)
			return r;

		c->onenand.wait = omap2_onenand_wait;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	c->dma_chan = dma_request_channel(mask, NULL, NULL);
	if (c->dma_chan) {
		c->onenand.read_bufferram = omap2_onenand_read_bufferram;
		c->onenand.write_bufferram = omap2_onenand_write_bufferram;
	}

	c->pdev = pdev;
	c->mtd.priv = &c->onenand;
	c->mtd.dev.parent = dev;
	mtd_set_of_node(&c->mtd, dev->of_node);

	dev_info(dev, "initializing on CS%d (0x%08lx), va %p, %s mode\n",
		 c->gpmc_cs, c->phys_base, c->onenand.base,
		 c->dma_chan ? "DMA" : "PIO");

	r = onenand_scan(&c->mtd, 1);
	if (r < 0)
		goto err_release_dma;

	freq = omap2_onenand_get_freq(c->onenand.version_id);
	if (freq > 0) {
		switch (freq) {
		case 104:
			latency = 7;
			break;
		case 83:
			latency = 6;
			break;
		case 66:
			latency = 5;
			break;
		case 56:
			latency = 4;
			break;
		default:	/* 40 MHz or lower */
			latency = 3;
			break;
		}

		r = gpmc_omap_onenand_set_timings(dev, c->gpmc_cs,
						  freq, latency, &info);
		if (r)
			goto err_release_onenand;

		r = omap2_onenand_set_cfg(c, info.sync_read, info.sync_write,
					  latency, info.burst_len);
		if (r)
			goto err_release_onenand;

		if (info.sync_read || info.sync_write)
			dev_info(dev, "optimized timings for %d MHz\n", freq);
	}

	r = mtd_device_register(&c->mtd, NULL, 0);
	if (r)
		goto err_release_onenand;

	platform_set_drvdata(pdev, c);

	return 0;

err_release_onenand:
	onenand_release(&c->mtd);
err_release_dma:
	if (c->dma_chan)
		dma_release_channel(c->dma_chan);

	return r;
}

static void omap2_onenand_remove(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	onenand_release(&c->mtd);
	if (c->dma_chan)
		dma_release_channel(c->dma_chan);
	omap2_onenand_shutdown(pdev);
}

static const struct of_device_id omap2_onenand_id_table[] = {
	{ .compatible = "ti,omap2-onenand", },
	{},
};
MODULE_DEVICE_TABLE(of, omap2_onenand_id_table);

static struct platform_driver omap2_onenand_driver = {
	.probe		= omap2_onenand_probe,
	.remove_new	= omap2_onenand_remove,
	.shutdown	= omap2_onenand_shutdown,
	.driver		= {
		.name	= DRIVER_NAME,
		.of_match_table = omap2_onenand_id_table,
	},
};

module_platform_driver(omap2_onenand_driver);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");