// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/plat-omap/dma.c
 *
 * Copyright (C) 2003 - 2008 Nokia Corporation
 * Author: Juha Yrjölä <juha.yrjola@nokia.com>
 * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com>
 * Graphics DMA and LCD DMA graphics transformations
 * by Imre Deak <imre.deak@nokia.com>
 * OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc.
 * Merged to support both OMAP1 and OMAP2 by Tony Lindgren <tony@atomide.com>
 * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc.
 *
 * Copyright (C) 2009 Texas Instruments
 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * Support functions for the OMAP internal DMA channels.
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - https://www.ti.com/
 * Converted DMA library into DMA platform driver.
 *	- G, Manjunath Kondaiah <manjugk@ti.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <linux/omap-dma.h>

#include <linux/soc/ti/omap1-io.h>
#include <linux/soc/ti/omap1-soc.h>

#include "tc.h"

/*
 * MAX_LOGICAL_DMA_CH_COUNT: the maximum number of logical DMA
 * channels that an instance of the SDMA IP block can support.  Used
 * to size arrays.  (The actual maximum on a particular SoC may be less
 * than this -- for example, OMAP1 SDMA instances only support 17 logical
 * DMA channels.)
 */
#define MAX_LOGICAL_DMA_CH_COUNT		32

#undef DEBUG

#define OMAP_DMA_ACTIVE			0x01

#define OMAP_FUNC_MUX_ARM_BASE		(0xfffe1000 + 0xec)

static struct omap_system_dma_plat_info *p;
static struct omap_dma_dev_attr *d;
static int enable_1510_mode;
static u32 errata;

static int dma_lch_count;
static int dma_chan_count;
static int omap_dma_reserve_channels;

static DEFINE_SPINLOCK(dma_chan_lock);
static struct omap_dma_lch *dma_chan;

static inline void omap_disable_channel_irq(int lch)
{
	/* disable channel interrupts */
	p->dma_write(0, CICR, lch);
	/* Clear CSR */
	p->dma_read(CSR, lch);
}

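/*
 * Route a global DMA request line to a device: each 32-bit register at
 * OMAP_FUNC_MUX_ARM_BASE packs five 6-bit mapping fields, one per request
 * line, and the field for 'req' is programmed with 'dev - 1'.  Only used
 * on OMAP16xx, from omap_request_dma() below.
 */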
static inline void set_gdma_dev(int req, int dev)
{
	u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
	int shift = ((req - 1) % 5) * 6;
	u32 l;

	l = omap_readl(reg);
	l &= ~(0x3f << shift);
	l |= (dev - 1) << shift;
	omap_writel(l, reg);
}

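/*
 * omap_set_dma_priority() below is only built when the OMAP framebuffer
 * driver is enabled; on OMAP1 it updates the 4-bit priority field (bits
 * 11:8) of the traffic controller register selected by dst_port.
 */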
#if IS_ENABLED(CONFIG_FB_OMAP)
void omap_set_dma_priority(int lch, int dst_port, int priority)
{
	unsigned long reg;
	u32 l;

	if (dma_omap1()) {
		switch (dst_port) {
		case OMAP_DMA_PORT_OCP_T1:	/* FFFECC00 */
			reg = OMAP_TC_OCPT1_PRIOR;
			break;
		case OMAP_DMA_PORT_OCP_T2:	/* FFFECCD0 */
			reg = OMAP_TC_OCPT2_PRIOR;
			break;
		case OMAP_DMA_PORT_EMIFF:	/* FFFECC08 */
			reg = OMAP_TC_EMIFF_PRIOR;
			break;
		case OMAP_DMA_PORT_EMIFS:	/* FFFECC04 */
			reg = OMAP_TC_EMIFS_PRIOR;
			break;
		default:
			BUG();
			return;
		}
		l = omap_readl(reg);
		l &= ~(0xf << 8);
		l |= (priority & 0xf) << 8;
		omap_writel(l, reg);
	}
}
EXPORT_SYMBOL(omap_set_dma_priority);
#endif

#if IS_ENABLED(CONFIG_USB_OMAP)
#ifdef CONFIG_ARCH_OMAP15XX
/* Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise */
static int omap_dma_in_1510_mode(void)
{
	return enable_1510_mode;
}
#else
#define omap_dma_in_1510_mode()		0
#endif

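/*
 * Program the shape of a transfer: the element data type goes into the low
 * bits of CSDP, frame sync (CCR bit 5) or block sync (CCR2 bit 2) is chosen
 * from sync_mode, and the element/frame counts go to CEN/CFN.  The
 * dma_trigger and src_or_dst_synch arguments are not used by this OMAP1
 * implementation.
 */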
void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
				  int frame_count, int sync_mode,
				  int dma_trigger, int src_or_dst_synch)
{
	u32 l;
	u16 ccr;

	l = p->dma_read(CSDP, lch);
	l &= ~0x03;
	l |= data_type;
	p->dma_write(l, CSDP, lch);

	ccr = p->dma_read(CCR, lch);
	ccr &= ~(1 << 5);
	if (sync_mode == OMAP_DMA_SYNC_FRAME)
		ccr |= 1 << 5;
	p->dma_write(ccr, CCR, lch);

	ccr = p->dma_read(CCR2, lch);
	ccr &= ~(1 << 2);
	if (sync_mode == OMAP_DMA_SYNC_BLOCK)
		ccr |= 1 << 2;
	p->dma_write(ccr, CCR2, lch);
	p->dma_write(elem_count, CEN, lch);
	p->dma_write(frame_count, CFN, lch);
}
EXPORT_SYMBOL(omap_set_dma_transfer_params);

void omap_set_dma_channel_mode(int lch, enum omap_dma_channel_mode mode)
{
	if (!dma_omap15xx()) {
		u32 l;

		l = p->dma_read(LCH_CTRL, lch);
		l &= ~0x7;
		l |= mode;
		p->dma_write(l, LCH_CTRL, lch);
	}
}
EXPORT_SYMBOL(omap_set_dma_channel_mode);

/* Note that src_port is only for omap1 */
void omap_set_dma_src_params(int lch, int src_port, int src_amode,
			     unsigned long src_start,
			     int src_ei, int src_fi)
{
	u32 l;
	u16 w;

	w = p->dma_read(CSDP, lch);
	w &= ~(0x1f << 2);
	w |= src_port << 2;
	p->dma_write(w, CSDP, lch);

	l = p->dma_read(CCR, lch);
	l &= ~(0x03 << 12);
	l |= src_amode << 12;
	p->dma_write(l, CCR, lch);

	p->dma_write(src_start, CSSA, lch);

	p->dma_write(src_ei, CSEI, lch);
	p->dma_write(src_fi, CSFI, lch);
}
EXPORT_SYMBOL(omap_set_dma_src_params);

void omap_set_dma_src_data_pack(int lch, int enable)
{
	u32 l;

	l = p->dma_read(CSDP, lch);
	l &= ~(1 << 6);
	if (enable)
		l |= (1 << 6);
	p->dma_write(l, CSDP, lch);
}
EXPORT_SYMBOL(omap_set_dma_src_data_pack);

void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
{
	unsigned int burst = 0;
	u32 l;

	l = p->dma_read(CSDP, lch);
	l &= ~(0x03 << 7);

	switch (burst_mode) {
	case OMAP_DMA_DATA_BURST_DIS:
		break;
	case OMAP_DMA_DATA_BURST_4:
		burst = 0x2;
		break;
	case OMAP_DMA_DATA_BURST_8:
		/*
		 * not supported by current hardware on OMAP1
		 * w |= (0x03 << 7);
		 */
		fallthrough;
	case OMAP_DMA_DATA_BURST_16:
		/* OMAP1 doesn't support burst 16 */
		fallthrough;
	default:
		BUG();
	}

	l |= (burst << 7);
	p->dma_write(l, CSDP, lch);
}
EXPORT_SYMBOL(omap_set_dma_src_burst_mode);

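/*
 * Destination-side counterpart of omap_set_dma_src_params(): programs the
 * destination port and addressing mode (CSDP/CCR), the start address
 * (CDSA) and the element/frame indexes (CDEI/CDFI).
 */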
/* Note that dest_port is only for OMAP1 */
void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
			      unsigned long dest_start,
			      int dst_ei, int dst_fi)
{
	u32 l;

	l = p->dma_read(CSDP, lch);
	l &= ~(0x1f << 9);
	l |= dest_port << 9;
	p->dma_write(l, CSDP, lch);

	l = p->dma_read(CCR, lch);
	l &= ~(0x03 << 14);
	l |= dest_amode << 14;
	p->dma_write(l, CCR, lch);

	p->dma_write(dest_start, CDSA, lch);

	p->dma_write(dst_ei, CDEI, lch);
	p->dma_write(dst_fi, CDFI, lch);
}
EXPORT_SYMBOL(omap_set_dma_dest_params);

void omap_set_dma_dest_data_pack(int lch, int enable)
{
	u32 l;

	l = p->dma_read(CSDP, lch);
	l &= ~(1 << 13);
	if (enable)
		l |= 1 << 13;
	p->dma_write(l, CSDP, lch);
}
EXPORT_SYMBOL(omap_set_dma_dest_data_pack);

void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
{
	unsigned int burst = 0;
	u32 l;

	l = p->dma_read(CSDP, lch);
	l &= ~(0x03 << 14);

	switch (burst_mode) {
	case OMAP_DMA_DATA_BURST_DIS:
		break;
	case OMAP_DMA_DATA_BURST_4:
		burst = 0x2;
		break;
	case OMAP_DMA_DATA_BURST_8:
		burst = 0x3;
		break;
	case OMAP_DMA_DATA_BURST_16:
		/* OMAP1 doesn't support burst 16 */
		fallthrough;
	default:
		printk(KERN_ERR "Invalid DMA burst mode\n");
		BUG();
		return;
	}
	l |= (burst << 14);
	p->dma_write(l, CSDP, lch);
}
EXPORT_SYMBOL(omap_set_dma_dest_burst_mode);

static inline void omap_enable_channel_irq(int lch)
{
	/* Clear CSR */
	p->dma_read(CSR, lch);

	/* Enable some nice interrupts. */
	p->dma_write(dma_chan[lch].enabled_irqs, CICR, lch);
}

void omap_disable_dma_irq(int lch, u16 bits)
{
	dma_chan[lch].enabled_irqs &= ~bits;
}
EXPORT_SYMBOL(omap_disable_dma_irq);

static inline void enable_lnk(int lch)
{
	u32 l;

	l = p->dma_read(CLNK_CTRL, lch);

	l &= ~(1 << 14);

	/* Set the ENABLE_LNK bits */
	if (dma_chan[lch].next_lch != -1)
		l = dma_chan[lch].next_lch | (1 << 15);

	p->dma_write(l, CLNK_CTRL, lch);
}

static inline void disable_lnk(int lch)
{
	u32 l;

	l = p->dma_read(CLNK_CTRL, lch);

	/* Disable interrupts */
	omap_disable_channel_irq(lch);

	/* Set the STOP_LNK bit */
	l |= 1 << 14;

	p->dma_write(l, CLNK_CTRL, lch);
	dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
}
#endif

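/*
 * Claim a free logical channel: the first entry with dev_id == -1 is taken,
 * the owner's name/callback/data are recorded, drop/block/timeout interrupts
 * are enabled, and the synchronization device number is programmed into CCR
 * (with 1510-compatibility mode disabled on OMAP16xx).  The channel number
 * is returned through *dma_ch_out; -EBUSY means no channel was free.  Any
 * caller that does not pass "DMA engine" as dev_name triggers the
 * deprecation warning below.
 */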
int omap_request_dma(int dev_id, const char *dev_name,
		     void (*callback)(int lch, u16 ch_status, void *data),
		     void *data, int *dma_ch_out)
{
	int ch, free_ch = -1;
	unsigned long flags;
	struct omap_dma_lch *chan;

	WARN(strcmp(dev_name, "DMA engine"), "Using deprecated platform DMA API - please update to DMA engine");

	spin_lock_irqsave(&dma_chan_lock, flags);
	for (ch = 0; ch < dma_chan_count; ch++) {
		if (free_ch == -1 && dma_chan[ch].dev_id == -1) {
			free_ch = ch;
			/* Exit after first free channel found */
			break;
		}
	}
	if (free_ch == -1) {
		spin_unlock_irqrestore(&dma_chan_lock, flags);
		return -EBUSY;
	}
	chan = dma_chan + free_ch;
	chan->dev_id = dev_id;

	if (p->clear_lch_regs)
		p->clear_lch_regs(free_ch);

	spin_unlock_irqrestore(&dma_chan_lock, flags);

	chan->dev_name = dev_name;
	chan->callback = callback;
	chan->data = data;
	chan->flags = 0;

	chan->enabled_irqs = OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ;

	chan->enabled_irqs |= OMAP1_DMA_TOUT_IRQ;

	if (dma_omap16xx()) {
		/* If the sync device is set, configure it dynamically. */
		if (dev_id != 0) {
			set_gdma_dev(free_ch + 1, dev_id);
			dev_id = free_ch + 1;
		}
		/*
		 * Disable the 1510 compatibility mode and set the sync device
		 * id.
		 */
		p->dma_write(dev_id | (1 << 10), CCR, free_ch);
	} else {
		p->dma_write(dev_id, CCR, free_ch);
	}

	*dma_ch_out = free_ch;

	return 0;
}
EXPORT_SYMBOL(omap_request_dma);
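
/*
 * Typical legacy client sequence, for illustration only -- the device id,
 * addresses, ports and callback below are made up, not taken from this
 * driver or any in-tree user:
 *
 *	static void my_dma_cb(int lch, u16 ch_status, void *data) { ... }
 *
 *	int lch;
 *
 *	if (omap_request_dma(my_dev_id, "my-dev", my_dma_cb, NULL, &lch))
 *		return -EBUSY;
 *	omap_set_dma_transfer_params(lch, OMAP_DMA_DATA_TYPE_S16, nr_elems, 1,
 *				     OMAP_DMA_SYNC_ELEMENT, my_dev_id, 0);
 *	omap_set_dma_src_params(lch, OMAP_DMA_PORT_EMIFF,
 *				OMAP_DMA_AMODE_POST_INC, buf_phys, 0, 0);
 *	omap_set_dma_dest_params(lch, OMAP_DMA_PORT_TIPB,
 *				 OMAP_DMA_AMODE_CONSTANT, fifo_phys, 0, 0);
 *	omap_start_dma(lch);
 *	...
 *	omap_stop_dma(lch);
 *	omap_free_dma(lch);
 */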

void omap_free_dma(int lch)
{
	unsigned long flags;

	if (dma_chan[lch].dev_id == -1) {
		pr_err("omap_dma: trying to free unallocated DMA channel %d\n",
		       lch);
		return;
	}

	/* Disable all DMA interrupts for the channel. */
	omap_disable_channel_irq(lch);

	/* Make sure the DMA transfer is stopped. */
	p->dma_write(0, CCR, lch);

	spin_lock_irqsave(&dma_chan_lock, flags);
	dma_chan[lch].dev_id = -1;
	dma_chan[lch].next_lch = -1;
	dma_chan[lch].callback = NULL;
	spin_unlock_irqrestore(&dma_chan_lock, flags);
}
EXPORT_SYMBOL(omap_free_dma);

/*
 * Clears any DMA state so the DMA engine is ready to restart with new buffers
 * through omap_start_dma(). Any buffers in flight are discarded.
 */
static void omap_clear_dma(int lch)
{
	unsigned long flags;

	local_irq_save(flags);
	p->clear_dma(lch);
	local_irq_restore(flags);
}

#if IS_ENABLED(CONFIG_USB_OMAP)
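/*
 * Kick off a programmed transfer: the progress register (CPC on 15xx, CDAC
 * otherwise) is zeroed, any chained channels are linked and get their
 * interrupts enabled, errata workarounds are applied, and finally
 * OMAP_DMA_CCR_EN is set after a memory barrier so that buffers in coherent
 * memory are visible to the DMA controller.
 */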
void omap_start_dma(int lch)
{
	u32 l;

	/*
	 * The CPC/CDAC register needs to be initialized to zero
	 * before starting the DMA transfer.
	 */
	if (dma_omap15xx())
		p->dma_write(0, CPC, lch);
	else
		p->dma_write(0, CDAC, lch);

	if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
		int next_lch, cur_lch;
		char dma_chan_link_map[MAX_LOGICAL_DMA_CH_COUNT];

		/* Set the link register of the first channel */
		enable_lnk(lch);

		memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
		dma_chan_link_map[lch] = 1;

		cur_lch = dma_chan[lch].next_lch;
		do {
			next_lch = dma_chan[cur_lch].next_lch;

			/* The loop case: we've been here already */
			if (dma_chan_link_map[cur_lch])
				break;
			/* Mark the current channel */
			dma_chan_link_map[cur_lch] = 1;

			enable_lnk(cur_lch);
			omap_enable_channel_irq(cur_lch);

			cur_lch = next_lch;
		} while (next_lch != -1);
	} else if (IS_DMA_ERRATA(DMA_ERRATA_PARALLEL_CHANNELS))
		p->dma_write(lch, CLNK_CTRL, lch);

	omap_enable_channel_irq(lch);

	l = p->dma_read(CCR, lch);

	if (IS_DMA_ERRATA(DMA_ERRATA_IFRAME_BUFFERING))
		l |= OMAP_DMA_CCR_BUFFERING_DISABLE;
	l |= OMAP_DMA_CCR_EN;

	/*
	 * As dma_write() uses IO accessors which are weakly ordered, there
	 * is no guarantee that data in coherent DMA memory will be visible
	 * to the DMA device.  Add a memory barrier here to ensure that any
	 * such data is visible prior to enabling DMA.
	 */
	mb();
	p->dma_write(l, CCR, lch);

	dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
}
EXPORT_SYMBOL(omap_start_dma);

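/*
 * Stop a transfer.  With the i541 erratum and source/destination
 * synchronization selected, the module is first put into no-standby mode
 * and CCR is polled for up to 100 * 5us for the read/write activity bits
 * to clear (FIFO drain) before OCP_SYSCONFIG is restored; otherwise
 * OMAP_DMA_CCR_EN is simply cleared.  Linked channels are then unlinked.
 */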
void omap_stop_dma(int lch)
{
	u32 l;

	/* Disable all interrupts on the channel */
	omap_disable_channel_irq(lch);

	l = p->dma_read(CCR, lch);
	if (IS_DMA_ERRATA(DMA_ERRATA_i541) &&
			(l & OMAP_DMA_CCR_SEL_SRC_DST_SYNC)) {
		int i = 0;
		u32 sys_cf;

		/* Configure No-Standby */
		l = p->dma_read(OCP_SYSCONFIG, lch);
		sys_cf = l;
		l &= ~DMA_SYSCONFIG_MIDLEMODE_MASK;
		l |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
		p->dma_write(l, OCP_SYSCONFIG, 0);

		l = p->dma_read(CCR, lch);
		l &= ~OMAP_DMA_CCR_EN;
		p->dma_write(l, CCR, lch);

		/* Wait for sDMA FIFO drain */
		l = p->dma_read(CCR, lch);
		while (i < 100 && (l & (OMAP_DMA_CCR_RD_ACTIVE |
					OMAP_DMA_CCR_WR_ACTIVE))) {
			udelay(5);
			i++;
			l = p->dma_read(CCR, lch);
		}
		if (i >= 100)
			pr_err("DMA drain did not complete on lch %d\n", lch);
		/* Restore OCP_SYSCONFIG */
		p->dma_write(sys_cf, OCP_SYSCONFIG, lch);
	} else {
		l &= ~OMAP_DMA_CCR_EN;
		p->dma_write(l, CCR, lch);
	}

	/*
	 * Ensure that data transferred by DMA is visible to any access
	 * after DMA has been disabled.  This is important for coherent
	 * DMA regions.
	 */
	mb();

	if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
		int next_lch, cur_lch = lch;
		char dma_chan_link_map[MAX_LOGICAL_DMA_CH_COUNT];

		memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
		do {
			/* The loop case: we've been here already */
			if (dma_chan_link_map[cur_lch])
				break;
			/* Mark the current channel */
			dma_chan_link_map[cur_lch] = 1;

			disable_lnk(cur_lch);

			next_lch = dma_chan[cur_lch].next_lch;
			cur_lch = next_lch;
		} while (next_lch != -1);
	}

	dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
}
EXPORT_SYMBOL(omap_stop_dma);

/*
 * Allows changing the DMA callback function or data. This may be needed if
 * the driver shares a single DMA channel for multiple dma triggers.
 */
/*
 * Returns current physical source address for the given DMA channel.
 * If the channel is running the caller must disable interrupts prior to
 * calling this function and process the returned value before re-enabling
 * interrupts to prevent races with the interrupt handler. Note that in
 * continuous mode there is a chance for CSSA_L register overflow between
 * the two reads resulting in an incorrect return value.
 */
dma_addr_t omap_get_dma_src_pos(int lch)
{
	dma_addr_t offset = 0;

	if (dma_omap15xx())
		offset = p->dma_read(CPC, lch);
	else
		offset = p->dma_read(CSAC, lch);

	if (IS_DMA_ERRATA(DMA_ERRATA_3_3) && offset == 0)
		offset = p->dma_read(CSAC, lch);

	if (!dma_omap15xx()) {
		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel has
		 * not been started (no data has been transferred so far).
		 * Return the programmed source start address in this case.
		 */
		if (likely(p->dma_read(CDAC, lch)))
			offset = p->dma_read(CSAC, lch);
		else
			offset = p->dma_read(CSSA, lch);
	}

	offset |= (p->dma_read(CSSA, lch) & 0xFFFF0000);

	return offset;
}
EXPORT_SYMBOL(omap_get_dma_src_pos);
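
/*
 * Illustrative caller pattern for the two position helpers (not taken from
 * an in-tree driver): sample the position with interrupts disabled and
 * consume it before re-enabling them, as required by the comments here.
 *
 *	unsigned long flags;
 *	dma_addr_t pos;
 *
 *	local_irq_save(flags);
 *	pos = omap_get_dma_src_pos(lch);
 *	... use 'pos' here, before interrupts are re-enabled ...
 *	local_irq_restore(flags);
 */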

/*
 * Returns current physical destination address for the given DMA channel.
 * If the channel is running the caller must disable interrupts prior to
 * calling this function and process the returned value before re-enabling
 * interrupts to prevent races with the interrupt handler. Note that in
 * continuous mode there is a chance for CDSA_L register overflow between
 * the two reads resulting in an incorrect return value.
 */
dma_addr_t omap_get_dma_dst_pos(int lch)
{
	dma_addr_t offset = 0;

	if (dma_omap15xx())
		offset = p->dma_read(CPC, lch);
	else
		offset = p->dma_read(CDAC, lch);

	/*
	 * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
	 * read before the DMA controller finished disabling the channel.
	 */
	if (!dma_omap15xx() && offset == 0) {
		offset = p->dma_read(CDAC, lch);
		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel has
		 * not been started (no data has been transferred so far).
		 * Return the programmed destination start address in this case.
		 */
		if (unlikely(!offset))
			offset = p->dma_read(CDSA, lch);
	}

	offset |= (p->dma_read(CDSA, lch) & 0xFFFF0000);

	return offset;
}
EXPORT_SYMBOL(omap_get_dma_dst_pos);

int omap_get_dma_active_status(int lch)
{
	return (p->dma_read(CCR, lch) & OMAP_DMA_CCR_EN) != 0;
}
EXPORT_SYMBOL(omap_get_dma_active_status);
#endif

int omap_dma_running(void)
{
	int lch;

	if (omap_lcd_dma_running())
		return 1;

	for (lch = 0; lch < dma_chan_count; lch++)
		if (p->dma_read(CCR, lch) & OMAP_DMA_CCR_EN)
			return 1;

	return 0;
}

/*----------------------------------------------------------------------------*/

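/*
 * Service one logical channel.  In 1510-compatible mode only the first six
 * channels have their own status: events for channels 6..8 arrive through
 * bits 7 and up of the CSR of channels 0..2, so those bits are stashed in
 * saved_csr and the high channel is handled from the same interrupt (see
 * omap1_dma_irq_handler() below).
 */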
static int omap1_dma_handle_ch(int ch)
{
	u32 csr;

	if (enable_1510_mode && ch >= 6) {
		csr = dma_chan[ch].saved_csr;
		dma_chan[ch].saved_csr = 0;
	} else
		csr = p->dma_read(CSR, ch);
	if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) {
		dma_chan[ch + 6].saved_csr = csr >> 7;
		csr &= 0x7f;
	}
	if ((csr & 0x3f) == 0)
		return 0;
	if (unlikely(dma_chan[ch].dev_id == -1)) {
		pr_warn("Spurious interrupt from DMA channel %d (CSR %04x)\n",
			ch, csr);
		return 0;
	}
	if (unlikely(csr & OMAP1_DMA_TOUT_IRQ))
		pr_warn("DMA timeout with device %d\n", dma_chan[ch].dev_id);
	if (unlikely(csr & OMAP_DMA_DROP_IRQ))
		pr_warn("DMA synchronization event drop occurred with device %d\n",
			dma_chan[ch].dev_id);
	if (likely(csr & OMAP_DMA_BLOCK_IRQ))
		dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
	if (likely(dma_chan[ch].callback != NULL))
		dma_chan[ch].callback(ch, csr, dma_chan[ch].data);

	return 1;
}

static irqreturn_t omap1_dma_irq_handler(int irq, void *dev_id)
{
	int ch = ((int) dev_id) - 1;
	int handled = 0;

	for (;;) {
		int handled_now = 0;

		handled_now += omap1_dma_handle_ch(ch);
		if (enable_1510_mode && dma_chan[ch + 6].saved_csr)
			handled_now += omap1_dma_handle_ch(ch + 6);
		if (!handled_now)
			break;
		handled += handled_now;
	}

	return handled ? IRQ_HANDLED : IRQ_NONE;
}

struct omap_system_dma_plat_info *omap_get_plat_info(void)
{
	return p;
}
EXPORT_SYMBOL_GPL(omap_get_plat_info);

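/*
 * Platform driver probe: the register accessors, device attributes and
 * errata flags all come from platform data.  The channel table is sized
 * from the platform data (optionally capped by omap_dma_reserve_ch=), every
 * channel is cleared, and one interrupt per channel is requested by name
 * ("0", "1", ...).  In 1510 mode channels 6 and up have no interrupt of
 * their own, and the LCD DMA interrupt is handled in lcd_dma.c.  On
 * high-security devices channels 0 and 1 stay reserved for the ROM code.
 */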
static int omap_system_dma_probe(struct platform_device *pdev)
{
	int ch, ret = 0;
	int dma_irq;
	char irq_name[4];

	p = pdev->dev.platform_data;
	if (!p) {
		dev_err(&pdev->dev,
			"%s: System DMA initialized without platform data\n",
			__func__);
		return -EINVAL;
	}

	d			= p->dma_attr;
	errata			= p->errata;

	if ((d->dev_caps & RESERVE_CHANNEL) && omap_dma_reserve_channels
			&& (omap_dma_reserve_channels < d->lch_count))
		d->lch_count	= omap_dma_reserve_channels;

	dma_lch_count		= d->lch_count;
	dma_chan_count		= dma_lch_count;
	enable_1510_mode	= d->dev_caps & ENABLE_1510_MODE;

	dma_chan = devm_kcalloc(&pdev->dev, dma_lch_count,
				sizeof(*dma_chan), GFP_KERNEL);
	if (!dma_chan)
		return -ENOMEM;

	for (ch = 0; ch < dma_chan_count; ch++) {
		omap_clear_dma(ch);

		dma_chan[ch].dev_id = -1;
		dma_chan[ch].next_lch = -1;

		if (ch >= 6 && enable_1510_mode)
			continue;

		/*
		 * request_irq() doesn't like dev_id (ie. ch) being
		 * zero, so we have to kludge around this.
		 */
		sprintf(&irq_name[0], "%d", ch);
		dma_irq = platform_get_irq_byname(pdev, irq_name);

		if (dma_irq < 0) {
			ret = dma_irq;
			goto exit_dma_irq_fail;
		}

		/* INT_DMA_LCD is handled in lcd_dma.c */
		if (dma_irq == INT_DMA_LCD)
			continue;

		ret = request_irq(dma_irq,
				omap1_dma_irq_handler, 0, "DMA",
				(void *) (ch + 1));
		if (ret != 0)
			goto exit_dma_irq_fail;
	}

	/* reserve dma channels 0 and 1 in high security devices on 34xx */
	if (d->dev_caps & HS_CHANNELS_RESERVED) {
		pr_info("Reserving DMA channels 0 and 1 for HS ROM code\n");
		dma_chan[0].dev_id = 0;
		dma_chan[1].dev_id = 1;
	}
	p->show_dma_caps();
	return 0;

exit_dma_irq_fail:
	return ret;
}

static void omap_system_dma_remove(struct platform_device *pdev)
{
	int dma_irq, irq_rel = 0;

	for ( ; irq_rel < dma_chan_count; irq_rel++) {
		dma_irq = platform_get_irq(pdev, irq_rel);
		free_irq(dma_irq, (void *)(irq_rel + 1));
	}
}

static struct platform_driver omap_system_dma_driver = {
	.probe		= omap_system_dma_probe,
	.remove_new	= omap_system_dma_remove,
	.driver		= {
		.name	= "omap_dma_system"
	},
};

static int __init omap_system_dma_init(void)
{
	return platform_driver_register(&omap_system_dma_driver);
}
arch_initcall(omap_system_dma_init);

static void __exit omap_system_dma_exit(void)
{
	platform_driver_unregister(&omap_system_dma_driver);
}

MODULE_DESCRIPTION("OMAP SYSTEM DMA DRIVER");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Texas Instruments Inc");

/*
 * Reserve the omap SDMA channels using cmdline bootarg
 * "omap_dma_reserve_ch=". The valid range is 1 to 32
 */
static int __init omap_dma_cmdline_reserve_ch(char *str)
{
	if (get_option(&str, &omap_dma_reserve_channels) != 1)
		omap_dma_reserve_channels = 0;
	return 1;
}

__setup("omap_dma_reserve_ch=", omap_dma_cmdline_reserve_ch);
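
/*
 * For example, booting with "omap_dma_reserve_ch=4" (the number is
 * illustrative) caps the driver at the first four logical channels,
 * provided the platform data sets RESERVE_CHANNEL in dev_caps.
 */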