xref: /linux/arch/arm/mach-omap1/omap-dma.c (revision 4b132aacb0768ac1e652cf517097ea6f237214b9)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * linux/arch/arm/mach-omap1/omap-dma.c
4  *
5  * Copyright (C) 2003 - 2008 Nokia Corporation
6  * Author: Juha Yrjölä <juha.yrjola@nokia.com>
7  * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com>
8  * Graphics DMA and LCD DMA graphics transformations
9  * by Imre Deak <imre.deak@nokia.com>
10  * OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc.
11  * Merged to support both OMAP1 and OMAP2 by Tony Lindgren <tony@atomide.com>
12  * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc.
13  *
14  * Copyright (C) 2009 Texas Instruments
15  * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
16  *
17  * Support functions for the OMAP internal DMA channels.
18  *
19  * Copyright (C) 2010 Texas Instruments Incorporated - https://www.ti.com/
20  * Converted DMA library into DMA platform driver.
21  *	- G, Manjunath Kondaiah <manjugk@ti.com>
22  */
23 
24 #include <linux/module.h>
25 #include <linux/init.h>
26 #include <linux/sched.h>
27 #include <linux/spinlock.h>
28 #include <linux/errno.h>
29 #include <linux/interrupt.h>
30 #include <linux/irq.h>
31 #include <linux/io.h>
32 #include <linux/slab.h>
33 #include <linux/delay.h>
34 
35 #include <linux/omap-dma.h>
36 
37 #include <linux/soc/ti/omap1-io.h>
38 #include <linux/soc/ti/omap1-soc.h>
39 
40 #include "tc.h"
41 
42 /*
43  * MAX_LOGICAL_DMA_CH_COUNT: the maximum number of logical DMA
44  * channels that an instance of the SDMA IP block can support.  Used
45  * to size arrays.  (The actual maximum on a particular SoC may be less
46  * than this -- for example, OMAP1 SDMA instances only support 17 logical
47  * DMA channels.)
48  */
49 #define MAX_LOGICAL_DMA_CH_COUNT		32
50 
51 #undef DEBUG
52 
53 #define OMAP_DMA_ACTIVE			0x01
54 
55 #define OMAP_FUNC_MUX_ARM_BASE		(0xfffe1000 + 0xec)
56 
57 static struct omap_system_dma_plat_info *p;
58 static struct omap_dma_dev_attr *d;
59 static int enable_1510_mode;
60 static u32 errata;
61 
62 struct dma_link_info {
63 	int *linked_dmach_q;
64 	int no_of_lchs_linked;
65 
66 	int q_count;
67 	int q_tail;
68 	int q_head;
69 
70 	int chain_state;
71 	int chain_mode;
72 
73 };
74 
75 static int dma_lch_count;
76 static int dma_chan_count;
77 static int omap_dma_reserve_channels;
78 
79 static DEFINE_SPINLOCK(dma_chan_lock);
80 static struct omap_dma_lch *dma_chan;
81 
82 static inline void omap_disable_channel_irq(int lch)
83 {
84 	/* disable channel interrupts */
85 	p->dma_write(0, CICR, lch);
86 	/* Clear CSR */
87 	p->dma_read(CSR, lch);
88 }
89 
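/*
 * set_gdma_dev() programs the OMAP16xx global DMA request mux: each 32-bit
 * register at OMAP_FUNC_MUX_ARM_BASE holds five 6-bit device fields.  A
 * purely illustrative worked example: req = 7 selects the second register
 * ((7 - 1) / 5 == 1, so reg = base + 4) and the second field within it
 * ((7 - 1) % 5 == 1, so shift = 6), into which the 6-bit value (dev - 1)
 * is written.
 */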
90 static inline void set_gdma_dev(int req, int dev)
91 {
92 	u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
93 	int shift = ((req - 1) % 5) * 6;
94 	u32 l;
95 
96 	l = omap_readl(reg);
97 	l &= ~(0x3f << shift);
98 	l |= (dev - 1) << shift;
99 	omap_writel(l, reg);
100 }
101 
102 #if IS_ENABLED(CONFIG_FB_OMAP)
103 void omap_set_dma_priority(int lch, int dst_port, int priority)
104 {
105 	unsigned long reg;
106 	u32 l;
107 
108 	if (dma_omap1()) {
109 		switch (dst_port) {
110 		case OMAP_DMA_PORT_OCP_T1:	/* FFFECC00 */
111 			reg = OMAP_TC_OCPT1_PRIOR;
112 			break;
113 		case OMAP_DMA_PORT_OCP_T2:	/* FFFECCD0 */
114 			reg = OMAP_TC_OCPT2_PRIOR;
115 			break;
116 		case OMAP_DMA_PORT_EMIFF:	/* FFFECC08 */
117 			reg = OMAP_TC_EMIFF_PRIOR;
118 			break;
119 		case OMAP_DMA_PORT_EMIFS:	/* FFFECC04 */
120 			reg = OMAP_TC_EMIFS_PRIOR;
121 			break;
122 		default:
123 			BUG();
124 			return;
125 		}
126 		l = omap_readl(reg);
127 		l &= ~(0xf << 8);
128 		l |= (priority & 0xf) << 8;
129 		omap_writel(l, reg);
130 	}
131 }
132 EXPORT_SYMBOL(omap_set_dma_priority);
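/*
 * Minimal usage sketch (hypothetical caller, values for illustration only):
 * raise the priority of EMIFF accesses on OMAP1 to the maximum of 15:
 *
 *	omap_set_dma_priority(lch, OMAP_DMA_PORT_EMIFF, 15);
 *
 * Only bits 8..11 of the selected TC priority register are updated; the lch
 * argument is not used on OMAP1.
 */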
133 #endif
134 
135 #if IS_ENABLED(CONFIG_USB_OMAP)
136 #ifdef CONFIG_ARCH_OMAP15XX
137 /* Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise */
138 static int omap_dma_in_1510_mode(void)
139 {
140 	return enable_1510_mode;
141 }
142 #else
143 #define omap_dma_in_1510_mode()		0
144 #endif
145 
146 void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
147 				  int frame_count, int sync_mode,
148 				  int dma_trigger, int src_or_dst_synch)
149 {
150 	u32 l;
151 	u16 ccr;
152 
153 	l = p->dma_read(CSDP, lch);
154 	l &= ~0x03;
155 	l |= data_type;
156 	p->dma_write(l, CSDP, lch);
157 
158 	ccr = p->dma_read(CCR, lch);
159 	ccr &= ~(1 << 5);
160 	if (sync_mode == OMAP_DMA_SYNC_FRAME)
161 		ccr |= 1 << 5;
162 	p->dma_write(ccr, CCR, lch);
163 
164 	ccr = p->dma_read(CCR2, lch);
165 	ccr &= ~(1 << 2);
166 	if (sync_mode == OMAP_DMA_SYNC_BLOCK)
167 		ccr |= 1 << 2;
168 	p->dma_write(ccr, CCR2, lch);
169 	p->dma_write(elem_count, CEN, lch);
170 	p->dma_write(frame_count, CFN, lch);
171 }
172 EXPORT_SYMBOL(omap_set_dma_transfer_params);
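/*
 * Usage sketch (hypothetical channel "lch" and trigger "trig"): program a
 * frame-synchronized transfer of 64 frames of 32 16-bit elements,
 * synchronized on the source side:
 *
 *	omap_set_dma_transfer_params(lch, OMAP_DMA_DATA_TYPE_S16,
 *				     32, 64, OMAP_DMA_SYNC_FRAME,
 *				     trig, OMAP_DMA_SRC_SYNC);
 *
 * Note that in this OMAP1-only implementation the dma_trigger and
 * src_or_dst_synch arguments are accepted but not written to the hardware
 * by this function.
 */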
173 
174 void omap_set_dma_channel_mode(int lch, enum omap_dma_channel_mode mode)
175 {
176 	if (!dma_omap15xx()) {
177 		u32 l;
178 
179 		l = p->dma_read(LCH_CTRL, lch);
180 		l &= ~0x7;
181 		l |= mode;
182 		p->dma_write(l, LCH_CTRL, lch);
183 	}
184 }
185 EXPORT_SYMBOL(omap_set_dma_channel_mode);
186 
187 /* Note that src_port is only for OMAP1 */
188 void omap_set_dma_src_params(int lch, int src_port, int src_amode,
189 			     unsigned long src_start,
190 			     int src_ei, int src_fi)
191 {
192 	u32 l;
193 	u16 w;
194 
195 	w = p->dma_read(CSDP, lch);
196 	w &= ~(0x1f << 2);
197 	w |= src_port << 2;
198 	p->dma_write(w, CSDP, lch);
199 
200 	l = p->dma_read(CCR, lch);
201 	l &= ~(0x03 << 12);
202 	l |= src_amode << 12;
203 	p->dma_write(l, CCR, lch);
204 
205 	p->dma_write(src_start, CSSA, lch);
206 
207 	p->dma_write(src_ei, CSEI, lch);
208 	p->dma_write(src_fi, CSFI, lch);
209 }
210 EXPORT_SYMBOL(omap_set_dma_src_params);
211 
212 void omap_set_dma_src_data_pack(int lch, int enable)
213 {
214 	u32 l;
215 
216 	l = p->dma_read(CSDP, lch);
217 	l &= ~(1 << 6);
218 	if (enable)
219 		l |= (1 << 6);
220 	p->dma_write(l, CSDP, lch);
221 }
222 EXPORT_SYMBOL(omap_set_dma_src_data_pack);
223 
224 void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
225 {
226 	unsigned int burst = 0;
227 	u32 l;
228 
229 	l = p->dma_read(CSDP, lch);
230 	l &= ~(0x03 << 7);
231 
232 	switch (burst_mode) {
233 	case OMAP_DMA_DATA_BURST_DIS:
234 		break;
235 	case OMAP_DMA_DATA_BURST_4:
236 		burst = 0x2;
237 		break;
238 	case OMAP_DMA_DATA_BURST_8:
239 		/*
240 		 * not supported by current hardware on OMAP1
241 		 * w |= (0x03 << 7);
242 		 */
243 		fallthrough;
244 	case OMAP_DMA_DATA_BURST_16:
245 		/* OMAP1 doesn't support burst 16 */
246 		fallthrough;
247 	default:
248 		BUG();
249 	}
250 
251 	l |= (burst << 7);
252 	p->dma_write(l, CSDP, lch);
253 }
254 EXPORT_SYMBOL(omap_set_dma_src_burst_mode);
255 
256 /* Note that dest_port is only for OMAP1 */
257 void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
258 			      unsigned long dest_start,
259 			      int dst_ei, int dst_fi)
260 {
261 	u32 l;
262 
263 	l = p->dma_read(CSDP, lch);
264 	l &= ~(0x1f << 9);
265 	l |= dest_port << 9;
266 	p->dma_write(l, CSDP, lch);
267 
268 	l = p->dma_read(CCR, lch);
269 	l &= ~(0x03 << 14);
270 	l |= dest_amode << 14;
271 	p->dma_write(l, CCR, lch);
272 
273 	p->dma_write(dest_start, CDSA, lch);
274 
275 	p->dma_write(dst_ei, CDEI, lch);
276 	p->dma_write(dst_fi, CDFI, lch);
277 }
278 EXPORT_SYMBOL(omap_set_dma_dest_params);
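/*
 * Sketch of a typical pairing (hypothetical addresses, for illustration
 * only): memory-to-peripheral, post-incrementing source buffer in EMIFF
 * SDRAM and a constant destination register on the TIPB port:
 *
 *	omap_set_dma_src_params(lch, OMAP_DMA_PORT_EMIFF,
 *				OMAP_DMA_AMODE_POST_INC, buf_dma_addr, 0, 0);
 *	omap_set_dma_dest_params(lch, OMAP_DMA_PORT_TIPB,
 *				 OMAP_DMA_AMODE_CONSTANT, fifo_reg_phys, 0, 0);
 */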
279 
280 void omap_set_dma_dest_data_pack(int lch, int enable)
281 {
282 	u32 l;
283 
284 	l = p->dma_read(CSDP, lch);
285 	l &= ~(1 << 13);
286 	if (enable)
287 		l |= 1 << 13;
288 	p->dma_write(l, CSDP, lch);
289 }
290 EXPORT_SYMBOL(omap_set_dma_dest_data_pack);
291 
292 void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
293 {
294 	unsigned int burst = 0;
295 	u32 l;
296 
297 	l = p->dma_read(CSDP, lch);
298 	l &= ~(0x03 << 14);
299 
300 	switch (burst_mode) {
301 	case OMAP_DMA_DATA_BURST_DIS:
302 		break;
303 	case OMAP_DMA_DATA_BURST_4:
304 		burst = 0x2;
305 		break;
306 	case OMAP_DMA_DATA_BURST_8:
307 		burst = 0x3;
308 		break;
309 	case OMAP_DMA_DATA_BURST_16:
310 		/* OMAP1 doesn't support burst 16 */
311 		fallthrough;
312 	default:
313 		printk(KERN_ERR "Invalid DMA burst mode\n");
314 		BUG();
315 		return;
316 	}
317 	l |= (burst << 14);
318 	p->dma_write(l, CSDP, lch);
319 }
320 EXPORT_SYMBOL(omap_set_dma_dest_burst_mode);
321 
322 static inline void omap_enable_channel_irq(int lch)
323 {
324 	/* Clear CSR */
325 	p->dma_read(CSR, lch);
326 
327 	/* Enable some nice interrupts. */
328 	p->dma_write(dma_chan[lch].enabled_irqs, CICR, lch);
329 }
330 
331 void omap_disable_dma_irq(int lch, u16 bits)
332 {
333 	dma_chan[lch].enabled_irqs &= ~bits;
334 }
335 EXPORT_SYMBOL(omap_disable_dma_irq);
336 
337 static inline void enable_lnk(int lch)
338 {
339 	u32 l;
340 
341 	l = p->dma_read(CLNK_CTRL, lch);
342 
343 	l &= ~(1 << 14);
344 
345 	/* Set the ENABLE_LNK bits */
346 	if (dma_chan[lch].next_lch != -1)
347 		l = dma_chan[lch].next_lch | (1 << 15);
348 
349 	p->dma_write(l, CLNK_CTRL, lch);
350 }
351 
352 static inline void disable_lnk(int lch)
353 {
354 	u32 l;
355 
356 	l = p->dma_read(CLNK_CTRL, lch);
357 
358 	/* Disable interrupts */
359 	omap_disable_channel_irq(lch);
360 
361 	/* Set the STOP_LNK bit */
362 	l |= 1 << 14;
363 
364 	p->dma_write(l, CLNK_CTRL, lch);
365 	dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
366 }
367 #endif
368 
369 int omap_request_dma(int dev_id, const char *dev_name,
370 		     void (*callback)(int lch, u16 ch_status, void *data),
371 		     void *data, int *dma_ch_out)
372 {
373 	int ch, free_ch = -1;
374 	unsigned long flags;
375 	struct omap_dma_lch *chan;
376 
377 	WARN(strcmp(dev_name, "DMA engine"), "Using deprecated platform DMA API - please update to DMA engine");
378 
379 	spin_lock_irqsave(&dma_chan_lock, flags);
380 	for (ch = 0; ch < dma_chan_count; ch++) {
381 		if (free_ch == -1 && dma_chan[ch].dev_id == -1) {
382 			free_ch = ch;
383 			/* Exit after first free channel found */
384 			break;
385 		}
386 	}
387 	if (free_ch == -1) {
388 		spin_unlock_irqrestore(&dma_chan_lock, flags);
389 		return -EBUSY;
390 	}
391 	chan = dma_chan + free_ch;
392 	chan->dev_id = dev_id;
393 
394 	if (p->clear_lch_regs)
395 		p->clear_lch_regs(free_ch);
396 
397 	spin_unlock_irqrestore(&dma_chan_lock, flags);
398 
399 	chan->dev_name = dev_name;
400 	chan->callback = callback;
401 	chan->data = data;
402 	chan->flags = 0;
403 
404 	chan->enabled_irqs = OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ;
405 
406 	chan->enabled_irqs |= OMAP1_DMA_TOUT_IRQ;
407 
408 	if (dma_omap16xx()) {
409 		/* If the sync device is set, configure it dynamically. */
410 		if (dev_id != 0) {
411 			set_gdma_dev(free_ch + 1, dev_id);
412 			dev_id = free_ch + 1;
413 		}
414 		/*
415 		 * Disable the 1510 compatibility mode and set the sync device
416 		 * id.
417 		 */
418 		p->dma_write(dev_id | (1 << 10), CCR, free_ch);
419 	} else {
420 		p->dma_write(dev_id, CCR, free_ch);
421 	}
422 
423 	*dma_ch_out = free_ch;
424 
425 	return 0;
426 }
427 EXPORT_SYMBOL(omap_request_dma);
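/*
 * Sketch of the legacy request/free flow (hypothetical driver code; the WARN
 * above fires for any caller other than the DMA engine driver, so new code
 * should use the dmaengine framework instead):
 *
 *	static void my_dma_cb(int lch, u16 ch_status, void *data)
 *	{
 *		... handle completion; ch_status carries the CSR bits ...
 *	}
 *
 *	int lch;
 *
 *	if (omap_request_dma(my_dev_id, "my device", my_dma_cb, priv, &lch))
 *		return -EBUSY;
 *	...
 *	omap_free_dma(lch);
 */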
428 
429 void omap_free_dma(int lch)
430 {
431 	unsigned long flags;
432 
433 	if (dma_chan[lch].dev_id == -1) {
434 		pr_err("omap_dma: trying to free unallocated DMA channel %d\n",
435 		       lch);
436 		return;
437 	}
438 
439 	/* Disable all DMA interrupts for the channel. */
440 	omap_disable_channel_irq(lch);
441 
442 	/* Make sure the DMA transfer is stopped. */
443 	p->dma_write(0, CCR, lch);
444 
445 	spin_lock_irqsave(&dma_chan_lock, flags);
446 	dma_chan[lch].dev_id = -1;
447 	dma_chan[lch].next_lch = -1;
448 	dma_chan[lch].callback = NULL;
449 	spin_unlock_irqrestore(&dma_chan_lock, flags);
450 }
451 EXPORT_SYMBOL(omap_free_dma);
452 
453 /*
454  * Clears any DMA state so the DMA engine is ready to restart with new buffers
455  * through omap_start_dma(). Any buffers in flight are discarded.
456  */
457 static void omap_clear_dma(int lch)
458 {
459 	unsigned long flags;
460 
461 	local_irq_save(flags);
462 	p->clear_dma(lch);
463 	local_irq_restore(flags);
464 }
465 
466 #if IS_ENABLED(CONFIG_USB_OMAP)
467 void omap_start_dma(int lch)
468 {
469 	u32 l;
470 
471 	/*
472 	 * The CPC/CDAC register needs to be initialized to zero
473 	 * before starting a DMA transfer.
474 	 */
475 	if (dma_omap15xx())
476 		p->dma_write(0, CPC, lch);
477 	else
478 		p->dma_write(0, CDAC, lch);
479 
480 	if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
481 		int next_lch, cur_lch;
482 		char dma_chan_link_map[MAX_LOGICAL_DMA_CH_COUNT];
483 
484 		/* Set the link register of the first channel */
485 		enable_lnk(lch);
486 
487 		memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
488 		dma_chan_link_map[lch] = 1;
489 
490 		cur_lch = dma_chan[lch].next_lch;
491 		do {
492 			next_lch = dma_chan[cur_lch].next_lch;
493 
494 			/* The loop case: we've been here already */
495 			if (dma_chan_link_map[cur_lch])
496 				break;
497 			/* Mark the current channel */
498 			dma_chan_link_map[cur_lch] = 1;
499 
500 			enable_lnk(cur_lch);
501 			omap_enable_channel_irq(cur_lch);
502 
503 			cur_lch = next_lch;
504 		} while (next_lch != -1);
505 	} else if (IS_DMA_ERRATA(DMA_ERRATA_PARALLEL_CHANNELS))
506 		p->dma_write(lch, CLNK_CTRL, lch);
507 
508 	omap_enable_channel_irq(lch);
509 
510 	l = p->dma_read(CCR, lch);
511 
512 	if (IS_DMA_ERRATA(DMA_ERRATA_IFRAME_BUFFERING))
513 		l |= OMAP_DMA_CCR_BUFFERING_DISABLE;
514 	l |= OMAP_DMA_CCR_EN;
515 
516 	/*
517 	 * As dma_write() uses IO accessors which are weakly ordered, there
518 	 * is no guarantee that data in coherent DMA memory will be visible
519 	 * to the DMA device.  Add a memory barrier here to ensure that any
520 	 * such data is visible prior to enabling DMA.
521 	 */
522 	mb();
523 	p->dma_write(l, CCR, lch);
524 
525 	dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
526 }
527 EXPORT_SYMBOL(omap_start_dma);
528 
529 void omap_stop_dma(int lch)
530 {
531 	u32 l;
532 
533 	/* Disable all interrupts on the channel */
534 	omap_disable_channel_irq(lch);
535 
536 	l = p->dma_read(CCR, lch);
537 	if (IS_DMA_ERRATA(DMA_ERRATA_i541) &&
538 			(l & OMAP_DMA_CCR_SEL_SRC_DST_SYNC)) {
539 		int i = 0;
540 		u32 sys_cf;
541 
542 		/* Configure No-Standby */
543 		l = p->dma_read(OCP_SYSCONFIG, lch);
544 		sys_cf = l;
545 		l &= ~DMA_SYSCONFIG_MIDLEMODE_MASK;
546 		l |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
547 		p->dma_write(l, OCP_SYSCONFIG, 0);
548 
549 		l = p->dma_read(CCR, lch);
550 		l &= ~OMAP_DMA_CCR_EN;
551 		p->dma_write(l, CCR, lch);
552 
553 		/* Wait for sDMA FIFO drain */
554 		l = p->dma_read(CCR, lch);
555 		while (i < 100 && (l & (OMAP_DMA_CCR_RD_ACTIVE |
556 					OMAP_DMA_CCR_WR_ACTIVE))) {
557 			udelay(5);
558 			i++;
559 			l = p->dma_read(CCR, lch);
560 		}
561 		if (i >= 100)
562 			pr_err("DMA drain did not complete on lch %d\n", lch);
563 		/* Restore OCP_SYSCONFIG */
564 		p->dma_write(sys_cf, OCP_SYSCONFIG, lch);
565 	} else {
566 		l &= ~OMAP_DMA_CCR_EN;
567 		p->dma_write(l, CCR, lch);
568 	}
569 
570 	/*
571 	 * Ensure that data transferred by DMA is visible to any access
572 	 * after DMA has been disabled.  This is important for coherent
573 	 * DMA regions.
574 	 */
575 	mb();
576 
577 	if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
578 		int next_lch, cur_lch = lch;
579 		char dma_chan_link_map[MAX_LOGICAL_DMA_CH_COUNT];
580 
581 		memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
582 		do {
583 			/* The loop case: we've been here already */
584 			if (dma_chan_link_map[cur_lch])
585 				break;
586 			/* Mark the current channel */
587 			dma_chan_link_map[cur_lch] = 1;
588 
589 			disable_lnk(cur_lch);
590 
591 			next_lch = dma_chan[cur_lch].next_lch;
592 			cur_lch = next_lch;
593 		} while (next_lch != -1);
594 	}
595 
596 	dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
597 }
598 EXPORT_SYMBOL(omap_stop_dma);
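/*
 * Start/stop sketch (hypothetical caller): once the channel parameters have
 * been programmed, a transfer is kicked off and later torn down with:
 *
 *	omap_start_dma(lch);
 *	...
 *	omap_stop_dma(lch);
 *
 * omap_start_dma() walks any channel link chain set up via next_lch before
 * setting OMAP_DMA_CCR_EN, and omap_stop_dma() disables the channel
 * interrupts first so the teardown does not race with the IRQ handler.
 */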
599 
604 /*
605  * Returns current physical source address for the given DMA channel.
606  * If the channel is running, the caller must disable interrupts prior to
607  * calling this function and process the returned value before re-enabling them to
608  * prevent races with the interrupt handler. Note that in continuous mode there
609  * is a chance for CSSA_L register overflow between the two reads resulting
610  * in incorrect return value.
611  */
612 dma_addr_t omap_get_dma_src_pos(int lch)
613 {
614 	dma_addr_t offset = 0;
615 
616 	if (dma_omap15xx())
617 		offset = p->dma_read(CPC, lch);
618 	else
619 		offset = p->dma_read(CSAC, lch);
620 
621 	if (IS_DMA_ERRATA(DMA_ERRATA_3_3) && offset == 0)
622 		offset = p->dma_read(CSAC, lch);
623 
624 	if (!dma_omap15xx()) {
625 		/*
626 		 * CDAC == 0 indicates that the DMA transfer on the channel has
627 		 * not been started (no data has been transferred so far).
628 		 * Return the programmed source start address in this case.
629 		 */
630 		if (likely(p->dma_read(CDAC, lch)))
631 			offset = p->dma_read(CSAC, lch);
632 		else
633 			offset = p->dma_read(CSSA, lch);
634 	}
635 
636 	offset |= (p->dma_read(CSSA, lch) & 0xFFFF0000);
637 
638 	return offset;
639 }
640 EXPORT_SYMBOL(omap_get_dma_src_pos);
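/*
 * Per the comment above, a sketch of a safe read while the channel may be
 * running (hypothetical caller):
 *
 *	unsigned long flags;
 *	dma_addr_t pos;
 *
 *	local_irq_save(flags);
 *	pos = omap_get_dma_src_pos(lch);
 *	... consume pos ...
 *	local_irq_restore(flags);
 */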
641 
642 /*
643  * Returns current physical destination address for the given DMA channel.
644  * If the channel is running, the caller must disable interrupts prior to
645  * calling this function and process the returned value before re-enabling them to
646  * prevent races with the interrupt handler. Note that in continuous mode there
647  * is a chance for CDSA_L register overflow between the two reads resulting
648  * in incorrect return value.
649  */
650 dma_addr_t omap_get_dma_dst_pos(int lch)
651 {
652 	dma_addr_t offset = 0;
653 
654 	if (dma_omap15xx())
655 		offset = p->dma_read(CPC, lch);
656 	else
657 		offset = p->dma_read(CDAC, lch);
658 
659 	/*
660 	 * OMAP 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
661 	 * read before the DMA controller has finished disabling the channel.
662 	 */
663 	if (!dma_omap15xx() && offset == 0) {
664 		offset = p->dma_read(CDAC, lch);
665 		/*
666 		 * CDAC == 0 indicates that the DMA transfer on the channel has
667 		 * not been started (no data has been transferred so far).
668 		 * Return the programmed destination start address in this case.
669 		 */
670 		if (unlikely(!offset))
671 			offset = p->dma_read(CDSA, lch);
672 	}
673 
674 	offset |= (p->dma_read(CDSA, lch) & 0xFFFF0000);
675 
676 	return offset;
677 }
678 EXPORT_SYMBOL(omap_get_dma_dst_pos);
679 
680 int omap_get_dma_active_status(int lch)
681 {
682 	return (p->dma_read(CCR, lch) & OMAP_DMA_CCR_EN) != 0;
683 }
684 EXPORT_SYMBOL(omap_get_dma_active_status);
685 #endif
686 
687 int omap_dma_running(void)
688 {
689 	int lch;
690 
691 	if (omap_lcd_dma_running())
692 		return 1;
693 
694 	for (lch = 0; lch < dma_chan_count; lch++)
695 		if (p->dma_read(CCR, lch) & OMAP_DMA_CCR_EN)
696 			return 1;
697 
698 	return 0;
699 }
700 
701 /*----------------------------------------------------------------------------*/
702 
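/*
 * In 1510-compatible mode the status for channels 6..8 is reported through
 * the CSR of channels 0..2 (bits 7 and up).  omap1_dma_handle_ch() therefore
 * stashes those upper bits in saved_csr, and the IRQ handler below makes a
 * second pass for ch + 6 whenever saved_csr is non-zero.
 */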
703 static int omap1_dma_handle_ch(int ch)
704 {
705 	u32 csr;
706 
707 	if (enable_1510_mode && ch >= 6) {
708 		csr = dma_chan[ch].saved_csr;
709 		dma_chan[ch].saved_csr = 0;
710 	} else
711 		csr = p->dma_read(CSR, ch);
712 	if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) {
713 		dma_chan[ch + 6].saved_csr = csr >> 7;
714 		csr &= 0x7f;
715 	}
716 	if ((csr & 0x3f) == 0)
717 		return 0;
718 	if (unlikely(dma_chan[ch].dev_id == -1)) {
719 		pr_warn("Spurious interrupt from DMA channel %d (CSR %04x)\n",
720 			ch, csr);
721 		return 0;
722 	}
723 	if (unlikely(csr & OMAP1_DMA_TOUT_IRQ))
724 		pr_warn("DMA timeout with device %d\n", dma_chan[ch].dev_id);
725 	if (unlikely(csr & OMAP_DMA_DROP_IRQ))
726 		pr_warn("DMA synchronization event drop occurred with device %d\n",
727 			dma_chan[ch].dev_id);
728 	if (likely(csr & OMAP_DMA_BLOCK_IRQ))
729 		dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
730 	if (likely(dma_chan[ch].callback != NULL))
731 		dma_chan[ch].callback(ch, csr, dma_chan[ch].data);
732 
733 	return 1;
734 }
735 
736 static irqreturn_t omap1_dma_irq_handler(int irq, void *dev_id)
737 {
738 	int ch = ((int) dev_id) - 1;
739 	int handled = 0;
740 
741 	for (;;) {
742 		int handled_now = 0;
743 
744 		handled_now += omap1_dma_handle_ch(ch);
745 		if (enable_1510_mode && dma_chan[ch + 6].saved_csr)
746 			handled_now += omap1_dma_handle_ch(ch + 6);
747 		if (!handled_now)
748 			break;
749 		handled += handled_now;
750 	}
751 
752 	return handled ? IRQ_HANDLED : IRQ_NONE;
753 }
754 
755 struct omap_system_dma_plat_info *omap_get_plat_info(void)
756 {
757 	return p;
758 }
759 EXPORT_SYMBOL_GPL(omap_get_plat_info);
760 
761 static int omap_system_dma_probe(struct platform_device *pdev)
762 {
763 	int ch, ret = 0;
764 	int dma_irq;
765 	char irq_name[4];
766 
767 	p = pdev->dev.platform_data;
768 	if (!p) {
769 		dev_err(&pdev->dev,
770 			"%s: System DMA initialized without platform data\n",
771 			__func__);
772 		return -EINVAL;
773 	}
774 
775 	d			= p->dma_attr;
776 	errata			= p->errata;
777 
778 	if ((d->dev_caps & RESERVE_CHANNEL) && omap_dma_reserve_channels
779 			&& (omap_dma_reserve_channels < d->lch_count))
780 		d->lch_count	= omap_dma_reserve_channels;
781 
782 	dma_lch_count		= d->lch_count;
783 	dma_chan_count		= dma_lch_count;
784 	enable_1510_mode	= d->dev_caps & ENABLE_1510_MODE;
785 
786 	dma_chan = devm_kcalloc(&pdev->dev, dma_lch_count,
787 				sizeof(*dma_chan), GFP_KERNEL);
788 	if (!dma_chan)
789 		return -ENOMEM;
790 
791 	for (ch = 0; ch < dma_chan_count; ch++) {
792 		omap_clear_dma(ch);
793 
794 		dma_chan[ch].dev_id = -1;
795 		dma_chan[ch].next_lch = -1;
796 
797 		if (ch >= 6 && enable_1510_mode)
798 			continue;
799 
800 		/*
801 		 * request_irq() doesn't like dev_id (i.e. ch) being
802 		 * zero, so we have to kludge around this.
803 		 */
804 		sprintf(&irq_name[0], "%d", ch);
805 		dma_irq = platform_get_irq_byname(pdev, irq_name);
806 
807 		if (dma_irq < 0) {
808 			ret = dma_irq;
809 			goto exit_dma_irq_fail;
810 		}
811 
812 		/* INT_DMA_LCD is handled in lcd_dma.c */
813 		if (dma_irq == INT_DMA_LCD)
814 			continue;
815 
816 		ret = request_irq(dma_irq,
817 				omap1_dma_irq_handler, 0, "DMA",
818 				(void *) (ch + 1));
819 		if (ret != 0)
820 			goto exit_dma_irq_fail;
821 	}
822 
823 	/* reserve dma channels 0 and 1 in high security devices on 34xx */
824 	if (d->dev_caps & HS_CHANNELS_RESERVED) {
825 		pr_info("Reserving DMA channels 0 and 1 for HS ROM code\n");
826 		dma_chan[0].dev_id = 0;
827 		dma_chan[1].dev_id = 1;
828 	}
829 	p->show_dma_caps();
830 	return 0;
831 
832 exit_dma_irq_fail:
833 	return ret;
834 }
835 
836 static void omap_system_dma_remove(struct platform_device *pdev)
837 {
838 	int dma_irq, irq_rel = 0;
839 
840 	for ( ; irq_rel < dma_chan_count; irq_rel++) {
841 		dma_irq = platform_get_irq(pdev, irq_rel);
842 		free_irq(dma_irq, (void *)(irq_rel + 1));
843 	}
844 }
845 
846 static struct platform_driver omap_system_dma_driver = {
847 	.probe		= omap_system_dma_probe,
848 	.remove_new	= omap_system_dma_remove,
849 	.driver		= {
850 		.name	= "omap_dma_system"
851 	},
852 };
853 
854 static int __init omap_system_dma_init(void)
855 {
856 	return platform_driver_register(&omap_system_dma_driver);
857 }
858 arch_initcall(omap_system_dma_init);
859 
860 static void __exit omap_system_dma_exit(void)
861 {
862 	platform_driver_unregister(&omap_system_dma_driver);
863 }
864 
865 MODULE_DESCRIPTION("OMAP SYSTEM DMA DRIVER");
866 MODULE_LICENSE("GPL");
867 MODULE_AUTHOR("Texas Instruments Inc");
868 
869 /*
870  * Reserve OMAP SDMA channels using the command-line boot argument
871  * "omap_dma_reserve_ch=".  The valid range is 1 to 32.
872  */
873 static int __init omap_dma_cmdline_reserve_ch(char *str)
874 {
875 	if (get_option(&str, &omap_dma_reserve_channels) != 1)
876 		omap_dma_reserve_channels = 0;
877 	return 1;
878 }
879 
880 __setup("omap_dma_reserve_ch=", omap_dma_cmdline_reserve_ch);
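/*
 * For example, booting with "omap_dma_reserve_ch=8" limits the driver to the
 * first 8 logical channels (assuming the platform data sets RESERVE_CHANNEL
 * and the SoC provides at least that many).
 */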
881 
882 
883