xref: /linux/drivers/dma/dw-edma/dw-edma-core.c (revision a7f7f6248d9740d710fd6bd190293fe5e16410ac)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
4  * Synopsys DesignWare eDMA core driver
5  *
6  * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
7  */
8 
9 #include <linux/module.h>
10 #include <linux/device.h>
11 #include <linux/kernel.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/dmaengine.h>
14 #include <linux/err.h>
15 #include <linux/interrupt.h>
16 #include <linux/irq.h>
17 #include <linux/dma/edma.h>
18 #include <linux/dma-mapping.h>
19 
20 #include "dw-edma-core.h"
21 #include "dw-edma-v0-core.h"
22 #include "../dmaengine.h"
23 #include "../virt-dma.h"
24 
25 static inline
26 struct device *dchan2dev(struct dma_chan *dchan)
27 {
28 	return &dchan->dev->device;
29 }
30 
31 static inline
32 struct device *chan2dev(struct dw_edma_chan *chan)
33 {
34 	return &chan->vc.chan.dev->device;
35 }
36 
37 static inline
38 struct dw_edma_desc *vd2dw_edma_desc(struct virt_dma_desc *vd)
39 {
40 	return container_of(vd, struct dw_edma_desc, vd);
41 }
42 
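/*
 * Allocate a single burst node (GFP_NOWAIT) and attach it to @chunk:
 * the first burst becomes the list head (bursts_alloc = 0), later
 * bursts are appended to the tail and bursts_alloc is incremented.
 */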
43 static struct dw_edma_burst *dw_edma_alloc_burst(struct dw_edma_chunk *chunk)
44 {
45 	struct dw_edma_burst *burst;
46 
47 	burst = kzalloc(sizeof(*burst), GFP_NOWAIT);
48 	if (unlikely(!burst))
49 		return NULL;
50 
51 	INIT_LIST_HEAD(&burst->list);
52 	if (chunk->burst) {
53 		/* Create and add a new element to the linked list */
54 		chunk->bursts_alloc++;
55 		list_add_tail(&burst->list, &chunk->burst->list);
56 	} else {
57 		/* List head */
58 		chunk->bursts_alloc = 0;
59 		chunk->burst = burst;
60 	}
61 
62 	return burst;
63 }
64 
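/*
 * Allocate a chunk (one hardware linked list) for @desc. The change
 * bit (CB) is derived from the chunks_alloc parity, and the chunk's
 * linked-list region points at this channel's slice of dw->ll_region.
 * The first chunk becomes the list head; later chunks are appended
 * and get their burst list head allocated immediately.
 */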
65 static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
66 {
67 	struct dw_edma_chan *chan = desc->chan;
68 	struct dw_edma *dw = chan->chip->dw;
69 	struct dw_edma_chunk *chunk;
70 
71 	chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT);
72 	if (unlikely(!chunk))
73 		return NULL;
74 
75 	INIT_LIST_HEAD(&chunk->list);
76 	chunk->chan = chan;
77 	/* Toggle the change bit (CB) in each chunk; this mechanism informs
78 	 * the eDMA HW block that a new linked list is ready to be
79 	 * consumed.
80 	 *  - Odd chunks originate CB equal to 0
81 	 *  - Even chunks originate CB equal to 1
82 	 */
83 	chunk->cb = !(desc->chunks_alloc % 2);
84 	chunk->ll_region.paddr = dw->ll_region.paddr + chan->ll_off;
85 	chunk->ll_region.vaddr = dw->ll_region.vaddr + chan->ll_off;
86 
87 	if (desc->chunk) {
88 		/* Create and add a new element to the linked list */
89 		desc->chunks_alloc++;
90 		list_add_tail(&chunk->list, &desc->chunk->list);
91 		if (!dw_edma_alloc_burst(chunk)) {
92 			kfree(chunk);
93 			return NULL;
94 		}
95 	} else {
96 		/* List head */
97 		chunk->burst = NULL;
98 		desc->chunks_alloc = 0;
99 		desc->chunk = chunk;
100 	}
101 
102 	return chunk;
103 }
104 
105 static struct dw_edma_desc *dw_edma_alloc_desc(struct dw_edma_chan *chan)
106 {
107 	struct dw_edma_desc *desc;
108 
109 	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
110 	if (unlikely(!desc))
111 		return NULL;
112 
113 	desc->chan = chan;
114 	if (!dw_edma_alloc_chunk(desc)) {
115 		kfree(desc);
116 		return NULL;
117 	}
118 
119 	return desc;
120 }
121 
122 static void dw_edma_free_burst(struct dw_edma_chunk *chunk)
123 {
124 	struct dw_edma_burst *child, *_next;
125 
126 	/* Remove all the list elements */
127 	list_for_each_entry_safe(child, _next, &chunk->burst->list, list) {
128 		list_del(&child->list);
129 		kfree(child);
130 		chunk->bursts_alloc--;
131 	}
132 
133 	/* Remove the list head */
134 	kfree(child);
135 	chunk->burst = NULL;
136 }
137 
138 static void dw_edma_free_chunk(struct dw_edma_desc *desc)
139 {
140 	struct dw_edma_chunk *child, *_next;
141 
142 	if (!desc->chunk)
143 		return;
144 
145 	/* Remove all the list elements */
146 	list_for_each_entry_safe(child, _next, &desc->chunk->list, list) {
147 		dw_edma_free_burst(child);
148 		list_del(&child->list);
149 		kfree(child);
150 		desc->chunks_alloc--;
151 	}
152 
153 	/* Remove the list head */
154 	kfree(child);
155 	desc->chunk = NULL;
156 }
157 
158 static void dw_edma_free_desc(struct dw_edma_desc *desc)
159 {
160 	dw_edma_free_chunk(desc);
161 	kfree(desc);
162 }
163 
164 static void vchan_free_desc(struct virt_dma_desc *vdesc)
165 {
166 	dw_edma_free_desc(vd2dw_edma_desc(vdesc));
167 }
168 
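/*
 * Program the next chunk of the current descriptor into the hardware.
 * dw_edma_v0_core_start() is told whether this is the first chunk of
 * the descriptor (nothing transferred yet); once programmed, the
 * transferred size is accounted in desc->xfer_sz and the chunk and
 * its bursts are freed.
 */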
169 static void dw_edma_start_transfer(struct dw_edma_chan *chan)
170 {
171 	struct dw_edma_chunk *child;
172 	struct dw_edma_desc *desc;
173 	struct virt_dma_desc *vd;
174 
175 	vd = vchan_next_desc(&chan->vc);
176 	if (!vd)
177 		return;
178 
179 	desc = vd2dw_edma_desc(vd);
180 	if (!desc)
181 		return;
182 
183 	child = list_first_entry_or_null(&desc->chunk->list,
184 					 struct dw_edma_chunk, list);
185 	if (!child)
186 		return;
187 
188 	dw_edma_v0_core_start(child, !desc->xfer_sz);
189 	desc->xfer_sz += child->ll_region.sz;
190 	dw_edma_free_burst(child);
191 	list_del(&child->list);
192 	kfree(child);
193 	desc->chunks_alloc--;
194 }
195 
196 static int dw_edma_device_config(struct dma_chan *dchan,
197 				 struct dma_slave_config *config)
198 {
199 	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
200 
201 	memcpy(&chan->config, config, sizeof(*config));
202 	chan->configured = true;
203 
204 	return 0;
205 }
206 
207 static int dw_edma_device_pause(struct dma_chan *dchan)
208 {
209 	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
210 	int err = 0;
211 
212 	if (!chan->configured)
213 		err = -EPERM;
214 	else if (chan->status != EDMA_ST_BUSY)
215 		err = -EPERM;
216 	else if (chan->request != EDMA_REQ_NONE)
217 		err = -EPERM;
218 	else
219 		chan->request = EDMA_REQ_PAUSE;
220 
221 	return err;
222 }
223 
224 static int dw_edma_device_resume(struct dma_chan *dchan)
225 {
226 	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
227 	int err = 0;
228 
229 	if (!chan->configured) {
230 		err = -EPERM;
231 	} else if (chan->status != EDMA_ST_PAUSE) {
232 		err = -EPERM;
233 	} else if (chan->request != EDMA_REQ_NONE) {
234 		err = -EPERM;
235 	} else {
236 		chan->status = EDMA_ST_BUSY;
237 		dw_edma_start_transfer(chan);
238 	}
239 
240 	return err;
241 }
242 
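/*
 * Terminate according to the current channel state: unconfigured,
 * idle and paused channels are simply marked unconfigured, a channel
 * stuck in a false BUSY state is reset to idle, and a genuinely busy
 * channel is asked to stop via EDMA_REQ_STOP, which the done
 * interrupt handler completes.
 */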
243 static int dw_edma_device_terminate_all(struct dma_chan *dchan)
244 {
245 	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
246 	int err = 0;
247 	LIST_HEAD(head);
248 
249 	if (!chan->configured) {
250 		/* Do nothing */
251 	} else if (chan->status == EDMA_ST_PAUSE) {
252 		chan->status = EDMA_ST_IDLE;
253 		chan->configured = false;
254 	} else if (chan->status == EDMA_ST_IDLE) {
255 		chan->configured = false;
256 	} else if (dw_edma_v0_core_ch_status(chan) == DMA_COMPLETE) {
257 		/*
258 		 * The channel is in a false BUSY state; most likely an
259 		 * interrupt was missed or lost
260 		 */
261 		chan->status = EDMA_ST_IDLE;
262 		chan->configured = false;
263 	} else if (chan->request > EDMA_REQ_PAUSE) {
264 		err = -EPERM;
265 	} else {
266 		chan->request = EDMA_REQ_STOP;
267 	}
268 
269 	return err;
270 }
271 
272 static void dw_edma_device_issue_pending(struct dma_chan *dchan)
273 {
274 	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
275 	unsigned long flags;
276 
277 	spin_lock_irqsave(&chan->vc.lock, flags);
278 	if (chan->configured && chan->request == EDMA_REQ_NONE &&
279 	    chan->status == EDMA_ST_IDLE && vchan_issue_pending(&chan->vc)) {
280 		chan->status = EDMA_ST_BUSY;
281 		dw_edma_start_transfer(chan);
282 	}
283 	spin_unlock_irqrestore(&chan->vc.lock, flags);
284 }
285 
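/*
 * Report the cookie status, mapping an in-progress transfer on a
 * paused channel to DMA_PAUSED. The residue is the descriptor's
 * allocated size minus what has already been handed to the hardware.
 */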
286 static enum dma_status
287 dw_edma_device_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
288 			 struct dma_tx_state *txstate)
289 {
290 	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
291 	struct dw_edma_desc *desc;
292 	struct virt_dma_desc *vd;
293 	unsigned long flags;
294 	enum dma_status ret;
295 	u32 residue = 0;
296 
297 	ret = dma_cookie_status(dchan, cookie, txstate);
298 	if (ret == DMA_COMPLETE)
299 		return ret;
300 
301 	if (ret == DMA_IN_PROGRESS && chan->status == EDMA_ST_PAUSE)
302 		ret = DMA_PAUSED;
303 
304 	if (!txstate)
305 		goto ret_residue;
306 
307 	spin_lock_irqsave(&chan->vc.lock, flags);
308 	vd = vchan_find_desc(&chan->vc, cookie);
309 	if (vd) {
310 		desc = vd2dw_edma_desc(vd);
311 		if (desc)
312 			residue = desc->alloc_sz - desc->xfer_sz;
313 	}
314 	spin_unlock_irqrestore(&chan->vc.lock, flags);
315 
316 ret_residue:
317 	dma_set_residue(txstate, residue);
318 
319 	return ret;
320 }
321 
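/*
 * Common prep routine for scatter-gather and cyclic transfers. After
 * validating the requested direction against the channel direction
 * (local vs. remote DMA usage), the transfer is split into hardware
 * linked lists (chunks), starting a new chunk whenever chan->ll_max
 * bursts have been queued, with one burst per SG entry or cyclic
 * period and SAR/DAR set according to EDMA_DIR_WRITE/READ.
 */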
322 static struct dma_async_tx_descriptor *
323 dw_edma_device_transfer(struct dw_edma_transfer *xfer)
324 {
325 	struct dw_edma_chan *chan = dchan2dw_edma_chan(xfer->dchan);
326 	enum dma_transfer_direction dir = xfer->direction;
327 	phys_addr_t src_addr, dst_addr;
328 	struct scatterlist *sg = NULL;
329 	struct dw_edma_chunk *chunk;
330 	struct dw_edma_burst *burst;
331 	struct dw_edma_desc *desc;
332 	u32 cnt;
333 	int i;
334 
335 	if (!chan->configured)
336 		return NULL;
337 
338 	switch (chan->config.direction) {
339 	case DMA_DEV_TO_MEM: /* local dma */
340 		if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_READ)
341 			break;
342 		return NULL;
343 	case DMA_MEM_TO_DEV: /* local dma */
344 		if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_WRITE)
345 			break;
346 		return NULL;
347 	default: /* remote dma */
348 		if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_READ)
349 			break;
350 		if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_WRITE)
351 			break;
352 		return NULL;
353 	}
354 
355 	if (xfer->cyclic) {
356 		if (!xfer->xfer.cyclic.len || !xfer->xfer.cyclic.cnt)
357 			return NULL;
358 	} else {
359 		if (xfer->xfer.sg.len < 1)
360 			return NULL;
361 	}
362 
363 	desc = dw_edma_alloc_desc(chan);
364 	if (unlikely(!desc))
365 		goto err_alloc;
366 
367 	chunk = dw_edma_alloc_chunk(desc);
368 	if (unlikely(!chunk))
369 		goto err_alloc;
370 
371 	src_addr = chan->config.src_addr;
372 	dst_addr = chan->config.dst_addr;
373 
374 	if (xfer->cyclic) {
375 		cnt = xfer->xfer.cyclic.cnt;
376 	} else {
377 		cnt = xfer->xfer.sg.len;
378 		sg = xfer->xfer.sg.sgl;
379 	}
380 
381 	for (i = 0; i < cnt; i++) {
382 		if (!xfer->cyclic && !sg)
383 			break;
384 
385 		if (chunk->bursts_alloc == chan->ll_max) {
386 			chunk = dw_edma_alloc_chunk(desc);
387 			if (unlikely(!chunk))
388 				goto err_alloc;
389 		}
390 
391 		burst = dw_edma_alloc_burst(chunk);
392 		if (unlikely(!burst))
393 			goto err_alloc;
394 
395 		if (xfer->cyclic)
396 			burst->sz = xfer->xfer.cyclic.len;
397 		else
398 			burst->sz = sg_dma_len(sg);
399 
400 		chunk->ll_region.sz += burst->sz;
401 		desc->alloc_sz += burst->sz;
402 
403 		if (chan->dir == EDMA_DIR_WRITE) {
404 			burst->sar = src_addr;
405 			if (xfer->cyclic) {
406 				burst->dar = xfer->xfer.cyclic.paddr;
407 			} else {
408 				burst->dar = sg_dma_address(sg);
409 				/* Unlike the typical assumption made by
410 				 * other drivers/IPs, the peripheral memory
411 				 * isn't a FIFO here but linear memory,
412 				 * which is why the source and destination
413 				 * addresses are advanced by the same
414 				 * amount (the data length)
415 				 */
416 				src_addr += sg_dma_len(sg);
417 			}
418 		} else {
419 			burst->dar = dst_addr;
420 			if (xfer->cyclic) {
421 				burst->sar = xfer->xfer.cyclic.paddr;
422 			} else {
423 				burst->sar = sg_dma_address(sg);
424 				/* Unlike the typical assumption made by
425 				 * other drivers/IPs, the peripheral memory
426 				 * isn't a FIFO here but linear memory,
427 				 * which is why the source and destination
428 				 * addresses are advanced by the same
429 				 * amount (the data length)
430 				 */
431 				dst_addr += sg_dma_len(sg);
432 			}
433 		}
434 
435 		if (!xfer->cyclic)
436 			sg = sg_next(sg);
437 	}
438 
439 	return vchan_tx_prep(&chan->vc, &desc->vd, xfer->flags);
440 
441 err_alloc:
442 	if (desc)
443 		dw_edma_free_desc(desc);
444 
445 	return NULL;
446 }
447 
448 static struct dma_async_tx_descriptor *
449 dw_edma_device_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
450 			     unsigned int len,
451 			     enum dma_transfer_direction direction,
452 			     unsigned long flags, void *context)
453 {
454 	struct dw_edma_transfer xfer;
455 
456 	xfer.dchan = dchan;
457 	xfer.direction = direction;
458 	xfer.xfer.sg.sgl = sgl;
459 	xfer.xfer.sg.len = len;
460 	xfer.flags = flags;
461 	xfer.cyclic = false;
462 
463 	return dw_edma_device_transfer(&xfer);
464 }
465 
466 static struct dma_async_tx_descriptor *
467 dw_edma_device_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t paddr,
468 			       size_t len, size_t count,
469 			       enum dma_transfer_direction direction,
470 			       unsigned long flags)
471 {
472 	struct dw_edma_transfer xfer;
473 
474 	xfer.dchan = dchan;
475 	xfer.direction = direction;
476 	xfer.xfer.cyclic.paddr = paddr;
477 	xfer.xfer.cyclic.len = len;
478 	xfer.xfer.cyclic.cnt = count;
479 	xfer.flags = flags;
480 	xfer.cyclic = true;
481 
482 	return dw_edma_device_transfer(&xfer);
483 }
484 
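/*
 * "Done" interrupt handler: clear the interrupt and then either kick
 * the next chunk of the current descriptor, complete the descriptor,
 * or honour a pending stop/pause request.
 */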
485 static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
486 {
487 	struct dw_edma_desc *desc;
488 	struct virt_dma_desc *vd;
489 	unsigned long flags;
490 
491 	dw_edma_v0_core_clear_done_int(chan);
492 
493 	spin_lock_irqsave(&chan->vc.lock, flags);
494 	vd = vchan_next_desc(&chan->vc);
495 	if (vd) {
496 		switch (chan->request) {
497 		case EDMA_REQ_NONE:
498 			desc = vd2dw_edma_desc(vd);
499 			if (desc->chunks_alloc) {
500 				chan->status = EDMA_ST_BUSY;
501 				dw_edma_start_transfer(chan);
502 			} else {
503 				list_del(&vd->node);
504 				vchan_cookie_complete(vd);
505 				chan->status = EDMA_ST_IDLE;
506 			}
507 			break;
508 
509 		case EDMA_REQ_STOP:
510 			list_del(&vd->node);
511 			vchan_cookie_complete(vd);
512 			chan->request = EDMA_REQ_NONE;
513 			chan->status = EDMA_ST_IDLE;
514 			break;
515 
516 		case EDMA_REQ_PAUSE:
517 			chan->request = EDMA_REQ_NONE;
518 			chan->status = EDMA_ST_PAUSE;
519 			break;
520 
521 		default:
522 			break;
523 		}
524 	}
525 	spin_unlock_irqrestore(&chan->vc.lock, flags);
526 }
527 
528 static void dw_edma_abort_interrupt(struct dw_edma_chan *chan)
529 {
530 	struct virt_dma_desc *vd;
531 	unsigned long flags;
532 
533 	dw_edma_v0_core_clear_abort_int(chan);
534 
535 	spin_lock_irqsave(&chan->vc.lock, flags);
536 	vd = vchan_next_desc(&chan->vc);
537 	if (vd) {
538 		list_del(&vd->node);
539 		vchan_cookie_complete(vd);
540 	}
541 	spin_unlock_irqrestore(&chan->vc.lock, flags);
542 	chan->request = EDMA_REQ_NONE;
543 	chan->status = EDMA_ST_IDLE;
544 }
545 
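/*
 * Shared body of the write/read IRQ handlers: fetch the done and
 * abort status for the given direction, mask it with the channels
 * routed to this interrupt and dispatch the per-channel handlers.
 */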
546 static irqreturn_t dw_edma_interrupt(int irq, void *data, bool write)
547 {
548 	struct dw_edma_irq *dw_irq = data;
549 	struct dw_edma *dw = dw_irq->dw;
550 	unsigned long total, pos, val;
551 	unsigned long off;
552 	u32 mask;
553 
554 	if (write) {
555 		total = dw->wr_ch_cnt;
556 		off = 0;
557 		mask = dw_irq->wr_mask;
558 	} else {
559 		total = dw->rd_ch_cnt;
560 		off = dw->wr_ch_cnt;
561 		mask = dw_irq->rd_mask;
562 	}
563 
564 	val = dw_edma_v0_core_status_done_int(dw, write ?
565 							  EDMA_DIR_WRITE :
566 							  EDMA_DIR_READ);
567 	val &= mask;
568 	for_each_set_bit(pos, &val, total) {
569 		struct dw_edma_chan *chan = &dw->chan[pos + off];
570 
571 		dw_edma_done_interrupt(chan);
572 	}
573 
574 	val = dw_edma_v0_core_status_abort_int(dw, write ?
575 							   EDMA_DIR_WRITE :
576 							   EDMA_DIR_READ);
577 	val &= mask;
578 	for_each_set_bit(pos, &val, total) {
579 		struct dw_edma_chan *chan = &dw->chan[pos + off];
580 
581 		dw_edma_abort_interrupt(chan);
582 	}
583 
584 	return IRQ_HANDLED;
585 }
586 
587 static inline irqreturn_t dw_edma_interrupt_write(int irq, void *data)
588 {
589 	return dw_edma_interrupt(irq, data, true);
590 }
591 
592 static inline irqreturn_t dw_edma_interrupt_read(int irq, void *data)
593 {
594 	return dw_edma_interrupt(irq, data, false);
595 }
596 
597 static irqreturn_t dw_edma_interrupt_common(int irq, void *data)
598 {
599 	dw_edma_interrupt(irq, data, true);
600 	dw_edma_interrupt(irq, data, false);
601 
602 	return IRQ_HANDLED;
603 }
604 
605 static int dw_edma_alloc_chan_resources(struct dma_chan *dchan)
606 {
607 	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
608 
609 	if (chan->status != EDMA_ST_IDLE)
610 		return -EBUSY;
611 
612 	pm_runtime_get(chan->chip->dev);
613 
614 	return 0;
615 }
616 
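/*
 * Retry dw_edma_device_terminate_all() for up to 5 seconds so a busy
 * channel can wind down before the runtime PM reference taken in
 * dw_edma_alloc_chan_resources() is dropped.
 */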
617 static void dw_edma_free_chan_resources(struct dma_chan *dchan)
618 {
619 	unsigned long timeout = jiffies + msecs_to_jiffies(5000);
620 	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
621 	int ret;
622 
623 	while (time_before(jiffies, timeout)) {
624 		ret = dw_edma_device_terminate_all(dchan);
625 		if (!ret)
626 			break;
627 
628 		if (time_after_eq(jiffies, timeout))
629 			return;
630 
631 		cpu_relax();
632 	}
633 
634 	pm_runtime_put(chan->chip->dev);
635 }
636 
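/*
 * Set up either the write or the read dma_device: carve per-channel
 * linked-list and data slices out of dw->ll_region/dw->dt_region,
 * bind every channel to an IRQ vector (updating wr_mask/rd_mask),
 * then fill in the dmaengine capabilities and callbacks and register
 * the device.
 */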
637 static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
638 				 u32 wr_alloc, u32 rd_alloc)
639 {
640 	struct dw_edma_region *dt_region;
641 	struct device *dev = chip->dev;
642 	struct dw_edma *dw = chip->dw;
643 	struct dw_edma_chan *chan;
644 	size_t ll_chunk, dt_chunk;
645 	struct dw_edma_irq *irq;
646 	struct dma_device *dma;
647 	u32 i, j, cnt, ch_cnt;
648 	u32 alloc, off_alloc;
649 	int err = 0;
650 	u32 pos;
651 
652 	ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;
653 	ll_chunk = dw->ll_region.sz;
654 	dt_chunk = dw->dt_region.sz;
655 
656 	/* Calculate linked list chunk for each channel */
657 	ll_chunk /= roundup_pow_of_two(ch_cnt);
658 
659 	/* Calculate data transfer chunk for each channel */
660 	dt_chunk /= roundup_pow_of_two(ch_cnt);
661 
662 	if (write) {
663 		i = 0;
664 		cnt = dw->wr_ch_cnt;
665 		dma = &dw->wr_edma;
666 		alloc = wr_alloc;
667 		off_alloc = 0;
668 	} else {
669 		i = dw->wr_ch_cnt;
670 		cnt = dw->rd_ch_cnt;
671 		dma = &dw->rd_edma;
672 		alloc = rd_alloc;
673 		off_alloc = wr_alloc;
674 	}
675 
676 	INIT_LIST_HEAD(&dma->channels);
677 	for (j = 0; (alloc || dw->nr_irqs == 1) && j < cnt; j++, i++) {
678 		chan = &dw->chan[i];
679 
680 		dt_region = devm_kzalloc(dev, sizeof(*dt_region), GFP_KERNEL);
681 		if (!dt_region)
682 			return -ENOMEM;
683 
684 		chan->vc.chan.private = dt_region;
685 
686 		chan->chip = chip;
687 		chan->id = j;
688 		chan->dir = write ? EDMA_DIR_WRITE : EDMA_DIR_READ;
689 		chan->configured = false;
690 		chan->request = EDMA_REQ_NONE;
691 		chan->status = EDMA_ST_IDLE;
692 
693 		chan->ll_off = (ll_chunk * i);
694 		chan->ll_max = (ll_chunk / EDMA_LL_SZ) - 1;
695 
696 		chan->dt_off = (dt_chunk * i);
697 
698 		dev_vdbg(dev, "L. List:\tChannel %s[%u] off=0x%.8lx, max_cnt=%u\n",
699 			 write ? "write" : "read", j,
700 			 chan->ll_off, chan->ll_max);
701 
702 		if (dw->nr_irqs == 1)
703 			pos = 0;
704 		else
705 			pos = off_alloc + (j % alloc);
706 
707 		irq = &dw->irq[pos];
708 
709 		if (write)
710 			irq->wr_mask |= BIT(j);
711 		else
712 			irq->rd_mask |= BIT(j);
713 
714 		irq->dw = dw;
715 		memcpy(&chan->msi, &irq->msi, sizeof(chan->msi));
716 
717 		dev_vdbg(dev, "MSI:\t\tChannel %s[%u] addr=0x%.8x%.8x, data=0x%.8x\n",
718 			 write ? "write" : "read", j,
719 			 chan->msi.address_hi, chan->msi.address_lo,
720 			 chan->msi.data);
721 
722 		chan->vc.desc_free = vchan_free_desc;
723 		vchan_init(&chan->vc, dma);
724 
725 		dt_region->paddr = dw->dt_region.paddr + chan->dt_off;
726 		dt_region->vaddr = dw->dt_region.vaddr + chan->dt_off;
727 		dt_region->sz = dt_chunk;
728 
729 		dev_vdbg(dev, "Data:\tChannel %s[%u] off=0x%.8lx\n",
730 			 write ? "write" : "read", j, chan->dt_off);
731 
732 		dw_edma_v0_core_device_config(chan);
733 	}
734 
735 	/* Set DMA channel capabilities */
736 	dma_cap_zero(dma->cap_mask);
737 	dma_cap_set(DMA_SLAVE, dma->cap_mask);
738 	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
739 	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
740 	dma->directions = BIT(write ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV);
741 	dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
742 	dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
743 	dma->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
744 	dma->chancnt = cnt;
745 
746 	/* Set DMA channel callbacks */
747 	dma->dev = chip->dev;
748 	dma->device_alloc_chan_resources = dw_edma_alloc_chan_resources;
749 	dma->device_free_chan_resources = dw_edma_free_chan_resources;
750 	dma->device_config = dw_edma_device_config;
751 	dma->device_pause = dw_edma_device_pause;
752 	dma->device_resume = dw_edma_device_resume;
753 	dma->device_terminate_all = dw_edma_device_terminate_all;
754 	dma->device_issue_pending = dw_edma_device_issue_pending;
755 	dma->device_tx_status = dw_edma_device_tx_status;
756 	dma->device_prep_slave_sg = dw_edma_device_prep_slave_sg;
757 	dma->device_prep_dma_cyclic = dw_edma_device_prep_dma_cyclic;
758 
759 	dma_set_max_seg_size(dma->dev, U32_MAX);
760 
761 	/* Register DMA device */
762 	err = dma_async_device_register(dma);
763 
764 	return err;
765 }
766 
767 static inline void dw_edma_dec_irq_alloc(int *nr_irqs, u32 *alloc, u16 cnt)
768 {
769 	if (*nr_irqs && *alloc < cnt) {
770 		(*alloc)++;
771 		(*nr_irqs)--;
772 	}
773 }
774 
775 static inline void dw_edma_add_irq_mask(u32 *mask, u32 alloc, u16 cnt)
776 {
777 	while (*mask * alloc < cnt)
778 		(*mask)++;
779 }
780 
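/*
 * Request the IRQ vectors: a single vector is shared by all channels
 * through the common handler, otherwise the available vectors are
 * split between write and read channels as evenly as possible. The
 * cached MSI message of each vector is saved for later use by the
 * channels.
 */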
781 static int dw_edma_irq_request(struct dw_edma_chip *chip,
782 			       u32 *wr_alloc, u32 *rd_alloc)
783 {
784 	struct device *dev = chip->dev;
785 	struct dw_edma *dw = chip->dw;
786 	u32 wr_mask = 1;
787 	u32 rd_mask = 1;
788 	int i, err = 0;
789 	u32 ch_cnt;
790 	int irq;
791 
792 	ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;
793 
794 	if (dw->nr_irqs < 1)
795 		return -EINVAL;
796 
797 	if (dw->nr_irqs == 1) {
798 		/* Common IRQ shared among all channels */
799 		irq = dw->ops->irq_vector(dev, 0);
800 		err = request_irq(irq, dw_edma_interrupt_common,
801 				  IRQF_SHARED, dw->name, &dw->irq[0]);
802 		if (err) {
803 			dw->nr_irqs = 0;
804 			return err;
805 		}
806 
807 		if (irq_get_msi_desc(irq))
808 			get_cached_msi_msg(irq, &dw->irq[0].msi);
809 	} else {
810 		/* Distribute IRQs equally among all channels */
811 		int tmp = dw->nr_irqs;
812 
813 		while (tmp && (*wr_alloc + *rd_alloc) < ch_cnt) {
814 			dw_edma_dec_irq_alloc(&tmp, wr_alloc, dw->wr_ch_cnt);
815 			dw_edma_dec_irq_alloc(&tmp, rd_alloc, dw->rd_ch_cnt);
816 		}
817 
818 		dw_edma_add_irq_mask(&wr_mask, *wr_alloc, dw->wr_ch_cnt);
819 		dw_edma_add_irq_mask(&rd_mask, *rd_alloc, dw->rd_ch_cnt);
820 
821 		for (i = 0; i < (*wr_alloc + *rd_alloc); i++) {
822 			irq = dw->ops->irq_vector(dev, i);
823 			err = request_irq(irq,
824 					  i < *wr_alloc ?
825 						dw_edma_interrupt_write :
826 						dw_edma_interrupt_read,
827 					  IRQF_SHARED, dw->name,
828 					  &dw->irq[i]);
829 			if (err) {
830 				dw->nr_irqs = i;
831 				return err;
832 			}
833 
834 			if (irq_get_msi_desc(irq))
835 				get_cached_msi_msg(irq, &dw->irq[i].msi);
836 		}
837 
838 		dw->nr_irqs = i;
839 	}
840 
841 	return err;
842 }
843 
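/**
 * dw_edma_probe() - Register a DesignWare eDMA controller instance
 * @chip: controller description provided by the glue driver
 *
 * Validates the chip description, discovers how many write and read
 * channels the hardware implements, requests the IRQ vectors and
 * registers one dma_device per direction.
 *
 * Return: 0 on success or a negative error code.
 */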
844 int dw_edma_probe(struct dw_edma_chip *chip)
845 {
846 	struct device *dev;
847 	struct dw_edma *dw;
848 	u32 wr_alloc = 0;
849 	u32 rd_alloc = 0;
850 	int i, err;
851 
852 	if (!chip)
853 		return -EINVAL;
854 
855 	dev = chip->dev;
856 	if (!dev)
857 		return -EINVAL;
858 
859 	dw = chip->dw;
860 	if (!dw || !dw->irq || !dw->ops || !dw->ops->irq_vector)
861 		return -EINVAL;
862 
863 	raw_spin_lock_init(&dw->lock);
864 
865 	/* Find out how many write channels are supported by hardware */
866 	dw->wr_ch_cnt = dw_edma_v0_core_ch_count(dw, EDMA_DIR_WRITE);
867 	if (!dw->wr_ch_cnt)
868 		return -EINVAL;
869 
870 	/* Find out how many read channels are supported by hardware */
871 	dw->rd_ch_cnt = dw_edma_v0_core_ch_count(dw, EDMA_DIR_READ);
872 	if (!dw->rd_ch_cnt)
873 		return -EINVAL;
874 
875 	dev_vdbg(dev, "Channels:\twrite=%d, read=%d\n",
876 		 dw->wr_ch_cnt, dw->rd_ch_cnt);
877 
878 	/* Allocate channels */
879 	dw->chan = devm_kcalloc(dev, dw->wr_ch_cnt + dw->rd_ch_cnt,
880 				sizeof(*dw->chan), GFP_KERNEL);
881 	if (!dw->chan)
882 		return -ENOMEM;
883 
884 	snprintf(dw->name, sizeof(dw->name), "dw-edma-core:%d", chip->id);
885 
886 	/* Disable eDMA, only to establish a known initial state */
887 	dw_edma_v0_core_off(dw);
888 
889 	/* Request IRQs */
890 	err = dw_edma_irq_request(chip, &wr_alloc, &rd_alloc);
891 	if (err)
892 		return err;
893 
894 	/* Setup write channels */
895 	err = dw_edma_channel_setup(chip, true, wr_alloc, rd_alloc);
896 	if (err)
897 		goto err_irq_free;
898 
899 	/* Setup read channels */
900 	err = dw_edma_channel_setup(chip, false, wr_alloc, rd_alloc);
901 	if (err)
902 		goto err_irq_free;
903 
904 	/* Power management */
905 	pm_runtime_enable(dev);
906 
907 	/* Turn debugfs on */
908 	dw_edma_v0_core_debugfs_on(chip);
909 
910 	return 0;
911 
912 err_irq_free:
913 	for (i = (dw->nr_irqs - 1); i >= 0; i--)
914 		free_irq(dw->ops->irq_vector(dev, i), &dw->irq[i]);
915 
916 	dw->nr_irqs = 0;
917 
918 	return err;
919 }
920 EXPORT_SYMBOL_GPL(dw_edma_probe);
921 
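/**
 * dw_edma_remove() - Tear down a DesignWare eDMA controller instance
 * @chip: controller description used at probe time
 *
 * Disables the eDMA hardware, frees the requested IRQs, kills the
 * per-channel tasklets and unregisters the write and read
 * dma_devices.
 *
 * Return: always 0.
 */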
922 int dw_edma_remove(struct dw_edma_chip *chip)
923 {
924 	struct dw_edma_chan *chan, *_chan;
925 	struct device *dev = chip->dev;
926 	struct dw_edma *dw = chip->dw;
927 	int i;
928 
929 	/* Disable eDMA */
930 	dw_edma_v0_core_off(dw);
931 
932 	/* Free irqs */
933 	for (i = (dw->nr_irqs - 1); i >= 0; i--)
934 		free_irq(dw->ops->irq_vector(dev, i), &dw->irq[i]);
935 
936 	/* Power management */
937 	pm_runtime_disable(dev);
938 
939 	list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels,
940 				 vc.chan.device_node) {
941 		list_del(&chan->vc.chan.device_node);
942 		tasklet_kill(&chan->vc.task);
943 	}
944 
945 	list_for_each_entry_safe(chan, _chan, &dw->rd_edma.channels,
946 				 vc.chan.device_node) {
947 		list_del(&chan->vc.chan.device_node);
948 		tasklet_kill(&chan->vc.task);
949 	}
950 
951 	/* Deregister eDMA device */
952 	dma_async_device_unregister(&dw->wr_edma);
953 	dma_async_device_unregister(&dw->rd_edma);
954 
955 	/* Turn debugfs off */
956 	dw_edma_v0_core_debugfs_off();
957 
958 	return 0;
959 }
960 EXPORT_SYMBOL_GPL(dw_edma_remove);
961 
962 MODULE_LICENSE("GPL v2");
963 MODULE_DESCRIPTION("Synopsys DesignWare eDMA controller core driver");
964 MODULE_AUTHOR("Gustavo Pimentel <gustavo.pimentel@synopsys.com>");
965