// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
 * Synopsys DesignWare eDMA core driver
 *
 * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/dma/edma.h>
#include <linux/dma-mapping.h>
#include <linux/string_choices.h>

#include "dw-edma-core.h"
#include "dw-edma-v0-core.h"
#include "dw-hdma-v0-core.h"
#include "../dmaengine.h"
#include "../virt-dma.h"

static inline
struct dw_edma_desc *vd2dw_edma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct dw_edma_desc, vd);
}

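/*
 * Return the address the eDMA engine should use on the PCI bus for a given
 * CPU address. The optional ->pci_address() chip callback lets the glue
 * driver apply an address translation; without one, the CPU address is
 * assumed to be bus-visible as-is.
 */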
static inline
u64 dw_edma_get_pci_address(struct dw_edma_chan *chan, phys_addr_t cpu_addr)
{
	struct dw_edma_chip *chip = chan->dw->chip;

	if (chip->ops->pci_address)
		return chip->ops->pci_address(chip->dev, cpu_addr);

	return cpu_addr;
}

static struct dw_edma_burst *dw_edma_alloc_burst(struct dw_edma_chunk *chunk)
{
	struct dw_edma_burst *burst;

	burst = kzalloc(sizeof(*burst), GFP_NOWAIT);
	if (unlikely(!burst))
		return NULL;

	INIT_LIST_HEAD(&burst->list);
	if (chunk->burst) {
		/* Create and add new element into the linked list */
		chunk->bursts_alloc++;
		list_add_tail(&burst->list, &chunk->burst->list);
	} else {
		/* List head */
		chunk->bursts_alloc = 0;
		chunk->burst = burst;
	}

	return burst;
}

static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
{
	struct dw_edma_chip *chip = desc->chan->dw->chip;
	struct dw_edma_chan *chan = desc->chan;
	struct dw_edma_chunk *chunk;

	chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT);
	if (unlikely(!chunk))
		return NULL;

	INIT_LIST_HEAD(&chunk->list);
	chunk->chan = chan;
	/*
	 * Toggle the change bit (CB) in each chunk; this is the mechanism
	 * that informs the eDMA HW block that a new linked list is ready
	 * to be consumed:
	 *  - odd chunks originate a CB equal to 0
	 *  - even chunks originate a CB equal to 1
	 */
	chunk->cb = !(desc->chunks_alloc % 2);
	if (chan->dir == EDMA_DIR_WRITE) {
		chunk->ll_region.paddr = chip->ll_region_wr[chan->id].paddr;
		chunk->ll_region.vaddr = chip->ll_region_wr[chan->id].vaddr;
	} else {
		chunk->ll_region.paddr = chip->ll_region_rd[chan->id].paddr;
		chunk->ll_region.vaddr = chip->ll_region_rd[chan->id].vaddr;
	}

	if (desc->chunk) {
		/* Create and add new element into the linked list */
		if (!dw_edma_alloc_burst(chunk)) {
			kfree(chunk);
			return NULL;
		}
		desc->chunks_alloc++;
		list_add_tail(&chunk->list, &desc->chunk->list);
	} else {
		/* List head */
		chunk->burst = NULL;
		desc->chunks_alloc = 0;
		desc->chunk = chunk;
	}

	return chunk;
}

static struct dw_edma_desc *dw_edma_alloc_desc(struct dw_edma_chan *chan)
{
	struct dw_edma_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (unlikely(!desc))
		return NULL;

	desc->chan = chan;
	if (!dw_edma_alloc_chunk(desc)) {
		kfree(desc);
		return NULL;
	}

	return desc;
}

static void dw_edma_free_burst(struct dw_edma_chunk *chunk)
{
	struct dw_edma_burst *child, *_next;

	/* Remove all the list elements */
	list_for_each_entry_safe(child, _next, &chunk->burst->list, list) {
		list_del(&child->list);
		kfree(child);
		chunk->bursts_alloc--;
	}

	/*
	 * Remove the list head: after the loop above, child points at the
	 * container of the list head, i.e. chunk->burst itself.
	 */
	kfree(child);
	chunk->burst = NULL;
}

static void dw_edma_free_chunk(struct dw_edma_desc *desc)
{
	struct dw_edma_chunk *child, *_next;

	if (!desc->chunk)
		return;

	/* Remove all the list elements */
	list_for_each_entry_safe(child, _next, &desc->chunk->list, list) {
		dw_edma_free_burst(child);
		list_del(&child->list);
		kfree(child);
		desc->chunks_alloc--;
	}

	/*
	 * Remove the list head: after the loop above, child points at the
	 * container of the list head, i.e. desc->chunk itself.
	 */
	kfree(child);
	desc->chunk = NULL;
}

static void dw_edma_free_desc(struct dw_edma_desc *desc)
{
	dw_edma_free_chunk(desc);
	kfree(desc);
}

static void vchan_free_desc(struct virt_dma_desc *vdesc)
{
	dw_edma_free_desc(vd2dw_edma_desc(vdesc));
}

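/*
 * Kick off the next chunk of the descriptor at the head of the issued list.
 * The consumed chunk is freed immediately after being handed to the HW.
 * Returns 1 if a transfer was started, 0 if there was nothing to do.
 */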
static int dw_edma_start_transfer(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->dw;
	struct dw_edma_chunk *child;
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&chan->vc);
	if (!vd)
		return 0;

	desc = vd2dw_edma_desc(vd);
	if (!desc)
		return 0;

	child = list_first_entry_or_null(&desc->chunk->list,
					 struct dw_edma_chunk, list);
	if (!child)
		return 0;

	dw_edma_core_start(dw, child, !desc->xfer_sz);
	desc->xfer_sz += child->ll_region.sz;
	dw_edma_free_burst(child);
	list_del(&child->list);
	kfree(child);
	desc->chunks_alloc--;

	return 1;
}

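/*
 * Whether a channel serves DEV_TO_MEM or MEM_TO_DEV depends on where the
 * eDMA block sits relative to the CPU driving it; see the diagram and the
 * explanation in dw_edma_device_transfer() below.
 */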
static void dw_edma_device_caps(struct dma_chan *dchan,
				struct dma_slave_caps *caps)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);

	if (chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
		if (chan->dir == EDMA_DIR_READ)
			caps->directions = BIT(DMA_DEV_TO_MEM);
		else
			caps->directions = BIT(DMA_MEM_TO_DEV);
	} else {
		if (chan->dir == EDMA_DIR_WRITE)
			caps->directions = BIT(DMA_DEV_TO_MEM);
		else
			caps->directions = BIT(DMA_MEM_TO_DEV);
	}
}

static int dw_edma_device_config(struct dma_chan *dchan,
				 struct dma_slave_config *config)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);

	memcpy(&chan->config, config, sizeof(*config));
	chan->configured = true;

	return 0;
}

static int dw_edma_device_pause(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;

	if (!chan->configured)
		err = -EPERM;
	else if (chan->status != EDMA_ST_BUSY)
		err = -EPERM;
	else if (chan->request != EDMA_REQ_NONE)
		err = -EPERM;
	else
		chan->request = EDMA_REQ_PAUSE;

	return err;
}

static int dw_edma_device_resume(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;

	if (!chan->configured) {
		err = -EPERM;
	} else if (chan->status != EDMA_ST_PAUSE) {
		err = -EPERM;
	} else if (chan->request != EDMA_REQ_NONE) {
		err = -EPERM;
	} else {
		chan->status = EDMA_ST_BUSY;
		dw_edma_start_transfer(chan);
	}

	return err;
}

static int dw_edma_device_terminate_all(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;

	if (!chan->configured) {
		/* Do nothing */
	} else if (chan->status == EDMA_ST_PAUSE) {
		chan->status = EDMA_ST_IDLE;
		chan->configured = false;
	} else if (chan->status == EDMA_ST_IDLE) {
		chan->configured = false;
	} else if (dw_edma_core_ch_status(chan) == DMA_COMPLETE) {
		/*
		 * The channel is in a false BUSY state, most likely because
		 * an interrupt was missed or lost.
		 */
		chan->status = EDMA_ST_IDLE;
		chan->configured = false;
	} else if (chan->request > EDMA_REQ_PAUSE) {
		err = -EPERM;
	} else {
		chan->request = EDMA_REQ_STOP;
	}

	return err;
}

static void dw_edma_device_issue_pending(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	unsigned long flags;

	if (!chan->configured)
		return;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (vchan_issue_pending(&chan->vc) && chan->request == EDMA_REQ_NONE &&
	    chan->status == EDMA_ST_IDLE) {
		chan->status = EDMA_ST_BUSY;
		dw_edma_start_transfer(chan);
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static enum dma_status
dw_edma_device_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
			 struct dma_tx_state *txstate)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	u32 residue = 0;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	if (ret == DMA_IN_PROGRESS && chan->status == EDMA_ST_PAUSE)
		ret = DMA_PAUSED;

	if (!txstate)
		goto ret_residue;

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_find_desc(&chan->vc, cookie);
	if (vd) {
		desc = vd2dw_edma_desc(vd);
		if (desc)
			residue = desc->alloc_sz - desc->xfer_sz;
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);

ret_residue:
	dma_set_residue(txstate, residue);

	return ret;
}

static struct dma_async_tx_descriptor *
dw_edma_device_transfer(struct dw_edma_transfer *xfer)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(xfer->dchan);
	enum dma_transfer_direction dir = xfer->direction;
	struct scatterlist *sg = NULL;
	struct dw_edma_chunk *chunk;
	struct dw_edma_burst *burst;
	struct dw_edma_desc *desc;
	u64 src_addr, dst_addr;
	size_t fsz = 0;
	u32 cnt = 0;
	int i;

	if (!chan->configured)
		return NULL;

	/*
	 * Local Root Port/End-point              Remote End-point
	 * +-----------------------+ PCIe bus +----------------------+
	 * |                       |    +-+   |                      |
	 * |    DEV_TO_MEM   Rx Ch <----+ +---+ Tx Ch  DEV_TO_MEM    |
	 * |                       |    | |   |                      |
	 * |    MEM_TO_DEV   Tx Ch +----+ +---> Rx Ch  MEM_TO_DEV    |
	 * |                       |    +-+   |                      |
	 * +-----------------------+          +----------------------+
	 *
	 * 1. Normal logic:
	 * If eDMA is embedded into the DW PCIe RP/EP and controlled from the
	 * CPU/Application side, the Rx channel (EDMA_DIR_READ) will be used
	 * for the device read operations (DEV_TO_MEM) and the Tx channel
	 * (EDMA_DIR_WRITE) - for the write operations (MEM_TO_DEV).
	 *
	 * 2. Inverted logic:
	 * If eDMA is embedded into a Remote PCIe EP and is controlled by the
	 * MWr/MRd TLPs sent from the CPU's PCIe host controller, the Tx
	 * channel (EDMA_DIR_WRITE) will be used for the device read operations
	 * (DEV_TO_MEM) and the Rx channel (EDMA_DIR_READ) - for the write
	 * operations (MEM_TO_DEV).
	 *
	 * It is the client driver's responsibility to choose a proper channel
	 * for the DMA transfers.
	 */
	if (chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
		if ((chan->dir == EDMA_DIR_READ && dir != DMA_DEV_TO_MEM) ||
		    (chan->dir == EDMA_DIR_WRITE && dir != DMA_MEM_TO_DEV))
			return NULL;
	} else {
		if ((chan->dir == EDMA_DIR_WRITE && dir != DMA_DEV_TO_MEM) ||
		    (chan->dir == EDMA_DIR_READ && dir != DMA_MEM_TO_DEV))
			return NULL;
	}

	if (xfer->type == EDMA_XFER_CYCLIC) {
		if (!xfer->xfer.cyclic.len || !xfer->xfer.cyclic.cnt)
			return NULL;
	} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
		if (xfer->xfer.sg.len < 1)
			return NULL;
	} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
		if (!xfer->xfer.il->numf || xfer->xfer.il->frame_size < 1)
			return NULL;
		if (!xfer->xfer.il->src_inc || !xfer->xfer.il->dst_inc)
			return NULL;
	} else {
		return NULL;
	}

	desc = dw_edma_alloc_desc(chan);
	if (unlikely(!desc))
		goto err_alloc;

	chunk = dw_edma_alloc_chunk(desc);
	if (unlikely(!chunk))
		goto err_alloc;

	if (xfer->type == EDMA_XFER_INTERLEAVED) {
		src_addr = xfer->xfer.il->src_start;
		dst_addr = xfer->xfer.il->dst_start;
	} else {
		src_addr = chan->config.src_addr;
		dst_addr = chan->config.dst_addr;
	}

	if (dir == DMA_DEV_TO_MEM)
		src_addr = dw_edma_get_pci_address(chan, (phys_addr_t)src_addr);
	else
		dst_addr = dw_edma_get_pci_address(chan, (phys_addr_t)dst_addr);

	if (xfer->type == EDMA_XFER_CYCLIC) {
		cnt = xfer->xfer.cyclic.cnt;
	} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
		cnt = xfer->xfer.sg.len;
		sg = xfer->xfer.sg.sgl;
	} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
		cnt = xfer->xfer.il->numf * xfer->xfer.il->frame_size;
		fsz = xfer->xfer.il->frame_size;
	}

	for (i = 0; i < cnt; i++) {
		if (xfer->type == EDMA_XFER_SCATTER_GATHER && !sg)
			break;

		if (chunk->bursts_alloc == chan->ll_max) {
			chunk = dw_edma_alloc_chunk(desc);
			if (unlikely(!chunk))
				goto err_alloc;
		}

		burst = dw_edma_alloc_burst(chunk);
		if (unlikely(!burst))
			goto err_alloc;

		if (xfer->type == EDMA_XFER_CYCLIC)
			burst->sz = xfer->xfer.cyclic.len;
		else if (xfer->type == EDMA_XFER_SCATTER_GATHER)
			burst->sz = sg_dma_len(sg);
		else if (xfer->type == EDMA_XFER_INTERLEAVED)
			burst->sz = xfer->xfer.il->sgl[i % fsz].size;

		chunk->ll_region.sz += burst->sz;
		desc->alloc_sz += burst->sz;

		if (dir == DMA_DEV_TO_MEM) {
			burst->sar = src_addr;
			if (xfer->type == EDMA_XFER_CYCLIC) {
				burst->dar = xfer->xfer.cyclic.paddr;
			} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
				src_addr += sg_dma_len(sg);
				burst->dar = sg_dma_address(sg);
				/*
				 * Unlike the typical assumption made by
				 * other drivers/IPs, the peripheral memory
				 * here isn't a FIFO but linear memory,
				 * which is why the source and destination
				 * addresses are advanced by the same amount
				 * (the data length).
				 */
			} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
				burst->dar = dst_addr;
			}
		} else {
			burst->dar = dst_addr;
			if (xfer->type == EDMA_XFER_CYCLIC) {
				burst->sar = xfer->xfer.cyclic.paddr;
			} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
				dst_addr += sg_dma_len(sg);
				burst->sar = sg_dma_address(sg);
				/*
				 * Unlike the typical assumption made by
				 * other drivers/IPs, the peripheral memory
				 * here isn't a FIFO but linear memory,
				 * which is why the source and destination
				 * addresses are advanced by the same amount
				 * (the data length).
				 */
			} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
				burst->sar = src_addr;
			}
		}

		if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
			sg = sg_next(sg);
		} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
			struct dma_interleaved_template *il = xfer->xfer.il;
			struct data_chunk *dc = &il->sgl[i % fsz];

			src_addr += burst->sz;
			if (il->src_sgl)
				src_addr += dmaengine_get_src_icg(il, dc);

			dst_addr += burst->sz;
			if (il->dst_sgl)
				dst_addr += dmaengine_get_dst_icg(il, dc);
		}
	}

	return vchan_tx_prep(&chan->vc, &desc->vd, xfer->flags);

err_alloc:
	if (desc)
		dw_edma_free_desc(desc);

	return NULL;
}

static struct dma_async_tx_descriptor *
dw_edma_device_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
			     unsigned int len,
			     enum dma_transfer_direction direction,
			     unsigned long flags, void *context)
{
	struct dw_edma_transfer xfer;

	xfer.dchan = dchan;
	xfer.direction = direction;
	xfer.xfer.sg.sgl = sgl;
	xfer.xfer.sg.len = len;
	xfer.flags = flags;
	xfer.type = EDMA_XFER_SCATTER_GATHER;

	return dw_edma_device_transfer(&xfer);
}

static struct dma_async_tx_descriptor *
dw_edma_device_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t paddr,
			       size_t len, size_t count,
			       enum dma_transfer_direction direction,
			       unsigned long flags)
{
	struct dw_edma_transfer xfer;

	xfer.dchan = dchan;
	xfer.direction = direction;
	xfer.xfer.cyclic.paddr = paddr;
	xfer.xfer.cyclic.len = len;
	xfer.xfer.cyclic.cnt = count;
	xfer.flags = flags;
	xfer.type = EDMA_XFER_CYCLIC;

	return dw_edma_device_transfer(&xfer);
}

static struct dma_async_tx_descriptor *
dw_edma_device_prep_interleaved_dma(struct dma_chan *dchan,
				    struct dma_interleaved_template *ilt,
				    unsigned long flags)
{
	struct dw_edma_transfer xfer;

	xfer.dchan = dchan;
	xfer.direction = ilt->dir;
	xfer.xfer.il = ilt;
	xfer.flags = flags;
	xfer.type = EDMA_XFER_INTERLEAVED;

	return dw_edma_device_transfer(&xfer);
}

static void dw_hdma_set_callback_result(struct virt_dma_desc *vd,
					enum dmaengine_tx_result result)
{
	u32 residue = 0;
	struct dw_edma_desc *desc;
	struct dmaengine_result *res;

	if (!vd->tx.callback_result)
		return;

	desc = vd2dw_edma_desc(vd);
	if (desc)
		residue = desc->alloc_sz - desc->xfer_sz;

	res = &vd->tx_result;
	res->result = result;
	res->residue = residue;
}

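/*
 * Transfer-done interrupt handling: complete the descriptor once all of its
 * chunks have been consumed, otherwise start the next chunk, and honour any
 * pending stop/pause request.
 */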
static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
{
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_next_desc(&chan->vc);
	if (vd) {
		switch (chan->request) {
		case EDMA_REQ_NONE:
			desc = vd2dw_edma_desc(vd);
			if (!desc->chunks_alloc) {
				dw_hdma_set_callback_result(vd,
							    DMA_TRANS_NOERROR);
				list_del(&vd->node);
				vchan_cookie_complete(vd);
			}

			/*
			 * Continue transferring if there are remaining
			 * chunks or issued requests.
			 */
			chan->status = dw_edma_start_transfer(chan) ? EDMA_ST_BUSY : EDMA_ST_IDLE;
			break;

		case EDMA_REQ_STOP:
			list_del(&vd->node);
			vchan_cookie_complete(vd);
			chan->request = EDMA_REQ_NONE;
			chan->status = EDMA_ST_IDLE;
			break;

		case EDMA_REQ_PAUSE:
			chan->request = EDMA_REQ_NONE;
			chan->status = EDMA_ST_PAUSE;
			break;

		default:
			break;
		}
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static void dw_edma_abort_interrupt(struct dw_edma_chan *chan)
{
	struct virt_dma_desc *vd;
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_next_desc(&chan->vc);
	if (vd) {
		dw_hdma_set_callback_result(vd, DMA_TRANS_ABORTED);
		list_del(&vd->node);
		vchan_cookie_complete(vd);
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	chan->request = EDMA_REQ_NONE;
	chan->status = EDMA_ST_IDLE;
}

static inline irqreturn_t dw_edma_interrupt_write(int irq, void *data)
{
	struct dw_edma_irq *dw_irq = data;

	return dw_edma_core_handle_int(dw_irq, EDMA_DIR_WRITE,
				       dw_edma_done_interrupt,
				       dw_edma_abort_interrupt);
}

static inline irqreturn_t dw_edma_interrupt_read(int irq, void *data)
{
	struct dw_edma_irq *dw_irq = data;

	return dw_edma_core_handle_int(dw_irq, EDMA_DIR_READ,
				       dw_edma_done_interrupt,
				       dw_edma_abort_interrupt);
}

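/*
 * Handler for the single shared vector case: both directions are checked,
 * and the OR of the two irqreturn_t values reports IRQ_HANDLED if either
 * direction had work to do.
 */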
static irqreturn_t dw_edma_interrupt_common(int irq, void *data)
{
	irqreturn_t ret = IRQ_NONE;

	ret |= dw_edma_interrupt_write(irq, data);
	ret |= dw_edma_interrupt_read(irq, data);

	return ret;
}

static int dw_edma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);

	if (chan->status != EDMA_ST_IDLE)
		return -EBUSY;

	return 0;
}

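/*
 * Poll dw_edma_device_terminate_all() for up to 5 seconds, giving an
 * in-flight transfer a chance to finish before the channel resources are
 * released.
 */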
static void dw_edma_free_chan_resources(struct dma_chan *dchan)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(5000);
	int ret;

	while (time_before(jiffies, timeout)) {
		ret = dw_edma_device_terminate_all(dchan);
		if (!ret)
			break;

		if (time_after_eq(jiffies, timeout))
			return;

		cpu_relax();
	}
}

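/*
 * Initialize all channels (write channels first, then read channels), wire
 * each one to its IRQ vector according to the wr_alloc/rd_alloc split
 * computed by dw_edma_irq_request(), and register the dmaengine device.
 */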
static int dw_edma_channel_setup(struct dw_edma *dw, u32 wr_alloc, u32 rd_alloc)
{
	struct dw_edma_chip *chip = dw->chip;
	struct device *dev = chip->dev;
	struct dw_edma_chan *chan;
	struct dw_edma_irq *irq;
	struct dma_device *dma;
	u32 i, ch_cnt;
	u32 pos;

	ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;
	dma = &dw->dma;

	INIT_LIST_HEAD(&dma->channels);

	for (i = 0; i < ch_cnt; i++) {
		chan = &dw->chan[i];

		chan->dw = dw;

		if (i < dw->wr_ch_cnt) {
			chan->id = i;
			chan->dir = EDMA_DIR_WRITE;
		} else {
			chan->id = i - dw->wr_ch_cnt;
			chan->dir = EDMA_DIR_READ;
		}

		chan->configured = false;
		chan->request = EDMA_REQ_NONE;
		chan->status = EDMA_ST_IDLE;

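		/*
		 * Size the linked list for this channel; one entry is set
		 * aside (hence the "- 1" below) for the link element that
		 * chains consecutive chunks together.
		 */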
		if (chan->dir == EDMA_DIR_WRITE)
			chan->ll_max = (chip->ll_region_wr[chan->id].sz / EDMA_LL_SZ);
		else
			chan->ll_max = (chip->ll_region_rd[chan->id].sz / EDMA_LL_SZ);
		chan->ll_max -= 1;

		dev_vdbg(dev, "L. List:\tChannel %s[%u] max_cnt=%u\n",
			 str_write_read(chan->dir == EDMA_DIR_WRITE),
			 chan->id, chan->ll_max);

		if (dw->nr_irqs == 1)
			pos = 0;
		else if (chan->dir == EDMA_DIR_WRITE)
			pos = chan->id % wr_alloc;
		else
			pos = wr_alloc + chan->id % rd_alloc;

		irq = &dw->irq[pos];

		if (chan->dir == EDMA_DIR_WRITE)
			irq->wr_mask |= BIT(chan->id);
		else
			irq->rd_mask |= BIT(chan->id);

		irq->dw = dw;
		memcpy(&chan->msi, &irq->msi, sizeof(chan->msi));

		dev_vdbg(dev, "MSI:\t\tChannel %s[%u] addr=0x%.8x%.8x, data=0x%.8x\n",
			 str_write_read(chan->dir == EDMA_DIR_WRITE),
			 chan->id,
			 chan->msi.address_hi, chan->msi.address_lo,
			 chan->msi.data);

		chan->vc.desc_free = vchan_free_desc;
		chan->vc.chan.private = chan->dir == EDMA_DIR_WRITE ?
					&dw->chip->dt_region_wr[chan->id] :
					&dw->chip->dt_region_rd[chan->id];

		vchan_init(&chan->vc, dma);

		dw_edma_core_ch_config(chan);
	}

	/* Set DMA channel capabilities */
	dma_cap_zero(dma->cap_mask);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
	dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	/* Set DMA channel callbacks */
	dma->dev = chip->dev;
	dma->device_alloc_chan_resources = dw_edma_alloc_chan_resources;
	dma->device_free_chan_resources = dw_edma_free_chan_resources;
	dma->device_caps = dw_edma_device_caps;
	dma->device_config = dw_edma_device_config;
	dma->device_pause = dw_edma_device_pause;
	dma->device_resume = dw_edma_device_resume;
	dma->device_terminate_all = dw_edma_device_terminate_all;
	dma->device_issue_pending = dw_edma_device_issue_pending;
	dma->device_tx_status = dw_edma_device_tx_status;
	dma->device_prep_slave_sg = dw_edma_device_prep_slave_sg;
	dma->device_prep_dma_cyclic = dw_edma_device_prep_dma_cyclic;
	dma->device_prep_interleaved_dma = dw_edma_device_prep_interleaved_dma;

	dma_set_max_seg_size(dma->dev, U32_MAX);

	/* Register DMA device */
	return dma_async_device_register(dma);
}

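/*
 * Give one of the remaining IRQ vectors to the given direction, as long as
 * that direction still has fewer vectors than channels.
 */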
static inline void dw_edma_dec_irq_alloc(int *nr_irqs, u32 *alloc, u16 cnt)
{
	if (*nr_irqs && *alloc < cnt) {
		(*alloc)++;
		(*nr_irqs)--;
	}
}

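/*
 * Grow *mask until *mask * alloc covers cnt, i.e. compute
 * DIV_ROUND_UP(cnt, alloc): how many channels have to share each of the
 * IRQ vectors allocated to this direction.
 */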
static inline void dw_edma_add_irq_mask(u32 *mask, u32 alloc, u16 cnt)
{
	while (*mask * alloc < cnt)
		(*mask)++;
}

static int dw_edma_irq_request(struct dw_edma *dw,
			       u32 *wr_alloc, u32 *rd_alloc)
{
	struct dw_edma_chip *chip = dw->chip;
	struct device *dev = dw->chip->dev;
	u32 wr_mask = 1;
	u32 rd_mask = 1;
	int i, err = 0;
	u32 ch_cnt;
	int irq;

	ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;

	if (chip->nr_irqs < 1 || !chip->ops->irq_vector)
		return -EINVAL;

	dw->irq = devm_kcalloc(dev, chip->nr_irqs, sizeof(*dw->irq), GFP_KERNEL);
	if (!dw->irq)
		return -ENOMEM;

	if (chip->nr_irqs == 1) {
		/* Common IRQ shared among all channels */
		irq = chip->ops->irq_vector(dev, 0);
		err = request_irq(irq, dw_edma_interrupt_common,
				  IRQF_SHARED, dw->name, &dw->irq[0]);
		if (err) {
			dw->nr_irqs = 0;
			return err;
		}

		if (irq_get_msi_desc(irq))
			get_cached_msi_msg(irq, &dw->irq[0].msi);

		dw->nr_irqs = 1;
	} else {
		/* Distribute IRQs equally among all channels */
		int tmp = chip->nr_irqs;

		while (tmp && (*wr_alloc + *rd_alloc) < ch_cnt) {
			dw_edma_dec_irq_alloc(&tmp, wr_alloc, dw->wr_ch_cnt);
			dw_edma_dec_irq_alloc(&tmp, rd_alloc, dw->rd_ch_cnt);
		}

		dw_edma_add_irq_mask(&wr_mask, *wr_alloc, dw->wr_ch_cnt);
		dw_edma_add_irq_mask(&rd_mask, *rd_alloc, dw->rd_ch_cnt);

		for (i = 0; i < (*wr_alloc + *rd_alloc); i++) {
			irq = chip->ops->irq_vector(dev, i);
			err = request_irq(irq,
					  i < *wr_alloc ?
						dw_edma_interrupt_write :
						dw_edma_interrupt_read,
					  IRQF_SHARED, dw->name,
					  &dw->irq[i]);
			if (err)
				goto err_irq_free;

			if (irq_get_msi_desc(irq))
				get_cached_msi_msg(irq, &dw->irq[i].msi);
		}

		dw->nr_irqs = i;
	}

	return 0;

err_irq_free:
	for (i--; i >= 0; i--) {
		irq = chip->ops->irq_vector(dev, i);
		free_irq(irq, &dw->irq[i]);
	}

	return err;
}

int dw_edma_probe(struct dw_edma_chip *chip)
{
	struct device *dev;
	struct dw_edma *dw;
	u32 wr_alloc = 0;
	u32 rd_alloc = 0;
	int i, err;

	if (!chip)
		return -EINVAL;

	dev = chip->dev;
	if (!dev || !chip->ops)
		return -EINVAL;

	dw = devm_kzalloc(dev, sizeof(*dw), GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	dw->chip = chip;

	if (dw->chip->mf == EDMA_MF_HDMA_NATIVE)
		dw_hdma_v0_core_register(dw);
	else
		dw_edma_v0_core_register(dw);

	raw_spin_lock_init(&dw->lock);

	dw->wr_ch_cnt = min_t(u16, chip->ll_wr_cnt,
			      dw_edma_core_ch_count(dw, EDMA_DIR_WRITE));
	dw->wr_ch_cnt = min_t(u16, dw->wr_ch_cnt, EDMA_MAX_WR_CH);

	dw->rd_ch_cnt = min_t(u16, chip->ll_rd_cnt,
			      dw_edma_core_ch_count(dw, EDMA_DIR_READ));
	dw->rd_ch_cnt = min_t(u16, dw->rd_ch_cnt, EDMA_MAX_RD_CH);

	if (!dw->wr_ch_cnt && !dw->rd_ch_cnt)
		return -EINVAL;

	dev_vdbg(dev, "Channels:\twrite=%d, read=%d\n",
		 dw->wr_ch_cnt, dw->rd_ch_cnt);

	/* Allocate channels */
	dw->chan = devm_kcalloc(dev, dw->wr_ch_cnt + dw->rd_ch_cnt,
				sizeof(*dw->chan), GFP_KERNEL);
	if (!dw->chan)
		return -ENOMEM;

	snprintf(dw->name, sizeof(dw->name), "dw-edma-core:%s",
		 dev_name(chip->dev));

	/* Disable eDMA, only to establish the ideal initial conditions */
	dw_edma_core_off(dw);

	/* Request IRQs */
	err = dw_edma_irq_request(dw, &wr_alloc, &rd_alloc);
	if (err)
		return err;

	/* Setup write/read channels */
	err = dw_edma_channel_setup(dw, wr_alloc, rd_alloc);
	if (err)
		goto err_irq_free;

	/* Turn debugfs on */
	dw_edma_core_debugfs_on(dw);

	chip->dw = dw;

	return 0;

err_irq_free:
	for (i = (dw->nr_irqs - 1); i >= 0; i--)
		free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]);

	return err;
}
EXPORT_SYMBOL_GPL(dw_edma_probe);
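
/*
 * Usage sketch (illustrative, not part of this driver): a glue driver, e.g.
 * a PCIe device probe routine, is expected to fill in a struct dw_edma_chip
 * and hand it to dw_edma_probe(). The chip fields below are the ones this
 * file relies on; the surrounding function, the ops table and the counts
 * are hypothetical placeholders.
 *
 *	static int example_probe(struct pci_dev *pdev)
 *	{
 *		struct dw_edma_chip *chip;
 *
 *		chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
 *		if (!chip)
 *			return -ENOMEM;
 *
 *		chip->dev = &pdev->dev;
 *		chip->mf = EDMA_MF_EDMA_LEGACY;	// or another EDMA_MF_* mode
 *		chip->nr_irqs = 1;
 *		chip->ops = &example_chip_ops;	// must supply ->irq_vector()
 *		chip->ll_wr_cnt = 1;
 *		chip->ll_rd_cnt = 1;
 *		// chip->reg_base plus the ll_region and dt_region arrays
 *		// must be mapped/filled before probing
 *
 *		return dw_edma_probe(chip);
 *	}
 */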

int dw_edma_remove(struct dw_edma_chip *chip)
{
	struct dw_edma_chan *chan, *_chan;
	struct device *dev = chip->dev;
	struct dw_edma *dw = chip->dw;
	int i;

	/* Skip removal if no private data found */
	if (!dw)
		return -ENODEV;

	/* Disable eDMA */
	dw_edma_core_off(dw);

	/* Free irqs */
	for (i = (dw->nr_irqs - 1); i >= 0; i--)
		free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]);

	/* Deregister eDMA device */
	dma_async_device_unregister(&dw->dma);
	list_for_each_entry_safe(chan, _chan, &dw->dma.channels,
				 vc.chan.device_node) {
		tasklet_kill(&chan->vc.task);
		list_del(&chan->vc.chan.device_node);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dw_edma_remove);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare eDMA controller core driver");
MODULE_AUTHOR("Gustavo Pimentel <gustavo.pimentel@synopsys.com>");