xref: /linux/drivers/dma/uniphier-xdmac.c (revision 6c8c1406a6d6a3f2e61ac590f5c0994231bc6be7)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * External DMA controller driver for UniPhier SoCs
4  * Copyright 2019 Socionext Inc.
5  * Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
6  */
7 
8 #include <linux/bitops.h>
9 #include <linux/bitfield.h>
10 #include <linux/iopoll.h>
11 #include <linux/module.h>
12 #include <linux/of.h>
13 #include <linux/of_dma.h>
14 #include <linux/platform_device.h>
15 #include <linux/slab.h>
16 
17 #include "dmaengine.h"
18 #include "virt-dma.h"
19 
20 #define XDMAC_CH_WIDTH		0x100
21 
22 #define XDMAC_TFA		0x08
23 #define XDMAC_TFA_MCNT_MASK	GENMASK(23, 16)
24 #define XDMAC_TFA_MASK		GENMASK(5, 0)
25 #define XDMAC_SADM		0x10
26 #define XDMAC_SADM_STW_MASK	GENMASK(25, 24)
27 #define XDMAC_SADM_SAM		BIT(4)
28 #define XDMAC_SADM_SAM_FIXED	XDMAC_SADM_SAM
29 #define XDMAC_SADM_SAM_INC	0
30 #define XDMAC_DADM		0x14
31 #define XDMAC_DADM_DTW_MASK	XDMAC_SADM_STW_MASK
32 #define XDMAC_DADM_DAM		XDMAC_SADM_SAM
33 #define XDMAC_DADM_DAM_FIXED	XDMAC_SADM_SAM_FIXED
34 #define XDMAC_DADM_DAM_INC	XDMAC_SADM_SAM_INC
35 #define XDMAC_EXSAD		0x18
36 #define XDMAC_EXDAD		0x1c
37 #define XDMAC_SAD		0x20
38 #define XDMAC_DAD		0x24
39 #define XDMAC_ITS		0x28
40 #define XDMAC_ITS_MASK		GENMASK(25, 0)
41 #define XDMAC_TNUM		0x2c
42 #define XDMAC_TNUM_MASK		GENMASK(15, 0)
43 #define XDMAC_TSS		0x30
44 #define XDMAC_TSS_REQ		BIT(0)
45 #define XDMAC_IEN		0x34
46 #define XDMAC_IEN_ERRIEN	BIT(1)
47 #define XDMAC_IEN_ENDIEN	BIT(0)
48 #define XDMAC_STAT		0x40
49 #define XDMAC_STAT_TENF		BIT(0)
50 #define XDMAC_IR		0x44
51 #define XDMAC_IR_ERRF		BIT(1)
52 #define XDMAC_IR_ENDF		BIT(0)
53 #define XDMAC_ID		0x48
54 #define XDMAC_ID_ERRIDF		BIT(1)
55 #define XDMAC_ID_ENDIDF		BIT(0)
56 
57 #define XDMAC_MAX_CHANS		16
58 #define XDMAC_INTERVAL_CLKS	20
59 #define XDMAC_MAX_WORDS		XDMAC_TNUM_MASK
60 
61 /* cut lower bit for maintain alignment of maximum transfer size */
62 #define XDMAC_MAX_WORD_SIZE	(XDMAC_ITS_MASK & ~GENMASK(3, 0))
63 
64 #define UNIPHIER_XDMAC_BUSWIDTHS \
65 	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
66 	 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
67 	 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
68 	 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
69 
/*
 * struct uniphier_xdmac_desc_node - parameters for one hardware transfer unit
 * @src: source bus address of this chunk
 * @dst: destination bus address of this chunk
 * @burst_size: bytes per burst (programmed into XDMAC_ITS)
 * @nr_burst: number of bursts (programmed into XDMAC_TNUM)
 */
struct uniphier_xdmac_desc_node {
	dma_addr_t src;
	dma_addr_t dst;
	u32 burst_size;
	u32 nr_burst;
};
76 
/*
 * struct uniphier_xdmac_desc - software descriptor for one transaction
 * @vd: virt-dma descriptor (cookie and list management)
 * @nr_node: number of valid entries in @nodes
 * @cur_node: index of the node currently being transferred
 * @dir: transfer direction of this descriptor
 * @nodes: flexible array of per-chunk hardware parameters
 */
struct uniphier_xdmac_desc {
	struct virt_dma_desc vd;

	unsigned int nr_node;
	unsigned int cur_node;
	enum dma_transfer_direction dir;
	struct uniphier_xdmac_desc_node nodes[];
};
85 
/*
 * struct uniphier_xdmac_chan - driver state for one hardware channel
 * @vc: virt-dma channel core
 * @xdev: parent controller instance
 * @xd: descriptor currently on the hardware, or NULL when idle
 * @reg_ch_base: base of this channel's register window
 * @sconfig: slave parameters cached by device_config
 * @id: channel index, assigned during OF translation
 * @req_factor: hardware request factor taken from the DT dma-spec
 */
struct uniphier_xdmac_chan {
	struct virt_dma_chan vc;
	struct uniphier_xdmac_device *xdev;
	struct uniphier_xdmac_desc *xd;
	void __iomem *reg_ch_base;
	struct dma_slave_config	sconfig;
	int id;
	unsigned int req_factor;
};
95 
/*
 * struct uniphier_xdmac_device - controller instance
 * @ddev: dmaengine device registered with the framework
 * @reg_base: mapped controller registers
 * @nr_chans: number of populated entries in @channels
 * @channels: flexible array of per-channel state
 */
struct uniphier_xdmac_device {
	struct dma_device ddev;
	void __iomem *reg_base;
	int nr_chans;
	struct uniphier_xdmac_chan channels[];
};
102 
103 static struct uniphier_xdmac_chan *
104 to_uniphier_xdmac_chan(struct virt_dma_chan *vc)
105 {
106 	return container_of(vc, struct uniphier_xdmac_chan, vc);
107 }
108 
109 static struct uniphier_xdmac_desc *
110 to_uniphier_xdmac_desc(struct virt_dma_desc *vd)
111 {
112 	return container_of(vd, struct uniphier_xdmac_desc, vd);
113 }
114 
115 /* xc->vc.lock must be held by caller */
116 static struct uniphier_xdmac_desc *
117 uniphier_xdmac_next_desc(struct uniphier_xdmac_chan *xc)
118 {
119 	struct virt_dma_desc *vd;
120 
121 	vd = vchan_next_desc(&xc->vc);
122 	if (!vd)
123 		return NULL;
124 
125 	list_del(&vd->node);
126 
127 	return to_uniphier_xdmac_desc(vd);
128 }
129 
/*
 * uniphier_xdmac_chan_start - program and kick one transfer node
 *
 * Writes the node at xd->cur_node (addresses, address modes, bus widths,
 * burst size and count) into the channel registers, enables the end/error
 * interrupts, and raises the transfer request bit.
 *
 * xc->vc.lock must be held by caller.
 */
static void uniphier_xdmac_chan_start(struct uniphier_xdmac_chan *xc,
				      struct uniphier_xdmac_desc *xd)
{
	u32 src_mode, src_width;
	u32 dst_mode, dst_width;
	dma_addr_t src_addr, dst_addr;
	u32 val, its, tnum;
	enum dma_slave_buswidth buswidth;

	src_addr = xd->nodes[xd->cur_node].src;
	dst_addr = xd->nodes[xd->cur_node].dst;
	its      = xd->nodes[xd->cur_node].burst_size;
	tnum     = xd->nodes[xd->cur_node].nr_burst;

	/*
	 * The width of MEM side must be 4 or 8 bytes, that does not
	 * affect that of DEV side and transfer size.
	 */
	if (xd->dir == DMA_DEV_TO_MEM) {
		/* source is the device: fixed address, width from slave config */
		src_mode = XDMAC_SADM_SAM_FIXED;
		buswidth = xc->sconfig.src_addr_width;
	} else {
		/* source is memory: incrementing address, widest access */
		src_mode = XDMAC_SADM_SAM_INC;
		buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES;
	}
	/* the STW field encodes log2 of the bus width in bytes (1/2/4/8 -> 0..3) */
	src_width = FIELD_PREP(XDMAC_SADM_STW_MASK, __ffs(buswidth));

	if (xd->dir == DMA_MEM_TO_DEV) {
		/* destination is the device: fixed address, slave-config width */
		dst_mode = XDMAC_DADM_DAM_FIXED;
		buswidth = xc->sconfig.dst_addr_width;
	} else {
		dst_mode = XDMAC_DADM_DAM_INC;
		buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES;
	}
	dst_width = FIELD_PREP(XDMAC_DADM_DTW_MASK, __ffs(buswidth));

	/* setup transfer factor: inter-burst interval and request factor */
	val = FIELD_PREP(XDMAC_TFA_MCNT_MASK, XDMAC_INTERVAL_CLKS);
	val |= FIELD_PREP(XDMAC_TFA_MASK, xc->req_factor);
	writel(val, xc->reg_ch_base + XDMAC_TFA);

	/* setup the channel: 64-bit addresses split over two registers each */
	writel(lower_32_bits(src_addr), xc->reg_ch_base + XDMAC_SAD);
	writel(upper_32_bits(src_addr), xc->reg_ch_base + XDMAC_EXSAD);

	writel(lower_32_bits(dst_addr), xc->reg_ch_base + XDMAC_DAD);
	writel(upper_32_bits(dst_addr), xc->reg_ch_base + XDMAC_EXDAD);

	/* address mode and width are combined into a single register write */
	src_mode |= src_width;
	dst_mode |= dst_width;
	writel(src_mode, xc->reg_ch_base + XDMAC_SADM);
	writel(dst_mode, xc->reg_ch_base + XDMAC_DADM);

	/* transfer size (bytes per burst) and number of bursts */
	writel(its, xc->reg_ch_base + XDMAC_ITS);
	writel(tnum, xc->reg_ch_base + XDMAC_TNUM);

	/* enable interrupt */
	writel(XDMAC_IEN_ENDIEN | XDMAC_IEN_ERRIEN,
	       xc->reg_ch_base + XDMAC_IEN);

	/* start XDMAC */
	val = readl(xc->reg_ch_base + XDMAC_TSS);
	val |= XDMAC_TSS_REQ;
	writel(val, xc->reg_ch_base + XDMAC_TSS);
}
196 
197 /* xc->vc.lock must be held by caller */
198 static int uniphier_xdmac_chan_stop(struct uniphier_xdmac_chan *xc)
199 {
200 	u32 val;
201 
202 	/* disable interrupt */
203 	val = readl(xc->reg_ch_base + XDMAC_IEN);
204 	val &= ~(XDMAC_IEN_ENDIEN | XDMAC_IEN_ERRIEN);
205 	writel(val, xc->reg_ch_base + XDMAC_IEN);
206 
207 	/* stop XDMAC */
208 	val = readl(xc->reg_ch_base + XDMAC_TSS);
209 	val &= ~XDMAC_TSS_REQ;
210 	writel(0, xc->reg_ch_base + XDMAC_TSS);
211 
212 	/* wait until transfer is stopped */
213 	return readl_poll_timeout_atomic(xc->reg_ch_base + XDMAC_STAT, val,
214 					 !(val & XDMAC_STAT_TENF), 100, 1000);
215 }
216 
217 /* xc->vc.lock must be held by caller */
218 static void uniphier_xdmac_start(struct uniphier_xdmac_chan *xc)
219 {
220 	struct uniphier_xdmac_desc *xd;
221 
222 	xd = uniphier_xdmac_next_desc(xc);
223 	if (xd)
224 		uniphier_xdmac_chan_start(xc, xd);
225 
226 	/* set desc to chan regardless of xd is null */
227 	xc->xd = xd;
228 }
229 
/*
 * uniphier_xdmac_chan_irq - per-channel interrupt service
 *
 * Reads the interrupt detect register; on error tries to stop the channel,
 * on normal end advances the active descriptor to its next node, or
 * completes it and starts the next queued descriptor.  Finally acknowledges
 * the handled status bits.
 */
static void uniphier_xdmac_chan_irq(struct uniphier_xdmac_chan *xc)
{
	u32 stat;
	int ret;

	spin_lock(&xc->vc.lock);

	stat = readl(xc->reg_ch_base + XDMAC_ID);

	if (stat & XDMAC_ID_ERRIDF) {
		/* transfer error: stop the channel, report if even that failed */
		ret = uniphier_xdmac_chan_stop(xc);
		if (ret)
			dev_err(xc->xdev->ddev.dev,
				"DMA transfer error with aborting issue\n");
		else
			dev_err(xc->xdev->ddev.dev,
				"DMA transfer error\n");

	} else if ((stat & XDMAC_ID_ENDIDF) && xc->xd) {
		/* one node finished: continue the descriptor or complete it */
		xc->xd->cur_node++;
		if (xc->xd->cur_node >= xc->xd->nr_node) {
			vchan_cookie_complete(&xc->xd->vd);
			uniphier_xdmac_start(xc);
		} else {
			uniphier_xdmac_chan_start(xc, xc->xd);
		}
	}

	/* write bits to clear */
	writel(stat, xc->reg_ch_base + XDMAC_IR);

	spin_unlock(&xc->vc.lock);
}
263 
264 static irqreturn_t uniphier_xdmac_irq_handler(int irq, void *dev_id)
265 {
266 	struct uniphier_xdmac_device *xdev = dev_id;
267 	int i;
268 
269 	for (i = 0; i < xdev->nr_chans; i++)
270 		uniphier_xdmac_chan_irq(&xdev->channels[i]);
271 
272 	return IRQ_HANDLED;
273 }
274 
static void uniphier_xdmac_free_chan_resources(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);

	/* Release every descriptor still held by the virtual channel. */
	vchan_free_chan_resources(vc);
}
279 
280 static struct dma_async_tx_descriptor *
281 uniphier_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
282 			       dma_addr_t src, size_t len, unsigned long flags)
283 {
284 	struct virt_dma_chan *vc = to_virt_chan(chan);
285 	struct uniphier_xdmac_desc *xd;
286 	unsigned int nr;
287 	size_t burst_size, tlen;
288 	int i;
289 
290 	if (len > XDMAC_MAX_WORD_SIZE * XDMAC_MAX_WORDS)
291 		return NULL;
292 
293 	nr = 1 + len / XDMAC_MAX_WORD_SIZE;
294 
295 	xd = kzalloc(struct_size(xd, nodes, nr), GFP_NOWAIT);
296 	if (!xd)
297 		return NULL;
298 
299 	for (i = 0; i < nr; i++) {
300 		burst_size = min_t(size_t, len, XDMAC_MAX_WORD_SIZE);
301 		xd->nodes[i].src = src;
302 		xd->nodes[i].dst = dst;
303 		xd->nodes[i].burst_size = burst_size;
304 		xd->nodes[i].nr_burst = len / burst_size;
305 		tlen = rounddown(len, burst_size);
306 		src += tlen;
307 		dst += tlen;
308 		len -= tlen;
309 	}
310 
311 	xd->dir = DMA_MEM_TO_MEM;
312 	xd->nr_node = nr;
313 	xd->cur_node = 0;
314 
315 	return vchan_tx_prep(vc, &xd->vd, flags);
316 }
317 
/*
 * uniphier_xdmac_prep_slave_sg - build a descriptor for a device transfer
 *
 * One node is created per scatterlist entry.  The device-side address is
 * taken from the cached slave config and kept fixed; the memory side comes
 * from the scatterlist.  Each entry's length must be a multiple of
 * maxburst * buswidth because the hardware cannot transfer a residue.
 *
 * Return: the prepared descriptor, or NULL on invalid parameters /
 * allocation failure.
 */
static struct dma_async_tx_descriptor *
uniphier_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			     unsigned int sg_len,
			     enum dma_transfer_direction direction,
			     unsigned long flags, void *context)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
	struct uniphier_xdmac_desc *xd;
	struct scatterlist *sg;
	enum dma_slave_buswidth buswidth;
	u32 maxburst;
	int i;

	if (!is_slave_direction(direction))
		return NULL;

	/* pick the device-side parameters for this direction */
	if (direction == DMA_DEV_TO_MEM) {
		buswidth = xc->sconfig.src_addr_width;
		maxburst = xc->sconfig.src_maxburst;
	} else {
		buswidth = xc->sconfig.dst_addr_width;
		maxburst = xc->sconfig.dst_maxburst;
	}

	if (!maxburst)
		maxburst = 1;
	if (maxburst > xc->xdev->ddev.max_burst) {
		dev_err(xc->xdev->ddev.dev,
			"Exceed maximum number of burst words\n");
		return NULL;
	}

	xd = kzalloc(struct_size(xd, nodes, sg_len), GFP_NOWAIT);
	if (!xd)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		/* device side stays fixed; memory side follows the sg entry */
		xd->nodes[i].src = (direction == DMA_DEV_TO_MEM)
			? xc->sconfig.src_addr : sg_dma_address(sg);
		xd->nodes[i].dst = (direction == DMA_MEM_TO_DEV)
			? xc->sconfig.dst_addr : sg_dma_address(sg);
		xd->nodes[i].burst_size = maxburst * buswidth;
		xd->nodes[i].nr_burst =
			sg_dma_len(sg) / xd->nodes[i].burst_size;

		/*
		 * Currently transfer that size doesn't align the unit size
		 * (the number of burst words * bus-width) is not allowed,
		 * because the driver does not support the way to transfer
		 * residue size. As a matter of fact, in order to transfer
		 * arbitrary size, 'src_maxburst' or 'dst_maxburst' of
		 * dma_slave_config must be 1.
		 */
		if (sg_dma_len(sg) % xd->nodes[i].burst_size) {
			dev_err(xc->xdev->ddev.dev,
				"Unaligned transfer size: %d", sg_dma_len(sg));
			kfree(xd);
			return NULL;
		}

		/* burst count must fit in the 16-bit XDMAC_TNUM field */
		if (xd->nodes[i].nr_burst > XDMAC_MAX_WORDS) {
			dev_err(xc->xdev->ddev.dev,
				"Exceed maximum transfer size");
			kfree(xd);
			return NULL;
		}
	}

	xd->dir = direction;
	xd->nr_node = sg_len;
	xd->cur_node = 0;

	return vchan_tx_prep(vc, &xd->vd, flags);
}
393 
394 static int uniphier_xdmac_slave_config(struct dma_chan *chan,
395 				       struct dma_slave_config *config)
396 {
397 	struct virt_dma_chan *vc = to_virt_chan(chan);
398 	struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
399 
400 	memcpy(&xc->sconfig, config, sizeof(*config));
401 
402 	return 0;
403 }
404 
/*
 * uniphier_xdmac_terminate_all - abort the active transfer and drop the queue
 *
 * Under the channel lock, terminates the in-flight descriptor (if any) and
 * stops the hardware, then collects all queued descriptors so they can be
 * freed outside the lock.
 *
 * Return: 0 on success, or the error from stopping the channel.
 */
static int uniphier_xdmac_terminate_all(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
	unsigned long flags;
	int ret = 0;
	LIST_HEAD(head);

	spin_lock_irqsave(&vc->lock, flags);

	if (xc->xd) {
		/* hand the active descriptor back to virt-dma for disposal */
		vchan_terminate_vdesc(&xc->xd->vd);
		xc->xd = NULL;
		ret = uniphier_xdmac_chan_stop(xc);
	}

	vchan_get_all_descriptors(vc, &head);

	spin_unlock_irqrestore(&vc->lock, flags);

	/* free outside the spinlock: desc_free callbacks may not be atomic-safe */
	vchan_dma_desc_free_list(vc, &head);

	return ret;
}
429 
static void uniphier_xdmac_synchronize(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);

	/* Wait until any pending vchan completion work has finished. */
	vchan_synchronize(vc);
}
434 
435 static void uniphier_xdmac_issue_pending(struct dma_chan *chan)
436 {
437 	struct virt_dma_chan *vc = to_virt_chan(chan);
438 	struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
439 	unsigned long flags;
440 
441 	spin_lock_irqsave(&vc->lock, flags);
442 
443 	if (vchan_issue_pending(vc) && !xc->xd)
444 		uniphier_xdmac_start(xc);
445 
446 	spin_unlock_irqrestore(&vc->lock, flags);
447 }
448 
static void uniphier_xdmac_desc_free(struct virt_dma_desc *vd)
{
	struct uniphier_xdmac_desc *xd = to_uniphier_xdmac_desc(vd);

	/* Descriptors are plain kzalloc'ed objects; nothing else to release. */
	kfree(xd);
}
453 
454 static void uniphier_xdmac_chan_init(struct uniphier_xdmac_device *xdev,
455 				     int ch)
456 {
457 	struct uniphier_xdmac_chan *xc = &xdev->channels[ch];
458 
459 	xc->xdev = xdev;
460 	xc->reg_ch_base = xdev->reg_base + XDMAC_CH_WIDTH * ch;
461 	xc->vc.desc_free = uniphier_xdmac_desc_free;
462 
463 	vchan_init(&xc->vc, &xdev->ddev);
464 }
465 
466 static struct dma_chan *of_dma_uniphier_xlate(struct of_phandle_args *dma_spec,
467 					      struct of_dma *ofdma)
468 {
469 	struct uniphier_xdmac_device *xdev = ofdma->of_dma_data;
470 	int chan_id = dma_spec->args[0];
471 
472 	if (chan_id >= xdev->nr_chans)
473 		return NULL;
474 
475 	xdev->channels[chan_id].id = chan_id;
476 	xdev->channels[chan_id].req_factor = dma_spec->args[1];
477 
478 	return dma_get_slave_channel(&xdev->channels[chan_id].vc.chan);
479 }
480 
/*
 * uniphier_xdmac_probe - set up the controller
 *
 * Reads the channel count from the device tree, maps the registers, fills
 * in the dmaengine capabilities and callbacks, initializes every channel,
 * requests the shared interrupt, and registers both the dmaengine device
 * and the OF translation hook.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int uniphier_xdmac_probe(struct platform_device *pdev)
{
	struct uniphier_xdmac_device *xdev;
	struct device *dev = &pdev->dev;
	struct dma_device *ddev;
	int irq;
	int nr_chans;
	int i, ret;

	if (of_property_read_u32(dev->of_node, "dma-channels", &nr_chans))
		return -EINVAL;
	/* clamp to the number of channels the register map can address */
	if (nr_chans > XDMAC_MAX_CHANS)
		nr_chans = XDMAC_MAX_CHANS;

	xdev = devm_kzalloc(dev, struct_size(xdev, channels, nr_chans),
			    GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	xdev->nr_chans = nr_chans;
	xdev->reg_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(xdev->reg_base))
		return PTR_ERR(xdev->reg_base);

	/* describe capabilities and hook up the dmaengine callbacks */
	ddev = &xdev->ddev;
	ddev->dev = dev;
	dma_cap_zero(ddev->cap_mask);
	dma_cap_set(DMA_MEMCPY, ddev->cap_mask);
	dma_cap_set(DMA_SLAVE, ddev->cap_mask);
	ddev->src_addr_widths = UNIPHIER_XDMAC_BUSWIDTHS;
	ddev->dst_addr_widths = UNIPHIER_XDMAC_BUSWIDTHS;
	ddev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
			   BIT(DMA_MEM_TO_MEM);
	ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	ddev->max_burst = XDMAC_MAX_WORDS;
	ddev->device_free_chan_resources = uniphier_xdmac_free_chan_resources;
	ddev->device_prep_dma_memcpy = uniphier_xdmac_prep_dma_memcpy;
	ddev->device_prep_slave_sg = uniphier_xdmac_prep_slave_sg;
	ddev->device_config = uniphier_xdmac_slave_config;
	ddev->device_terminate_all = uniphier_xdmac_terminate_all;
	ddev->device_synchronize = uniphier_xdmac_synchronize;
	ddev->device_tx_status = dma_cookie_status;
	ddev->device_issue_pending = uniphier_xdmac_issue_pending;
	INIT_LIST_HEAD(&ddev->channels);

	for (i = 0; i < nr_chans; i++)
		uniphier_xdmac_chan_init(xdev, i);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(dev, irq, uniphier_xdmac_irq_handler,
			       IRQF_SHARED, "xdmac", xdev);
	if (ret) {
		dev_err(dev, "Failed to request IRQ\n");
		return ret;
	}

	ret = dma_async_device_register(ddev);
	if (ret) {
		dev_err(dev, "Failed to register XDMA device\n");
		return ret;
	}

	ret = of_dma_controller_register(dev->of_node,
					 of_dma_uniphier_xlate, xdev);
	if (ret) {
		dev_err(dev, "Failed to register XDMA controller\n");
		goto out_unregister_dmac;
	}

	platform_set_drvdata(pdev, xdev);

	dev_info(&pdev->dev, "UniPhier XDMAC driver (%d channels)\n",
		 nr_chans);

	return 0;

out_unregister_dmac:
	dma_async_device_unregister(ddev);

	return ret;
}
565 
/*
 * uniphier_xdmac_remove - tear down the controller
 *
 * Terminates and frees what each channel may still hold, then unregisters
 * the OF hook and the dmaengine device.
 *
 * Return: 0 on success, or the error from terminating a channel.
 */
static int uniphier_xdmac_remove(struct platform_device *pdev)
{
	struct uniphier_xdmac_device *xdev = platform_get_drvdata(pdev);
	struct dma_device *ddev = &xdev->ddev;
	struct dma_chan *chan;
	int ret;

	/*
	 * Before reaching here, almost all descriptors have been freed by the
	 * ->device_free_chan_resources() hook. However, each channel might
	 * be still holding one descriptor that was on-flight at that moment.
	 * Terminate it to make sure this hardware is no longer running. Then,
	 * free the channel resources once again to avoid memory leak.
	 */
	list_for_each_entry(chan, &ddev->channels, device_node) {
		ret = dmaengine_terminate_sync(chan);
		if (ret)
			return ret;
		uniphier_xdmac_free_chan_resources(chan);
	}

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(ddev);

	return 0;
}
592 
/* One compatible string covers all UniPhier SoC variants of this XDMAC. */
static const struct of_device_id uniphier_xdmac_match[] = {
	{ .compatible = "socionext,uniphier-xdmac" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, uniphier_xdmac_match);
598 
/* Platform driver glue; registered/unregistered by module_platform_driver(). */
static struct platform_driver uniphier_xdmac_driver = {
	.probe = uniphier_xdmac_probe,
	.remove = uniphier_xdmac_remove,
	.driver = {
		.name = "uniphier-xdmac",
		.of_match_table = uniphier_xdmac_match,
	},
};
module_platform_driver(uniphier_xdmac_driver);
608 
609 MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
610 MODULE_DESCRIPTION("UniPhier external DMA controller driver");
611 MODULE_LICENSE("GPL v2");
612