xref: /linux/drivers/dma/xilinx/xilinx_dpdma.c (revision 02680c23d7b3febe45ea3d4f9818c2b2dc89020a)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Xilinx ZynqMP DPDMA Engine driver
4  *
5  * Copyright (C) 2015 - 2020 Xilinx, Inc.
6  *
7  * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
8  */
9 
10 #include <linux/bitfield.h>
11 #include <linux/bits.h>
12 #include <linux/clk.h>
13 #include <linux/debugfs.h>
14 #include <linux/delay.h>
15 #include <linux/dmaengine.h>
16 #include <linux/dmapool.h>
17 #include <linux/interrupt.h>
18 #include <linux/module.h>
19 #include <linux/of.h>
20 #include <linux/of_dma.h>
21 #include <linux/platform_device.h>
22 #include <linux/sched.h>
23 #include <linux/slab.h>
24 #include <linux/spinlock.h>
25 #include <linux/wait.h>
26 
27 #include <dt-bindings/dma/xlnx-zynqmp-dpdma.h>
28 
29 #include "../dmaengine.h"
30 #include "../virt-dma.h"
31 
32 /* DPDMA registers */
33 #define XILINX_DPDMA_ERR_CTRL				0x000
34 #define XILINX_DPDMA_ISR				0x004
35 #define XILINX_DPDMA_IMR				0x008
36 #define XILINX_DPDMA_IEN				0x00c
37 #define XILINX_DPDMA_IDS				0x010
38 #define XILINX_DPDMA_INTR_DESC_DONE(n)			BIT((n) + 0)
39 #define XILINX_DPDMA_INTR_DESC_DONE_MASK		GENMASK(5, 0)
40 #define XILINX_DPDMA_INTR_NO_OSTAND(n)			BIT((n) + 6)
41 #define XILINX_DPDMA_INTR_NO_OSTAND_MASK		GENMASK(11, 6)
42 #define XILINX_DPDMA_INTR_AXI_ERR(n)			BIT((n) + 12)
43 #define XILINX_DPDMA_INTR_AXI_ERR_MASK			GENMASK(17, 12)
44 #define XILINX_DPDMA_INTR_DESC_ERR(n)			BIT((n) + 18)
45 #define XILINX_DPDMA_INTR_DESC_ERR_MASK			GENMASK(23, 18)
46 #define XILINX_DPDMA_INTR_WR_CMD_FIFO_FULL		BIT(24)
47 #define XILINX_DPDMA_INTR_WR_DATA_FIFO_FULL		BIT(25)
48 #define XILINX_DPDMA_INTR_AXI_4K_CROSS			BIT(26)
49 #define XILINX_DPDMA_INTR_VSYNC				BIT(27)
50 #define XILINX_DPDMA_INTR_CHAN_ERR_MASK			0x00041000
51 #define XILINX_DPDMA_INTR_CHAN_ERR			0x00fff000
52 #define XILINX_DPDMA_INTR_GLOBAL_ERR			0x07000000
53 #define XILINX_DPDMA_INTR_ERR_ALL			0x07fff000
54 #define XILINX_DPDMA_INTR_CHAN_MASK			0x00041041
55 #define XILINX_DPDMA_INTR_GLOBAL_MASK			0x0f000000
56 #define XILINX_DPDMA_INTR_ALL				0x0fffffff
57 #define XILINX_DPDMA_EISR				0x014
58 #define XILINX_DPDMA_EIMR				0x018
59 #define XILINX_DPDMA_EIEN				0x01c
60 #define XILINX_DPDMA_EIDS				0x020
61 #define XILINX_DPDMA_EINTR_INV_APB			BIT(0)
62 #define XILINX_DPDMA_EINTR_RD_AXI_ERR(n)		BIT((n) + 1)
63 #define XILINX_DPDMA_EINTR_RD_AXI_ERR_MASK		GENMASK(6, 1)
64 #define XILINX_DPDMA_EINTR_PRE_ERR(n)			BIT((n) + 7)
65 #define XILINX_DPDMA_EINTR_PRE_ERR_MASK			GENMASK(12, 7)
66 #define XILINX_DPDMA_EINTR_CRC_ERR(n)			BIT((n) + 13)
67 #define XILINX_DPDMA_EINTR_CRC_ERR_MASK			GENMASK(18, 13)
68 #define XILINX_DPDMA_EINTR_WR_AXI_ERR(n)		BIT((n) + 19)
69 #define XILINX_DPDMA_EINTR_WR_AXI_ERR_MASK		GENMASK(24, 19)
70 #define XILINX_DPDMA_EINTR_DESC_DONE_ERR(n)		BIT((n) + 25)
71 #define XILINX_DPDMA_EINTR_DESC_DONE_ERR_MASK		GENMASK(30, 25)
72 #define XILINX_DPDMA_EINTR_RD_CMD_FIFO_FULL		BIT(31)
73 #define XILINX_DPDMA_EINTR_CHAN_ERR_MASK		0x02082082
74 #define XILINX_DPDMA_EINTR_CHAN_ERR			0x7ffffffe
75 #define XILINX_DPDMA_EINTR_GLOBAL_ERR			0x80000001
76 #define XILINX_DPDMA_EINTR_ALL				0xffffffff
77 #define XILINX_DPDMA_CNTL				0x100
78 #define XILINX_DPDMA_GBL				0x104
79 #define XILINX_DPDMA_GBL_TRIG_MASK(n)			((n) << 0)
80 #define XILINX_DPDMA_GBL_RETRIG_MASK(n)			((n) << 6)
81 #define XILINX_DPDMA_ALC0_CNTL				0x108
82 #define XILINX_DPDMA_ALC0_STATUS			0x10c
83 #define XILINX_DPDMA_ALC0_MAX				0x110
84 #define XILINX_DPDMA_ALC0_MIN				0x114
85 #define XILINX_DPDMA_ALC0_ACC				0x118
86 #define XILINX_DPDMA_ALC0_ACC_TRAN			0x11c
87 #define XILINX_DPDMA_ALC1_CNTL				0x120
88 #define XILINX_DPDMA_ALC1_STATUS			0x124
89 #define XILINX_DPDMA_ALC1_MAX				0x128
90 #define XILINX_DPDMA_ALC1_MIN				0x12c
91 #define XILINX_DPDMA_ALC1_ACC				0x130
92 #define XILINX_DPDMA_ALC1_ACC_TRAN			0x134
93 
94 /* Channel register */
95 #define XILINX_DPDMA_CH_BASE				0x200
96 #define XILINX_DPDMA_CH_OFFSET				0x100
97 #define XILINX_DPDMA_CH_DESC_START_ADDRE		0x000
98 #define XILINX_DPDMA_CH_DESC_START_ADDRE_MASK		GENMASK(15, 0)
99 #define XILINX_DPDMA_CH_DESC_START_ADDR			0x004
100 #define XILINX_DPDMA_CH_DESC_NEXT_ADDRE			0x008
101 #define XILINX_DPDMA_CH_DESC_NEXT_ADDR			0x00c
102 #define XILINX_DPDMA_CH_PYLD_CUR_ADDRE			0x010
103 #define XILINX_DPDMA_CH_PYLD_CUR_ADDR			0x014
104 #define XILINX_DPDMA_CH_CNTL				0x018
105 #define XILINX_DPDMA_CH_CNTL_ENABLE			BIT(0)
106 #define XILINX_DPDMA_CH_CNTL_PAUSE			BIT(1)
107 #define XILINX_DPDMA_CH_CNTL_QOS_DSCR_WR_MASK		GENMASK(5, 2)
108 #define XILINX_DPDMA_CH_CNTL_QOS_DSCR_RD_MASK		GENMASK(9, 6)
109 #define XILINX_DPDMA_CH_CNTL_QOS_DATA_RD_MASK		GENMASK(13, 10)
110 #define XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS		11
111 #define XILINX_DPDMA_CH_STATUS				0x01c
112 #define XILINX_DPDMA_CH_STATUS_OTRAN_CNT_MASK		GENMASK(24, 21)
113 #define XILINX_DPDMA_CH_VDO				0x020
114 #define XILINX_DPDMA_CH_PYLD_SZ				0x024
115 #define XILINX_DPDMA_CH_DESC_ID				0x028
116 
117 /* DPDMA descriptor fields */
118 #define XILINX_DPDMA_DESC_CONTROL_PREEMBLE		0xa5
119 #define XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR		BIT(8)
120 #define XILINX_DPDMA_DESC_CONTROL_DESC_UPDATE		BIT(9)
121 #define XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE		BIT(10)
122 #define XILINX_DPDMA_DESC_CONTROL_FRAG_MODE		BIT(18)
123 #define XILINX_DPDMA_DESC_CONTROL_LAST			BIT(19)
124 #define XILINX_DPDMA_DESC_CONTROL_ENABLE_CRC		BIT(20)
125 #define XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME		BIT(21)
126 #define XILINX_DPDMA_DESC_ID_MASK			GENMASK(15, 0)
127 #define XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK	GENMASK(17, 0)
128 #define XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK	GENMASK(31, 18)
129 #define XILINX_DPDMA_DESC_ADDR_EXT_NEXT_ADDR_MASK	GENMASK(15, 0)
130 #define XILINX_DPDMA_DESC_ADDR_EXT_SRC_ADDR_MASK	GENMASK(31, 16)
131 
132 #define XILINX_DPDMA_ALIGN_BYTES			256
133 #define XILINX_DPDMA_LINESIZE_ALIGN_BITS		128
134 
135 #define XILINX_DPDMA_NUM_CHAN				6
136 
137 struct xilinx_dpdma_chan;
138 
139 /**
140  * struct xilinx_dpdma_hw_desc - DPDMA hardware descriptor
141  * @control: control configuration field
142  * @desc_id: descriptor ID
143  * @xfer_size: transfer size
144  * @hsize_stride: horizontal size and stride
145  * @timestamp_lsb: LSB of time stamp
146  * @timestamp_msb: MSB of time stamp
147  * @addr_ext: upper 16 bits of the 48-bit addresses (next_desc and src_addr)
148  * @next_desc: next descriptor 32 bit address
149  * @src_addr: payload source address (1st page, 32 LSB)
150  * @addr_ext_23: payload source address (2nd and 3rd pages, 16 LSBs)
151  * @addr_ext_45: payload source address (4th and 5th pages, 16 LSBs)
152  * @src_addr2: payload source address (2nd page, 32 LSB)
153  * @src_addr3: payload source address (3rd page, 32 LSB)
154  * @src_addr4: payload source address (4th page, 32 LSB)
155  * @src_addr5: payload source address (5th page, 32 LSB)
156  * @crc: descriptor CRC
157  */
158 struct xilinx_dpdma_hw_desc {
159 	u32 control;
160 	u32 desc_id;
161 	u32 xfer_size;
162 	u32 hsize_stride;
163 	u32 timestamp_lsb;
164 	u32 timestamp_msb;
165 	u32 addr_ext;
166 	u32 next_desc;
167 	u32 src_addr;
168 	u32 addr_ext_23;
169 	u32 addr_ext_45;
170 	u32 src_addr2;
171 	u32 src_addr3;
172 	u32 src_addr4;
173 	u32 src_addr5;
174 	u32 crc;
175 } __aligned(XILINX_DPDMA_ALIGN_BYTES);
176 
177 /**
178  * struct xilinx_dpdma_sw_desc - DPDMA software descriptor
179  * @hw: DPDMA hardware descriptor
180  * @node: list node for software descriptors
181  * @dma_addr: DMA address of the software descriptor
182  */
183 struct xilinx_dpdma_sw_desc {
184 	struct xilinx_dpdma_hw_desc hw;
185 	struct list_head node;
186 	dma_addr_t dma_addr;
187 };
188 
189 /**
190  * struct xilinx_dpdma_tx_desc - DPDMA transaction descriptor
191  * @vdesc: virtual DMA descriptor
192  * @chan: DMA channel
193  * @descriptors: list of software descriptors
194  * @error: an error has been detected with this descriptor
195  */
196 struct xilinx_dpdma_tx_desc {
197 	struct virt_dma_desc vdesc;
198 	struct xilinx_dpdma_chan *chan;
199 	struct list_head descriptors;
200 	bool error;
201 };
202 
203 #define to_dpdma_tx_desc(_desc) \
204 	container_of(_desc, struct xilinx_dpdma_tx_desc, vdesc)
205 
206 /**
207  * struct xilinx_dpdma_chan - DPDMA channel
208  * @vchan: virtual DMA channel
209  * @reg: register base address
210  * @id: channel ID
211  * @wait_to_stop: queue to wait for outstanding transactions before stopping
212  * @running: true if the channel is running
213  * @first_frame: flag for the first frame of stream
214  * @video_group: flag if multi-channel operation is needed for video channels
215  * @lock: lock to access struct xilinx_dpdma_chan
216  * @desc_pool: descriptor allocation pool
217  * @err_task: error IRQ bottom half handler
218  * @desc: References to descriptors being processed
219  * @desc.pending: Descriptor scheduled to the hardware, pending execution
220  * @desc.active: Descriptor being executed by the hardware
221  * @xdev: DPDMA device
222  */
223 struct xilinx_dpdma_chan {
224 	struct virt_dma_chan vchan;
225 	void __iomem *reg;
226 	unsigned int id;
227 
228 	wait_queue_head_t wait_to_stop;
229 	bool running;
230 	bool first_frame;
231 	bool video_group;
232 
233 	spinlock_t lock; /* lock to access struct xilinx_dpdma_chan */
234 	struct dma_pool *desc_pool;
235 	struct tasklet_struct err_task;
236 
237 	struct {
238 		struct xilinx_dpdma_tx_desc *pending;
239 		struct xilinx_dpdma_tx_desc *active;
240 	} desc;
241 
242 	struct xilinx_dpdma_device *xdev;
243 };
244 
245 #define to_xilinx_chan(_chan) \
246 	container_of(_chan, struct xilinx_dpdma_chan, vchan.chan)
247 
248 /**
249  * struct xilinx_dpdma_device - DPDMA device
250  * @common: generic dma device structure
251  * @reg: register base address
252  * @dev: generic device structure
253  * @irq: the interrupt number
254  * @axi_clk: axi clock
255  * @chan: DPDMA channels
256  * @ext_addr: flag for 64 bit system (48 bit addressing)
257  */
258 struct xilinx_dpdma_device {
259 	struct dma_device common;
260 	void __iomem *reg;
261 	struct device *dev;
262 	int irq;
263 
264 	struct clk *axi_clk;
265 	struct xilinx_dpdma_chan *chan[XILINX_DPDMA_NUM_CHAN];
266 
267 	bool ext_addr;
268 };
269 
270 /* -----------------------------------------------------------------------------
271  * DebugFS
272  */
273 
274 #ifdef CONFIG_DEBUG_FS
275 
276 #define XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE	32
277 #define XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR	"65535"
278 
279 /* Match xilinx_dpdma_testcases vs dpdma_debugfs_reqs[] entry */
280 enum xilinx_dpdma_testcases {
281 	DPDMA_TC_INTR_DONE,
282 	DPDMA_TC_NONE
283 };
284 
285 struct xilinx_dpdma_debugfs {
286 	enum xilinx_dpdma_testcases testcase;
287 	u16 xilinx_dpdma_irq_done_count;
288 	unsigned int chan_id;
289 };
290 
291 static struct xilinx_dpdma_debugfs dpdma_debugfs;
292 struct xilinx_dpdma_debugfs_request {
293 	const char *name;
294 	enum xilinx_dpdma_testcases tc;
295 	ssize_t (*read)(char *buf);
296 	int (*write)(char *args);
297 };
298 
299 static void xilinx_dpdma_debugfs_desc_done_irq(struct xilinx_dpdma_chan *chan)
300 {
301 	if (chan->id == dpdma_debugfs.chan_id)
302 		dpdma_debugfs.xilinx_dpdma_irq_done_count++;
303 }
304 
305 static ssize_t xilinx_dpdma_debugfs_desc_done_irq_read(char *buf)
306 {
307 	size_t out_str_len;
308 
309 	dpdma_debugfs.testcase = DPDMA_TC_NONE;
310 
311 	out_str_len = strlen(XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR);
312 	out_str_len = min_t(size_t, XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE,
313 			    out_str_len + 1);
314 	snprintf(buf, out_str_len, "%d",
315 		 dpdma_debugfs.xilinx_dpdma_irq_done_count);
316 
317 	return 0;
318 }
319 
320 static int xilinx_dpdma_debugfs_desc_done_irq_write(char *args)
321 {
322 	char *arg;
323 	int ret;
324 	u32 id;
325 
326 	arg = strsep(&args, " ");
327 	if (!arg || strncasecmp(arg, "start", 5))
328 		return -EINVAL;
329 
330 	arg = strsep(&args, " ");
331 	if (!arg)
332 		return -EINVAL;
333 
334 	ret = kstrtou32(arg, 0, &id);
335 	if (ret < 0)
336 		return ret;
337 
338 	if (id < ZYNQMP_DPDMA_VIDEO0 || id > ZYNQMP_DPDMA_AUDIO1)
339 		return -EINVAL;
340 
341 	dpdma_debugfs.testcase = DPDMA_TC_INTR_DONE;
342 	dpdma_debugfs.xilinx_dpdma_irq_done_count = 0;
343 	dpdma_debugfs.chan_id = id;
344 
345 	return 0;
346 }
347 
348 /* Match xilinx_dpdma_testcases vs dpdma_debugfs_reqs[] entry */
349 static struct xilinx_dpdma_debugfs_request dpdma_debugfs_reqs[] = {
350 	{
351 		.name = "DESCRIPTOR_DONE_INTR",
352 		.tc = DPDMA_TC_INTR_DONE,
353 		.read = xilinx_dpdma_debugfs_desc_done_irq_read,
354 		.write = xilinx_dpdma_debugfs_desc_done_irq_write,
355 	},
356 };
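
/*
 * Usage sketch for the "testcase" debugfs file (the exact path depends on
 * where the dmaengine core roots its debugfs directory for this device, so
 * treat it as an assumption):
 *
 *	echo "DESCRIPTOR_DONE_INTR start <chan-id>" > .../testcase
 *	cat .../testcase
 *
 * The write arms the descriptor-done counter for the given channel (VIDEO0 to
 * AUDIO1); the read returns the number of done interrupts received since then
 * and disarms the testcase.
 */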
357 
358 static ssize_t xilinx_dpdma_debugfs_read(struct file *f, char __user *buf,
359 					 size_t size, loff_t *pos)
360 {
361 	enum xilinx_dpdma_testcases testcase;
362 	char *kern_buff;
363 	int ret = 0;
364 
365 	if (*pos != 0 || size <= 0)
366 		return -EINVAL;
367 
368 	kern_buff = kzalloc(XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE, GFP_KERNEL);
369 	if (!kern_buff) {
370 		dpdma_debugfs.testcase = DPDMA_TC_NONE;
371 		return -ENOMEM;
372 	}
373 
374 	testcase = READ_ONCE(dpdma_debugfs.testcase);
375 	if (testcase != DPDMA_TC_NONE) {
376 		ret = dpdma_debugfs_reqs[testcase].read(kern_buff);
377 		if (ret < 0)
378 			goto done;
379 	} else {
380 		strlcpy(kern_buff, "No testcase executed",
381 			XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE);
382 	}
383 
384 	size = min(size, strlen(kern_buff));
385 	if (copy_to_user(buf, kern_buff, size))
386 		ret = -EFAULT;
387 
388 done:
389 	kfree(kern_buff);
390 	if (ret)
391 		return ret;
392 
393 	*pos = size + 1;
394 	return size;
395 }
396 
397 static ssize_t xilinx_dpdma_debugfs_write(struct file *f,
398 					  const char __user *buf, size_t size,
399 					  loff_t *pos)
400 {
401 	char *kern_buff, *kern_buff_start;
402 	char *testcase;
403 	unsigned int i;
404 	int ret;
405 
406 	if (*pos != 0 || size <= 0)
407 		return -EINVAL;
408 
409 	/* Only a single test instance is supported at the moment. */
410 	if (dpdma_debugfs.testcase != DPDMA_TC_NONE)
411 		return -EBUSY;
412 
413 	kern_buff = kzalloc(size + 1, GFP_KERNEL);
414 	if (!kern_buff)
415 		return -ENOMEM;
416 	kern_buff_start = kern_buff;
417 
418 	ret = strncpy_from_user(kern_buff, buf, size);
419 	if (ret < 0)
420 		goto done;
421 
422 	/* Read the testcase name from a user request. */
423 	testcase = strsep(&kern_buff, " ");
424 
425 	for (i = 0; i < ARRAY_SIZE(dpdma_debugfs_reqs); i++) {
426 		if (!strcasecmp(testcase, dpdma_debugfs_reqs[i].name))
427 			break;
428 	}
429 
430 	if (i == ARRAY_SIZE(dpdma_debugfs_reqs)) {
431 		ret = -EINVAL;
432 		goto done;
433 	}
434 
435 	ret = dpdma_debugfs_reqs[i].write(kern_buff);
436 	if (ret < 0)
437 		goto done;
438 
439 	ret = size;
440 
441 done:
442 	kfree(kern_buff_start);
443 	return ret;
444 }
445 
446 static const struct file_operations fops_xilinx_dpdma_dbgfs = {
447 	.owner = THIS_MODULE,
448 	.read = xilinx_dpdma_debugfs_read,
449 	.write = xilinx_dpdma_debugfs_write,
450 };
451 
452 static void xilinx_dpdma_debugfs_init(struct xilinx_dpdma_device *xdev)
453 {
454 	struct dentry *dent;
455 
456 	dpdma_debugfs.testcase = DPDMA_TC_NONE;
457 
458 	dent = debugfs_create_file("testcase", 0444, xdev->common.dbg_dev_root,
459 				   NULL, &fops_xilinx_dpdma_dbgfs);
460 	if (IS_ERR(dent))
461 		dev_err(xdev->dev, "Failed to create debugfs testcase file\n");
462 }
463 
464 #else
465 static void xilinx_dpdma_debugfs_init(struct xilinx_dpdma_device *xdev)
466 {
467 }
468 
469 static void xilinx_dpdma_debugfs_desc_done_irq(struct xilinx_dpdma_chan *chan)
470 {
471 }
472 #endif /* CONFIG_DEBUG_FS */
473 
474 /* -----------------------------------------------------------------------------
475  * I/O Accessors
476  */
477 
478 static inline u32 dpdma_read(void __iomem *base, u32 offset)
479 {
480 	return ioread32(base + offset);
481 }
482 
483 static inline void dpdma_write(void __iomem *base, u32 offset, u32 val)
484 {
485 	iowrite32(val, base + offset);
486 }
487 
488 static inline void dpdma_clr(void __iomem *base, u32 offset, u32 clr)
489 {
490 	dpdma_write(base, offset, dpdma_read(base, offset) & ~clr);
491 }
492 
493 static inline void dpdma_set(void __iomem *base, u32 offset, u32 set)
494 {
495 	dpdma_write(base, offset, dpdma_read(base, offset) | set);
496 }
497 
498 /* -----------------------------------------------------------------------------
499  * Descriptor Operations
500  */
501 
502 /**
503  * xilinx_dpdma_sw_desc_set_dma_addrs - Set DMA addresses in the descriptor
504  * @xdev: DPDMA device
505  * @sw_desc: The software descriptor in which to set DMA addresses
506  * @prev: The previous descriptor
507  * @dma_addr: array of dma addresses
508  * @num_src_addr: number of addresses in @dma_addr
509  *
510  * Set all the DMA addresses in the hardware descriptor corresponding to @sw_desc
511  * from @dma_addr. If a previous descriptor is specified in @prev, its next
512  * descriptor DMA address is set to the DMA address of @sw_desc. @prev may be
513  * identical to @sw_desc for cyclic transfers.
514  */
515 static void xilinx_dpdma_sw_desc_set_dma_addrs(struct xilinx_dpdma_device *xdev,
516 					       struct xilinx_dpdma_sw_desc *sw_desc,
517 					       struct xilinx_dpdma_sw_desc *prev,
518 					       dma_addr_t dma_addr[],
519 					       unsigned int num_src_addr)
520 {
521 	struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;
522 	unsigned int i;
523 
524 	hw_desc->src_addr = lower_32_bits(dma_addr[0]);
525 	if (xdev->ext_addr)
526 		hw_desc->addr_ext |=
527 			FIELD_PREP(XILINX_DPDMA_DESC_ADDR_EXT_SRC_ADDR_MASK,
528 				   upper_32_bits(dma_addr[0]));
529 
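	/*
	 * The remaining page addresses go into src_addr2..src_addr5, which are
	 * consecutive u32 fields in the hardware descriptor and can therefore
	 * be indexed as an array. Their upper 16 bits are packed in pairs into
	 * addr_ext_23 and addr_ext_45, lower half-word first.
	 */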
530 	for (i = 1; i < num_src_addr; i++) {
531 		u32 *addr = &hw_desc->src_addr2;
532 
533 		addr[i-1] = lower_32_bits(dma_addr[i]);
534 
535 		if (xdev->ext_addr) {
536 			u32 *addr_ext = &hw_desc->addr_ext_23;
537 			u32 addr_msb;
538 
539 			addr_msb = upper_32_bits(dma_addr[i]) & GENMASK(15, 0);
540 			addr_msb <<= 16 * ((i - 1) % 2);
541 			addr_ext[(i - 1) / 2] |= addr_msb;
542 		}
543 	}
544 
545 	if (!prev)
546 		return;
547 
548 	prev->hw.next_desc = lower_32_bits(sw_desc->dma_addr);
549 	if (xdev->ext_addr)
550 		prev->hw.addr_ext |=
551 			FIELD_PREP(XILINX_DPDMA_DESC_ADDR_EXT_NEXT_ADDR_MASK,
552 				   upper_32_bits(sw_desc->dma_addr));
553 }
554 
555 /**
556  * xilinx_dpdma_chan_alloc_sw_desc - Allocate a software descriptor
557  * @chan: DPDMA channel
558  *
559  * Allocate a software descriptor from the channel's descriptor pool.
560  *
561  * Return: a software descriptor or NULL.
562  */
563 static struct xilinx_dpdma_sw_desc *
564 xilinx_dpdma_chan_alloc_sw_desc(struct xilinx_dpdma_chan *chan)
565 {
566 	struct xilinx_dpdma_sw_desc *sw_desc;
567 	dma_addr_t dma_addr;
568 
569 	sw_desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &dma_addr);
570 	if (!sw_desc)
571 		return NULL;
572 
573 	sw_desc->dma_addr = dma_addr;
574 
575 	return sw_desc;
576 }
577 
578 /**
579  * xilinx_dpdma_chan_free_sw_desc - Free a software descriptor
580  * @chan: DPDMA channel
581  * @sw_desc: software descriptor to free
582  *
583  * Free a software descriptor from the channel's descriptor pool.
584  */
585 static void
586 xilinx_dpdma_chan_free_sw_desc(struct xilinx_dpdma_chan *chan,
587 			       struct xilinx_dpdma_sw_desc *sw_desc)
588 {
589 	dma_pool_free(chan->desc_pool, sw_desc, sw_desc->dma_addr);
590 }
591 
592 /**
593  * xilinx_dpdma_chan_dump_tx_desc - Dump a tx descriptor
594  * @chan: DPDMA channel
595  * @tx_desc: tx descriptor to dump
596  *
597  * Dump contents of a tx descriptor
598  */
599 static void xilinx_dpdma_chan_dump_tx_desc(struct xilinx_dpdma_chan *chan,
600 					   struct xilinx_dpdma_tx_desc *tx_desc)
601 {
602 	struct xilinx_dpdma_sw_desc *sw_desc;
603 	struct device *dev = chan->xdev->dev;
604 	unsigned int i = 0;
605 
606 	dev_dbg(dev, "------- TX descriptor dump start -------\n");
607 	dev_dbg(dev, "------- channel ID = %d -------\n", chan->id);
608 
609 	list_for_each_entry(sw_desc, &tx_desc->descriptors, node) {
610 		struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;
611 
612 		dev_dbg(dev, "------- HW descriptor %d -------\n", i++);
613 		dev_dbg(dev, "descriptor DMA addr: %pad\n", &sw_desc->dma_addr);
614 		dev_dbg(dev, "control: 0x%08x\n", hw_desc->control);
615 		dev_dbg(dev, "desc_id: 0x%08x\n", hw_desc->desc_id);
616 		dev_dbg(dev, "xfer_size: 0x%08x\n", hw_desc->xfer_size);
617 		dev_dbg(dev, "hsize_stride: 0x%08x\n", hw_desc->hsize_stride);
618 		dev_dbg(dev, "timestamp_lsb: 0x%08x\n", hw_desc->timestamp_lsb);
619 		dev_dbg(dev, "timestamp_msb: 0x%08x\n", hw_desc->timestamp_msb);
620 		dev_dbg(dev, "addr_ext: 0x%08x\n", hw_desc->addr_ext);
621 		dev_dbg(dev, "next_desc: 0x%08x\n", hw_desc->next_desc);
622 		dev_dbg(dev, "src_addr: 0x%08x\n", hw_desc->src_addr);
623 		dev_dbg(dev, "addr_ext_23: 0x%08x\n", hw_desc->addr_ext_23);
624 		dev_dbg(dev, "addr_ext_45: 0x%08x\n", hw_desc->addr_ext_45);
625 		dev_dbg(dev, "src_addr2: 0x%08x\n", hw_desc->src_addr2);
626 		dev_dbg(dev, "src_addr3: 0x%08x\n", hw_desc->src_addr3);
627 		dev_dbg(dev, "src_addr4: 0x%08x\n", hw_desc->src_addr4);
628 		dev_dbg(dev, "src_addr5: 0x%08x\n", hw_desc->src_addr5);
629 		dev_dbg(dev, "crc: 0x%08x\n", hw_desc->crc);
630 	}
631 
632 	dev_dbg(dev, "------- TX descriptor dump end -------\n");
633 }
634 
635 /**
636  * xilinx_dpdma_chan_alloc_tx_desc - Allocate a transaction descriptor
637  * @chan: DPDMA channel
638  *
639  * Allocate a tx descriptor.
640  *
641  * Return: a tx descriptor or NULL.
642  */
643 static struct xilinx_dpdma_tx_desc *
644 xilinx_dpdma_chan_alloc_tx_desc(struct xilinx_dpdma_chan *chan)
645 {
646 	struct xilinx_dpdma_tx_desc *tx_desc;
647 
648 	tx_desc = kzalloc(sizeof(*tx_desc), GFP_NOWAIT);
649 	if (!tx_desc)
650 		return NULL;
651 
652 	INIT_LIST_HEAD(&tx_desc->descriptors);
653 	tx_desc->chan = chan;
654 	tx_desc->error = false;
655 
656 	return tx_desc;
657 }
658 
659 /**
660  * xilinx_dpdma_chan_free_tx_desc - Free a virtual DMA descriptor
661  * @vdesc: virtual DMA descriptor
662  *
663  * Free the virtual DMA descriptor @vdesc including its software descriptors.
664  */
665 static void xilinx_dpdma_chan_free_tx_desc(struct virt_dma_desc *vdesc)
666 {
667 	struct xilinx_dpdma_sw_desc *sw_desc, *next;
668 	struct xilinx_dpdma_tx_desc *desc;
669 
670 	if (!vdesc)
671 		return;
672 
673 	desc = to_dpdma_tx_desc(vdesc);
674 
675 	list_for_each_entry_safe(sw_desc, next, &desc->descriptors, node) {
676 		list_del(&sw_desc->node);
677 		xilinx_dpdma_chan_free_sw_desc(desc->chan, sw_desc);
678 	}
679 
680 	kfree(desc);
681 }
682 
683 /**
684  * xilinx_dpdma_chan_prep_interleaved_dma - Prepare an interleaved dma
685  *					    descriptor
686  * @chan: DPDMA channel
687  * @xt: dma interleaved template
688  *
689  * Prepare a tx descriptor including internal software/hardware descriptors
690  * based on @xt.
691  *
692  * Return: A DPDMA TX descriptor on success, or NULL.
693  */
694 static struct xilinx_dpdma_tx_desc *
695 xilinx_dpdma_chan_prep_interleaved_dma(struct xilinx_dpdma_chan *chan,
696 				       struct dma_interleaved_template *xt)
697 {
698 	struct xilinx_dpdma_tx_desc *tx_desc;
699 	struct xilinx_dpdma_sw_desc *sw_desc;
700 	struct xilinx_dpdma_hw_desc *hw_desc;
701 	size_t hsize = xt->sgl[0].size;
702 	size_t stride = hsize + xt->sgl[0].icg;
703 
704 	if (!IS_ALIGNED(xt->src_start, XILINX_DPDMA_ALIGN_BYTES)) {
705 		dev_err(chan->xdev->dev, "buffer should be aligned at %d B\n",
706 			XILINX_DPDMA_ALIGN_BYTES);
707 		return NULL;
708 	}
709 
710 	tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
711 	if (!tx_desc)
712 		return NULL;
713 
714 	sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
715 	if (!sw_desc) {
716 		xilinx_dpdma_chan_free_tx_desc(&tx_desc->vdesc);
717 		return NULL;
718 	}
719 
720 	xilinx_dpdma_sw_desc_set_dma_addrs(chan->xdev, sw_desc, sw_desc,
721 					   &xt->src_start, 1);
722 
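	/*
	 * hsize is the line size in bytes, aligned to the 128-bit line size
	 * granularity, and xfer_size covers the whole frame (numf lines).
	 * Going by the division below, the stride is programmed in units of
	 * 16 bytes.
	 */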
723 	hw_desc = &sw_desc->hw;
724 	hsize = ALIGN(hsize, XILINX_DPDMA_LINESIZE_ALIGN_BITS / 8);
725 	hw_desc->xfer_size = hsize * xt->numf;
726 	hw_desc->hsize_stride =
727 		FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK, hsize) |
728 		FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK,
729 			   stride / 16);
730 	hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_PREEMBLE;
731 	hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;
732 	hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE;
733 	hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;
734 
735 	list_add_tail(&sw_desc->node, &tx_desc->descriptors);
736 
737 	return tx_desc;
738 }
739 
740 /* -----------------------------------------------------------------------------
741  * DPDMA Channel Operations
742  */
743 
744 /**
745  * xilinx_dpdma_chan_enable - Enable the channel
746  * @chan: DPDMA channel
747  *
748  * Enable the channel and its interrupts. Set the QoS values for video class.
749  */
750 static void xilinx_dpdma_chan_enable(struct xilinx_dpdma_chan *chan)
751 {
752 	u32 reg;
753 
754 	reg = (XILINX_DPDMA_INTR_CHAN_MASK << chan->id)
755 	    | XILINX_DPDMA_INTR_GLOBAL_MASK;
756 	dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN, reg);
757 	reg = (XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id)
758 	    | XILINX_DPDMA_INTR_GLOBAL_ERR;
759 	dpdma_write(chan->xdev->reg, XILINX_DPDMA_EIEN, reg);
760 
761 	reg = XILINX_DPDMA_CH_CNTL_ENABLE
762 	    | FIELD_PREP(XILINX_DPDMA_CH_CNTL_QOS_DSCR_WR_MASK,
763 			 XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS)
764 	    | FIELD_PREP(XILINX_DPDMA_CH_CNTL_QOS_DSCR_RD_MASK,
765 			 XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS)
766 	    | FIELD_PREP(XILINX_DPDMA_CH_CNTL_QOS_DATA_RD_MASK,
767 			 XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS);
768 	dpdma_set(chan->reg, XILINX_DPDMA_CH_CNTL, reg);
769 }
770 
771 /**
772  * xilinx_dpdma_chan_disable - Disable the channel
773  * @chan: DPDMA channel
774  *
775  * Disable the channel and its interrupts.
776  */
777 static void xilinx_dpdma_chan_disable(struct xilinx_dpdma_chan *chan)
778 {
779 	u32 reg;
780 
781 	reg = XILINX_DPDMA_INTR_CHAN_MASK << chan->id;
782 	dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN, reg);
783 	reg = XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id;
784 	dpdma_write(chan->xdev->reg, XILINX_DPDMA_EIEN, reg);
785 
786 	dpdma_clr(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_ENABLE);
787 }
788 
789 /**
790  * xilinx_dpdma_chan_pause - Pause the channel
791  * @chan: DPDMA channel
792  *
793  * Pause the channel.
794  */
795 static void xilinx_dpdma_chan_pause(struct xilinx_dpdma_chan *chan)
796 {
797 	dpdma_set(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_PAUSE);
798 }
799 
800 /**
801  * xilinx_dpdma_chan_unpause - Unpause the channel
802  * @chan: DPDMA channel
803  *
804  * Unpause the channel.
805  */
806 static void xilinx_dpdma_chan_unpause(struct xilinx_dpdma_chan *chan)
807 {
808 	dpdma_clr(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_PAUSE);
809 }
810 
811 static u32 xilinx_dpdma_chan_video_group_ready(struct xilinx_dpdma_chan *chan)
812 {
813 	struct xilinx_dpdma_device *xdev = chan->xdev;
814 	u32 channels = 0;
815 	unsigned int i;
816 
817 	for (i = ZYNQMP_DPDMA_VIDEO0; i <= ZYNQMP_DPDMA_VIDEO2; i++) {
818 		if (xdev->chan[i]->video_group && !xdev->chan[i]->running)
819 			return 0;
820 
821 		if (xdev->chan[i]->video_group)
822 			channels |= BIT(i);
823 	}
824 
825 	return channels;
826 }
827 
828 /**
829  * xilinx_dpdma_chan_queue_transfer - Queue the next transfer
830  * @chan: DPDMA channel
831  *
832  * Queue the next descriptor, if any, to the hardware. If the channel is
833  * stopped, start it first. Otherwise retrigger it with the next descriptor.
834  */
835 static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
836 {
837 	struct xilinx_dpdma_device *xdev = chan->xdev;
838 	struct xilinx_dpdma_sw_desc *sw_desc;
839 	struct xilinx_dpdma_tx_desc *desc;
840 	struct virt_dma_desc *vdesc;
841 	u32 reg, channels;
842 	bool first_frame;
843 
844 	lockdep_assert_held(&chan->lock);
845 
846 	if (chan->desc.pending)
847 		return;
848 
849 	if (!chan->running) {
850 		xilinx_dpdma_chan_unpause(chan);
851 		xilinx_dpdma_chan_enable(chan);
852 		chan->first_frame = true;
853 		chan->running = true;
854 	}
855 
856 	vdesc = vchan_next_desc(&chan->vchan);
857 	if (!vdesc)
858 		return;
859 
860 	desc = to_dpdma_tx_desc(vdesc);
861 	chan->desc.pending = desc;
862 	list_del(&desc->vdesc.node);
863 
864 	/*
865 	 * Assign the cookie to descriptors in this transaction. Only the lower
866 	 * 16 bits will be used, but that should be enough.
867 	 */
868 	list_for_each_entry(sw_desc, &desc->descriptors, node)
869 		sw_desc->hw.desc_id = desc->vdesc.tx.cookie;
870 
871 	sw_desc = list_first_entry(&desc->descriptors,
872 				   struct xilinx_dpdma_sw_desc, node);
873 	dpdma_write(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDR,
874 		    lower_32_bits(sw_desc->dma_addr));
875 	if (xdev->ext_addr)
876 		dpdma_write(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDRE,
877 			    FIELD_PREP(XILINX_DPDMA_CH_DESC_START_ADDRE_MASK,
878 				       upper_32_bits(sw_desc->dma_addr)));
879 
880 	first_frame = chan->first_frame;
881 	chan->first_frame = false;
882 
883 	if (chan->video_group) {
884 		channels = xilinx_dpdma_chan_video_group_ready(chan);
885 		/*
886 		 * Trigger the transfer only when all channels in the group are
887 		 * ready.
888 		 */
889 		if (!channels)
890 			return;
891 	} else {
892 		channels = BIT(chan->id);
893 	}
894 
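	/*
	 * The first frame needs an explicit trigger to start descriptor
	 * fetching; subsequent frames use the retrigger mask so that the new
	 * descriptor is picked up at the next frame boundary (see
	 * xilinx_dpdma_chan_vsync_irq()).
	 */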
895 	if (first_frame)
896 		reg = XILINX_DPDMA_GBL_TRIG_MASK(channels);
897 	else
898 		reg = XILINX_DPDMA_GBL_RETRIG_MASK(channels);
899 
900 	dpdma_write(xdev->reg, XILINX_DPDMA_GBL, reg);
901 }
902 
903 /**
904  * xilinx_dpdma_chan_ostand - Number of outstanding transactions
905  * @chan: DPDMA channel
906  *
907  * Read and return the number of outstanding transactions from the register.
908  *
909  * Return: Number of outstanding transactions from the status register.
910  */
911 static u32 xilinx_dpdma_chan_ostand(struct xilinx_dpdma_chan *chan)
912 {
913 	return FIELD_GET(XILINX_DPDMA_CH_STATUS_OTRAN_CNT_MASK,
914 			 dpdma_read(chan->reg, XILINX_DPDMA_CH_STATUS));
915 }
916 
917 /**
918  * xilinx_dpdma_chan_no_ostand - Notify no outstanding transaction event
919  * @chan: DPDMA channel
920  *
921  * Notify waiters of the 'no outstanding transactions' event, so that they
922  * can stop the channel safely. This function is supposed to be called when
923  * the 'no outstanding' interrupt is generated. That interrupt is disabled and
924  * should be re-enabled when this event is handled. If the channel status
925  * register still shows some number of outstanding transactions, the interrupt
926  * remains enabled.
927  *
928  * Return: 0 on success. On failure, -EWOULDBLOCK if there's still outstanding
929  * transaction(s).
930  */
931 static int xilinx_dpdma_chan_notify_no_ostand(struct xilinx_dpdma_chan *chan)
932 {
933 	u32 cnt;
934 
935 	cnt = xilinx_dpdma_chan_ostand(chan);
936 	if (cnt) {
937 		dev_dbg(chan->xdev->dev, "%d outstanding transactions\n", cnt);
938 		return -EWOULDBLOCK;
939 	}
940 
941 	/* Disable 'no outstanding' interrupt */
942 	dpdma_write(chan->xdev->reg, XILINX_DPDMA_IDS,
943 		    XILINX_DPDMA_INTR_NO_OSTAND(chan->id));
944 	wake_up(&chan->wait_to_stop);
945 
946 	return 0;
947 }
948 
949 /**
950  * xilinx_dpdma_chan_wait_no_ostand - Wait for the no outstanding irq
951  * @chan: DPDMA channel
952  *
953  * Wait for the 'no outstanding transactions' interrupt. This function can
954  * sleep for up to 50 ms.
955  *
956  * Return: 0 on success. On failure, -ETIMEDOUT on timeout, or the error code
957  * from wait_event_interruptible_timeout().
958  */
959 static int xilinx_dpdma_chan_wait_no_ostand(struct xilinx_dpdma_chan *chan)
960 {
961 	int ret;
962 
963 	/* Wait for the 'no outstanding transactions' interrupt for up to 50 ms. */
964 	ret = wait_event_interruptible_timeout(chan->wait_to_stop,
965 					       !xilinx_dpdma_chan_ostand(chan),
966 					       msecs_to_jiffies(50));
967 	if (ret > 0) {
968 		dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN,
969 			    XILINX_DPDMA_INTR_NO_OSTAND(chan->id));
970 		return 0;
971 	}
972 
973 	dev_err(chan->xdev->dev, "not ready to stop: %d trans\n",
974 		xilinx_dpdma_chan_ostand(chan));
975 
976 	if (ret == 0)
977 		return -ETIMEDOUT;
978 
979 	return ret;
980 }
981 
982 /**
983  * xilinx_dpdma_chan_poll_no_ostand - Poll the outstanding transaction status
984  * @chan: DPDMA channel
985  *
986  * Poll the outstanding transaction status, and return when there's no
987  * outstanding transaction. This function can be used in interrupt context or
988  * where atomicity is required. The calling thread may wait for more than 50 ms.
989  *
990  * Return: 0 on success, or -ETIMEDOUT.
991  */
992 static int xilinx_dpdma_chan_poll_no_ostand(struct xilinx_dpdma_chan *chan)
993 {
994 	u32 cnt, loop = 50000;
995 
996 	/* Poll at least for 50ms (20 fps). */
997 	do {
998 		cnt = xilinx_dpdma_chan_ostand(chan);
999 		udelay(1);
1000 	} while (loop-- > 0 && cnt);
1001 
1002 	if (!cnt) {
1003 		dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN,
1004 			    XILINX_DPDMA_INTR_NO_OSTAND(chan->id));
1005 		return 0;
1006 	}
1007 
1008 	dev_err(chan->xdev->dev, "not ready to stop: %d trans\n",
1009 		xilinx_dpdma_chan_ostand(chan));
1010 
1011 	return -ETIMEDOUT;
1012 }
1013 
1014 /**
1015  * xilinx_dpdma_chan_stop - Stop the channel
1016  * @chan: DPDMA channel
1017  *
1018  * Stop a previously paused channel by first waiting for completion of all
1019  * outstanding transactions and then disabling the channel.
1020  *
1021  * Return: 0 on success, or -ETIMEDOUT if the channel failed to stop.
1022  */
1023 static int xilinx_dpdma_chan_stop(struct xilinx_dpdma_chan *chan)
1024 {
1025 	unsigned long flags;
1026 	int ret;
1027 
1028 	ret = xilinx_dpdma_chan_wait_no_ostand(chan);
1029 	if (ret)
1030 		return ret;
1031 
1032 	spin_lock_irqsave(&chan->lock, flags);
1033 	xilinx_dpdma_chan_disable(chan);
1034 	chan->running = false;
1035 	spin_unlock_irqrestore(&chan->lock, flags);
1036 
1037 	return 0;
1038 }
1039 
1040 /**
1041  * xilinx_dpdma_chan_done_irq - Handle hardware descriptor completion
1042  * @chan: DPDMA channel
1043  *
1044  * Handle completion of the currently active descriptor (@chan->desc.active). As
1045  * we currently support cyclic transfers only, this just invokes the cyclic
1046  * callback. The descriptor will be completed at the VSYNC interrupt when a new
1047  * descriptor replaces it.
1048  */
1049 static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan)
1050 {
1051 	struct xilinx_dpdma_tx_desc *active;
1052 	unsigned long flags;
1053 
1054 	spin_lock_irqsave(&chan->lock, flags);
1055 
1056 	xilinx_dpdma_debugfs_desc_done_irq(chan);
1057 
1058 	active = chan->desc.active;
1059 	if (active)
1060 		vchan_cyclic_callback(&active->vdesc);
1061 	else
1062 		dev_warn(chan->xdev->dev,
1063 			 "DONE IRQ with no active descriptor!\n");
1064 
1065 	spin_unlock_irqrestore(&chan->lock, flags);
1066 }
1067 
1068 /**
1069  * xilinx_dpdma_chan_vsync_irq - Handle hardware descriptor scheduling
1070  * @chan: DPDMA channel
1071  *
1072  * At VSYNC the active descriptor may have been replaced by the pending
1073  * descriptor. Detect this through the DESC_ID and perform appropriate
1074  * bookkeeping.
1075  */
1076 static void xilinx_dpdma_chan_vsync_irq(struct  xilinx_dpdma_chan *chan)
1077 {
1078 	struct xilinx_dpdma_tx_desc *pending;
1079 	struct xilinx_dpdma_sw_desc *sw_desc;
1080 	unsigned long flags;
1081 	u32 desc_id;
1082 
1083 	spin_lock_irqsave(&chan->lock, flags);
1084 
1085 	pending = chan->desc.pending;
1086 	if (!chan->running || !pending)
1087 		goto out;
1088 
1089 	desc_id = dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_ID);
1090 
1091 	/* If the retrigger raced with vsync, retry at the next frame. */
1092 	sw_desc = list_first_entry(&pending->descriptors,
1093 				   struct xilinx_dpdma_sw_desc, node);
1094 	if (sw_desc->hw.desc_id != desc_id)
1095 		goto out;
1096 
1097 	/*
1098 	 * Complete the active descriptor, if any, promote the pending
1099 	 * descriptor to active, and queue the next transfer, if any.
1100 	 */
1101 	if (chan->desc.active)
1102 		vchan_cookie_complete(&chan->desc.active->vdesc);
1103 	chan->desc.active = pending;
1104 	chan->desc.pending = NULL;
1105 
1106 	xilinx_dpdma_chan_queue_transfer(chan);
1107 
1108 out:
1109 	spin_unlock_irqrestore(&chan->lock, flags);
1110 }
1111 
1112 /**
1113  * xilinx_dpdma_chan_err - Detect any channel error
1114  * @chan: DPDMA channel
1115  * @isr: masked Interrupt Status Register
1116  * @eisr: Error Interrupt Status Register
1117  *
1118  * Return: true if any channel error occurs, or false otherwise.
1119  */
1120 static bool
1121 xilinx_dpdma_chan_err(struct xilinx_dpdma_chan *chan, u32 isr, u32 eisr)
1122 {
1123 	if (!chan)
1124 		return false;
1125 
1126 	if (chan->running &&
1127 	    ((isr & (XILINX_DPDMA_INTR_CHAN_ERR_MASK << chan->id)) ||
1128 	    (eisr & (XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id))))
1129 		return true;
1130 
1131 	return false;
1132 }
1133 
1134 /**
1135  * xilinx_dpdma_chan_handle_err - DPDMA channel error handling
1136  * @chan: DPDMA channel
1137  *
1138  * This function is called when any channel error or any global error occurs.
1139  * The function disables the channel that has been paused due to errors and
1140  * determines whether the currently active descriptor can be rescheduled,
1141  * depending on the descriptor status.
1142  */
1143 static void xilinx_dpdma_chan_handle_err(struct xilinx_dpdma_chan *chan)
1144 {
1145 	struct xilinx_dpdma_device *xdev = chan->xdev;
1146 	struct xilinx_dpdma_tx_desc *active;
1147 	unsigned long flags;
1148 
1149 	spin_lock_irqsave(&chan->lock, flags);
1150 
1151 	dev_dbg(xdev->dev, "cur desc addr = 0x%04x%08x\n",
1152 		dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDRE),
1153 		dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDR));
1154 	dev_dbg(xdev->dev, "cur payload addr = 0x%04x%08x\n",
1155 		dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDRE),
1156 		dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDR));
1157 
1158 	xilinx_dpdma_chan_disable(chan);
1159 	chan->running = false;
1160 
1161 	if (!chan->desc.active)
1162 		goto out_unlock;
1163 
1164 	active = chan->desc.active;
1165 	chan->desc.active = NULL;
1166 
1167 	xilinx_dpdma_chan_dump_tx_desc(chan, active);
1168 
1169 	if (active->error)
1170 		dev_dbg(xdev->dev, "repeated error on desc\n");
1171 
1172 	/* Reschedule if there's no new descriptor */
1173 	if (!chan->desc.pending &&
1174 	    list_empty(&chan->vchan.desc_issued)) {
1175 		active->error = true;
1176 		list_add_tail(&active->vdesc.node,
1177 			      &chan->vchan.desc_issued);
1178 	} else {
1179 		xilinx_dpdma_chan_free_tx_desc(&active->vdesc);
1180 	}
1181 
1182 out_unlock:
1183 	spin_unlock_irqrestore(&chan->lock, flags);
1184 }
1185 
1186 /* -----------------------------------------------------------------------------
1187  * DMA Engine Operations
1188  */
1189 
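/*
 * Client-side usage sketch (not part of this driver): roughly how a display
 * driver could feed a framebuffer to a DPDMA channel through the generic
 * dmaengine API, given a channel obtained with dma_request_chan().
 * fb_dma_addr, width_bytes, stride and height are placeholders, and error
 * handling is omitted.
 *
 *	xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
 *	xt->dir = DMA_MEM_TO_DEV;
 *	xt->src_start = fb_dma_addr;
 *	xt->numf = height;
 *	xt->frame_size = 1;
 *	xt->src_sgl = true;
 *	xt->sgl[0].size = width_bytes;
 *	xt->sgl[0].icg = stride - width_bytes;
 *
 *	tx = dmaengine_prep_interleaved_dma(chan, xt,
 *					    DMA_PREP_REPEAT | DMA_PREP_LOAD_EOT);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */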
1190 static struct dma_async_tx_descriptor *
1191 xilinx_dpdma_prep_interleaved_dma(struct dma_chan *dchan,
1192 				  struct dma_interleaved_template *xt,
1193 				  unsigned long flags)
1194 {
1195 	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
1196 	struct xilinx_dpdma_tx_desc *desc;
1197 
1198 	if (xt->dir != DMA_MEM_TO_DEV)
1199 		return NULL;
1200 
1201 	if (!xt->numf || !xt->sgl[0].size)
1202 		return NULL;
1203 
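	/*
	 * Only repeated transfers are supported (the frame is replayed until a
	 * new descriptor is issued), so clients must set both DMA_PREP_REPEAT
	 * and DMA_PREP_LOAD_EOT.
	 */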
1204 	if (!(flags & DMA_PREP_REPEAT) || !(flags & DMA_PREP_LOAD_EOT))
1205 		return NULL;
1206 
1207 	desc = xilinx_dpdma_chan_prep_interleaved_dma(chan, xt);
1208 	if (!desc)
1209 		return NULL;
1210 
1211 	vchan_tx_prep(&chan->vchan, &desc->vdesc, flags | DMA_CTRL_ACK);
1212 
1213 	return &desc->vdesc.tx;
1214 }
1215 
1216 /**
1217  * xilinx_dpdma_alloc_chan_resources - Allocate resources for the channel
1218  * @dchan: DMA channel
1219  *
1220  * Allocate a descriptor pool for the channel.
1221  *
1222  * Return: 0 on success, or -ENOMEM if failed to allocate a pool.
1223  */
1224 static int xilinx_dpdma_alloc_chan_resources(struct dma_chan *dchan)
1225 {
1226 	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
1227 	size_t align = __alignof__(struct xilinx_dpdma_sw_desc);
1228 
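	/*
	 * The software descriptor embeds the hardware descriptor, which is
	 * declared __aligned(XILINX_DPDMA_ALIGN_BYTES), so the pool alignment
	 * matches the 256-byte alignment required by the hardware.
	 */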
1229 	chan->desc_pool = dma_pool_create(dev_name(chan->xdev->dev),
1230 					  chan->xdev->dev,
1231 					  sizeof(struct xilinx_dpdma_sw_desc),
1232 					  align, 0);
1233 	if (!chan->desc_pool) {
1234 		dev_err(chan->xdev->dev,
1235 			"failed to allocate a descriptor pool\n");
1236 		return -ENOMEM;
1237 	}
1238 
1239 	return 0;
1240 }
1241 
1242 /**
1243  * xilinx_dpdma_free_chan_resources - Free all resources for the channel
1244  * @dchan: DMA channel
1245  *
1246  * Free resources associated with the virtual DMA channel, and destroy the
1247  * descriptor pool.
1248  */
1249 static void xilinx_dpdma_free_chan_resources(struct dma_chan *dchan)
1250 {
1251 	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
1252 
1253 	vchan_free_chan_resources(&chan->vchan);
1254 
1255 	dma_pool_destroy(chan->desc_pool);
1256 	chan->desc_pool = NULL;
1257 }
1258 
1259 static void xilinx_dpdma_issue_pending(struct dma_chan *dchan)
1260 {
1261 	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
1262 	unsigned long flags;
1263 
1264 	spin_lock_irqsave(&chan->vchan.lock, flags);
1265 	if (vchan_issue_pending(&chan->vchan))
1266 		xilinx_dpdma_chan_queue_transfer(chan);
1267 	spin_unlock_irqrestore(&chan->vchan.lock, flags);
1268 }
1269 
1270 static int xilinx_dpdma_config(struct dma_chan *dchan,
1271 			       struct dma_slave_config *config)
1272 {
1273 	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
1274 	unsigned long flags;
1275 
1276 	/*
1277 	 * The destination address doesn't need to be specified as the DPDMA is
1278 	 * hardwired to the destination (the DP controller). The transfer
1279 	 * width, burst size and port window size are thus meaningless; they're
1280 	 * fixed both on the DPDMA side and on the DP controller side.
1281 	 */
1282 
1283 	spin_lock_irqsave(&chan->lock, flags);
1284 
1285 	/*
1286 	 * Abuse the slave_id to indicate that the channel is part of a video
1287 	 * group.
1288 	 */
1289 	if (chan->id <= ZYNQMP_DPDMA_VIDEO2)
1290 		chan->video_group = config->slave_id != 0;
1291 
1292 	spin_unlock_irqrestore(&chan->lock, flags);
1293 
1294 	return 0;
1295 }
1296 
1297 static int xilinx_dpdma_pause(struct dma_chan *dchan)
1298 {
1299 	xilinx_dpdma_chan_pause(to_xilinx_chan(dchan));
1300 
1301 	return 0;
1302 }
1303 
1304 static int xilinx_dpdma_resume(struct dma_chan *dchan)
1305 {
1306 	xilinx_dpdma_chan_unpause(to_xilinx_chan(dchan));
1307 
1308 	return 0;
1309 }
1310 
1311 /**
1312  * xilinx_dpdma_terminate_all - Terminate the channel and descriptors
1313  * @dchan: DMA channel
1314  *
1315  * Pause the channel without waiting for ongoing transfers to complete. Waiting
1316  * for completion is performed by xilinx_dpdma_synchronize() that will disable
1317  * the channel to complete the stop.
1318  *
1319  * All the descriptors associated with the channel that are guaranteed not to
1320  * be touched by the hardware are freed. The pending and active descriptors
1321  * are not touched; they will be freed either upon completion, or by
1322  * xilinx_dpdma_synchronize().
1323  *
1324  * Return: 0 on success, or -ETIMEDOUT if the channel failed to stop.
1325  */
1326 static int xilinx_dpdma_terminate_all(struct dma_chan *dchan)
1327 {
1328 	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
1329 	struct xilinx_dpdma_device *xdev = chan->xdev;
1330 	LIST_HEAD(descriptors);
1331 	unsigned long flags;
1332 	unsigned int i;
1333 
1334 	/* Pause the channel (including the whole video group if applicable). */
1335 	if (chan->video_group) {
1336 		for (i = ZYNQMP_DPDMA_VIDEO0; i <= ZYNQMP_DPDMA_VIDEO2; i++) {
1337 			if (xdev->chan[i]->video_group &&
1338 			    xdev->chan[i]->running) {
1339 				xilinx_dpdma_chan_pause(xdev->chan[i]);
1340 				xdev->chan[i]->video_group = false;
1341 			}
1342 		}
1343 	} else {
1344 		xilinx_dpdma_chan_pause(chan);
1345 	}
1346 
1347 	/* Gather all the descriptors we can free and free them. */
1348 	spin_lock_irqsave(&chan->vchan.lock, flags);
1349 	vchan_get_all_descriptors(&chan->vchan, &descriptors);
1350 	spin_unlock_irqrestore(&chan->vchan.lock, flags);
1351 
1352 	vchan_dma_desc_free_list(&chan->vchan, &descriptors);
1353 
1354 	return 0;
1355 }
1356 
1357 /**
1358  * xilinx_dpdma_synchronize - Synchronize callback execution
1359  * @dchan: DMA channel
1360  *
1361  * Synchronizing callback execution ensures that all previously issued
1362  * transfers have completed and all associated callbacks have been called and
1363  * have returned.
1364  *
1365  * This function waits for the DMA channel to stop. It assumes it has been
1366  * paused by a previous call to dmaengine_terminate_async(), and that no new
1367  * pending descriptors have been issued with dma_async_issue_pending(). The
1368  * behaviour is undefined otherwise.
1369  */
1370 static void xilinx_dpdma_synchronize(struct dma_chan *dchan)
1371 {
1372 	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
1373 	unsigned long flags;
1374 
1375 	xilinx_dpdma_chan_stop(chan);
1376 
1377 	spin_lock_irqsave(&chan->vchan.lock, flags);
1378 	if (chan->desc.pending) {
1379 		vchan_terminate_vdesc(&chan->desc.pending->vdesc);
1380 		chan->desc.pending = NULL;
1381 	}
1382 	if (chan->desc.active) {
1383 		vchan_terminate_vdesc(&chan->desc.active->vdesc);
1384 		chan->desc.active = NULL;
1385 	}
1386 	spin_unlock_irqrestore(&chan->vchan.lock, flags);
1387 
1388 	vchan_synchronize(&chan->vchan);
1389 }
1390 
1391 /* -----------------------------------------------------------------------------
1392  * Interrupt and Tasklet Handling
1393  */
1394 
1395 /**
1396  * xilinx_dpdma_err - Detect any global error
1397  * @isr: Interrupt Status Register
1398  * @eisr: Error Interrupt Status Register
1399  *
1400  * Return: True if any global error occurs, or false otherwise.
1401  */
1402 static bool xilinx_dpdma_err(u32 isr, u32 eisr)
1403 {
1404 	if (isr & XILINX_DPDMA_INTR_GLOBAL_ERR ||
1405 	    eisr & XILINX_DPDMA_EINTR_GLOBAL_ERR)
1406 		return true;
1407 
1408 	return false;
1409 }
1410 
1411 /**
1412  * xilinx_dpdma_handle_err_irq - Handle DPDMA error interrupt
1413  * @xdev: DPDMA device
1414  * @isr: masked Interrupt Status Register
1415  * @eisr: Error Interrupt Status Register
1416  *
1417  * Handle any errors reported through @isr and @eisr. This function disables
1418  * the corresponding error interrupts; those should be re-enabled once handling
1419  * is done.
1420  */
1421 static void xilinx_dpdma_handle_err_irq(struct xilinx_dpdma_device *xdev,
1422 					u32 isr, u32 eisr)
1423 {
1424 	bool err = xilinx_dpdma_err(isr, eisr);
1425 	unsigned int i;
1426 
1427 	dev_dbg_ratelimited(xdev->dev,
1428 			    "error irq: isr = 0x%08x, eisr = 0x%08x\n",
1429 			    isr, eisr);
1430 
1431 	/* Disable channel error interrupts until errors are handled. */
1432 	dpdma_write(xdev->reg, XILINX_DPDMA_IDS,
1433 		    isr & ~XILINX_DPDMA_INTR_GLOBAL_ERR);
1434 	dpdma_write(xdev->reg, XILINX_DPDMA_EIDS,
1435 		    eisr & ~XILINX_DPDMA_EINTR_GLOBAL_ERR);
1436 
1437 	for (i = 0; i < ARRAY_SIZE(xdev->chan); i++)
1438 		if (err || xilinx_dpdma_chan_err(xdev->chan[i], isr, eisr))
1439 			tasklet_schedule(&xdev->chan[i]->err_task);
1440 }
1441 
1442 /**
1443  * xilinx_dpdma_enable_irq - Enable interrupts
1444  * @xdev: DPDMA device
1445  *
1446  * Enable interrupts.
1447  */
1448 static void xilinx_dpdma_enable_irq(struct xilinx_dpdma_device *xdev)
1449 {
1450 	dpdma_write(xdev->reg, XILINX_DPDMA_IEN, XILINX_DPDMA_INTR_ALL);
1451 	dpdma_write(xdev->reg, XILINX_DPDMA_EIEN, XILINX_DPDMA_EINTR_ALL);
1452 }
1453 
1454 /**
1455  * xilinx_dpdma_disable_irq - Disable interrupts
1456  * @xdev: DPDMA device
1457  *
1458  * Disable interrupts.
1459  */
1460 static void xilinx_dpdma_disable_irq(struct xilinx_dpdma_device *xdev)
1461 {
1462 	dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ERR_ALL);
1463 	dpdma_write(xdev->reg, XILINX_DPDMA_EIDS, XILINX_DPDMA_EINTR_ALL);
1464 }
1465 
1466 /**
1467  * xilinx_dpdma_chan_err_task - Per channel tasklet for error handling
1468  * @t: pointer to the tasklet associated with this handler
1469  *
1470  * Per-channel error handling tasklet. This function waits for outstanding
1471  * transactions to complete and triggers error handling. After error handling,
1472  * it re-enables the channel error interrupts and restarts the channel if needed.
1473  */
1474 static void xilinx_dpdma_chan_err_task(struct tasklet_struct *t)
1475 {
1476 	struct xilinx_dpdma_chan *chan = from_tasklet(chan, t, err_task);
1477 	struct xilinx_dpdma_device *xdev = chan->xdev;
1478 	unsigned long flags;
1479 
1480 	/* Proceed error handling even when polling fails. */
1481 	xilinx_dpdma_chan_poll_no_ostand(chan);
1482 
1483 	xilinx_dpdma_chan_handle_err(chan);
1484 
1485 	dpdma_write(xdev->reg, XILINX_DPDMA_IEN,
1486 		    XILINX_DPDMA_INTR_CHAN_ERR_MASK << chan->id);
1487 	dpdma_write(xdev->reg, XILINX_DPDMA_EIEN,
1488 		    XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id);
1489 
1490 	spin_lock_irqsave(&chan->lock, flags);
1491 	xilinx_dpdma_chan_queue_transfer(chan);
1492 	spin_unlock_irqrestore(&chan->lock, flags);
1493 }
1494 
1495 static irqreturn_t xilinx_dpdma_irq_handler(int irq, void *data)
1496 {
1497 	struct xilinx_dpdma_device *xdev = data;
1498 	unsigned long mask;
1499 	unsigned int i;
1500 	u32 status;
1501 	u32 error;
1502 
1503 	status = dpdma_read(xdev->reg, XILINX_DPDMA_ISR);
1504 	error = dpdma_read(xdev->reg, XILINX_DPDMA_EISR);
1505 	if (!status && !error)
1506 		return IRQ_NONE;
1507 
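	/* Writing the read status back acknowledges the handled interrupts. */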
1508 	dpdma_write(xdev->reg, XILINX_DPDMA_ISR, status);
1509 	dpdma_write(xdev->reg, XILINX_DPDMA_EISR, error);
1510 
1511 	if (status & XILINX_DPDMA_INTR_VSYNC) {
1512 		/*
1513 		 * There's a single VSYNC interrupt that needs to be processed
1514 		 * by each running channel to update the active descriptor.
1515 		 */
1516 		for (i = 0; i < ARRAY_SIZE(xdev->chan); i++) {
1517 			struct xilinx_dpdma_chan *chan = xdev->chan[i];
1518 
1519 			if (chan)
1520 				xilinx_dpdma_chan_vsync_irq(chan);
1521 		}
1522 	}
1523 
1524 	mask = FIELD_GET(XILINX_DPDMA_INTR_DESC_DONE_MASK, status);
1525 	if (mask) {
1526 		for_each_set_bit(i, &mask, ARRAY_SIZE(xdev->chan))
1527 			xilinx_dpdma_chan_done_irq(xdev->chan[i]);
1528 	}
1529 
1530 	mask = FIELD_GET(XILINX_DPDMA_INTR_NO_OSTAND_MASK, status);
1531 	if (mask) {
1532 		for_each_set_bit(i, &mask, ARRAY_SIZE(xdev->chan))
1533 			xilinx_dpdma_chan_notify_no_ostand(xdev->chan[i]);
1534 	}
1535 
1536 	mask = status & XILINX_DPDMA_INTR_ERR_ALL;
1537 	if (mask || error)
1538 		xilinx_dpdma_handle_err_irq(xdev, mask, error);
1539 
1540 	return IRQ_HANDLED;
1541 }
1542 
1543 /* -----------------------------------------------------------------------------
1544  * Initialization & Cleanup
1545  */
1546 
1547 static int xilinx_dpdma_chan_init(struct xilinx_dpdma_device *xdev,
1548 				  unsigned int chan_id)
1549 {
1550 	struct xilinx_dpdma_chan *chan;
1551 
1552 	chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
1553 	if (!chan)
1554 		return -ENOMEM;
1555 
1556 	chan->id = chan_id;
1557 	chan->reg = xdev->reg + XILINX_DPDMA_CH_BASE
1558 		  + XILINX_DPDMA_CH_OFFSET * chan->id;
1559 	chan->running = false;
1560 	chan->xdev = xdev;
1561 
1562 	spin_lock_init(&chan->lock);
1563 	init_waitqueue_head(&chan->wait_to_stop);
1564 
1565 	tasklet_setup(&chan->err_task, xilinx_dpdma_chan_err_task);
1566 
1567 	chan->vchan.desc_free = xilinx_dpdma_chan_free_tx_desc;
1568 	vchan_init(&chan->vchan, &xdev->common);
1569 
1570 	xdev->chan[chan->id] = chan;
1571 
1572 	return 0;
1573 }
1574 
1575 static void xilinx_dpdma_chan_remove(struct xilinx_dpdma_chan *chan)
1576 {
1577 	if (!chan)
1578 		return;
1579 
1580 	tasklet_kill(&chan->err_task);
1581 	list_del(&chan->vchan.chan.device_node);
1582 }
1583 
1584 static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
1585 					    struct of_dma *ofdma)
1586 {
1587 	struct xilinx_dpdma_device *xdev = ofdma->of_dma_data;
1588 	uint32_t chan_id = dma_spec->args[0];
1589 
1590 	if (chan_id >= ARRAY_SIZE(xdev->chan))
1591 		return NULL;
1592 
1593 	if (!xdev->chan[chan_id])
1594 		return NULL;
1595 
1596 	return dma_get_slave_channel(&xdev->chan[chan_id]->vchan.chan);
1597 }
1598 
1599 static int xilinx_dpdma_probe(struct platform_device *pdev)
1600 {
1601 	struct xilinx_dpdma_device *xdev;
1602 	struct dma_device *ddev;
1603 	unsigned int i;
1604 	int ret;
1605 
1606 	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
1607 	if (!xdev)
1608 		return -ENOMEM;
1609 
1610 	xdev->dev = &pdev->dev;
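	/*
	 * With DMA addresses wider than 32 bits, the upper 16 bits of the
	 * 48-bit descriptor and payload addresses are programmed through the
	 * *_ADDRE registers and the addr_ext descriptor fields.
	 */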
1611 	xdev->ext_addr = sizeof(dma_addr_t) > 4;
1612 
1613 	INIT_LIST_HEAD(&xdev->common.channels);
1614 
1615 	platform_set_drvdata(pdev, xdev);
1616 
1617 	xdev->axi_clk = devm_clk_get(xdev->dev, "axi_clk");
1618 	if (IS_ERR(xdev->axi_clk))
1619 		return PTR_ERR(xdev->axi_clk);
1620 
1621 	xdev->reg = devm_platform_ioremap_resource(pdev, 0);
1622 	if (IS_ERR(xdev->reg))
1623 		return PTR_ERR(xdev->reg);
1624 
1625 	xdev->irq = platform_get_irq(pdev, 0);
1626 	if (xdev->irq < 0) {
1627 		dev_err(xdev->dev, "failed to get platform irq\n");
1628 		return xdev->irq;
1629 	}
1630 
1631 	ret = request_irq(xdev->irq, xilinx_dpdma_irq_handler, IRQF_SHARED,
1632 			  dev_name(xdev->dev), xdev);
1633 	if (ret) {
1634 		dev_err(xdev->dev, "failed to request IRQ\n");
1635 		return ret;
1636 	}
1637 
1638 	ddev = &xdev->common;
1639 	ddev->dev = &pdev->dev;
1640 
1641 	dma_cap_set(DMA_SLAVE, ddev->cap_mask);
1642 	dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
1643 	dma_cap_set(DMA_INTERLEAVE, ddev->cap_mask);
1644 	dma_cap_set(DMA_REPEAT, ddev->cap_mask);
1645 	dma_cap_set(DMA_LOAD_EOT, ddev->cap_mask);
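	/* fls(256 - 1) == 8: advertise 256-byte (2^8) alignment to clients. */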
1646 	ddev->copy_align = fls(XILINX_DPDMA_ALIGN_BYTES - 1);
1647 
1648 	ddev->device_alloc_chan_resources = xilinx_dpdma_alloc_chan_resources;
1649 	ddev->device_free_chan_resources = xilinx_dpdma_free_chan_resources;
1650 	ddev->device_prep_interleaved_dma = xilinx_dpdma_prep_interleaved_dma;
1651 	/* TODO: Can we achieve better granularity ? */
1652 	ddev->device_tx_status = dma_cookie_status;
1653 	ddev->device_issue_pending = xilinx_dpdma_issue_pending;
1654 	ddev->device_config = xilinx_dpdma_config;
1655 	ddev->device_pause = xilinx_dpdma_pause;
1656 	ddev->device_resume = xilinx_dpdma_resume;
1657 	ddev->device_terminate_all = xilinx_dpdma_terminate_all;
1658 	ddev->device_synchronize = xilinx_dpdma_synchronize;
1659 	ddev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED);
1660 	ddev->directions = BIT(DMA_MEM_TO_DEV);
1661 	ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
1662 
1663 	for (i = 0; i < ARRAY_SIZE(xdev->chan); ++i) {
1664 		ret = xilinx_dpdma_chan_init(xdev, i);
1665 		if (ret < 0) {
1666 			dev_err(xdev->dev, "failed to initialize channel %u\n",
1667 				i);
1668 			goto error;
1669 		}
1670 	}
1671 
1672 	ret = clk_prepare_enable(xdev->axi_clk);
1673 	if (ret) {
1674 		dev_err(xdev->dev, "failed to enable the axi clock\n");
1675 		goto error;
1676 	}
1677 
1678 	ret = dma_async_device_register(ddev);
1679 	if (ret) {
1680 		dev_err(xdev->dev, "failed to register the dma device\n");
1681 		goto error_dma_async;
1682 	}
1683 
1684 	ret = of_dma_controller_register(xdev->dev->of_node,
1685 					 of_dma_xilinx_xlate, ddev);
1686 	if (ret) {
1687 		dev_err(xdev->dev, "failed to register DMA to DT DMA helper\n");
1688 		goto error_of_dma;
1689 	}
1690 
1691 	xilinx_dpdma_enable_irq(xdev);
1692 
1693 	xilinx_dpdma_debugfs_init(xdev);
1694 
1695 	dev_info(&pdev->dev, "Xilinx DPDMA engine is probed\n");
1696 
1697 	return 0;
1698 
1699 error_of_dma:
1700 	dma_async_device_unregister(ddev);
1701 error_dma_async:
1702 	clk_disable_unprepare(xdev->axi_clk);
1703 error:
1704 	for (i = 0; i < ARRAY_SIZE(xdev->chan); i++)
1705 		xilinx_dpdma_chan_remove(xdev->chan[i]);
1706 
1707 	free_irq(xdev->irq, xdev);
1708 
1709 	return ret;
1710 }
1711 
1712 static int xilinx_dpdma_remove(struct platform_device *pdev)
1713 {
1714 	struct xilinx_dpdma_device *xdev = platform_get_drvdata(pdev);
1715 	unsigned int i;
1716 
1717 	/* Start by disabling the IRQ to avoid races during cleanup. */
1718 	free_irq(xdev->irq, xdev);
1719 
1720 	xilinx_dpdma_disable_irq(xdev);
1721 	of_dma_controller_free(pdev->dev.of_node);
1722 	dma_async_device_unregister(&xdev->common);
1723 	clk_disable_unprepare(xdev->axi_clk);
1724 
1725 	for (i = 0; i < ARRAY_SIZE(xdev->chan); i++)
1726 		xilinx_dpdma_chan_remove(xdev->chan[i]);
1727 
1728 	return 0;
1729 }
1730 
1731 static const struct of_device_id xilinx_dpdma_of_match[] = {
1732 	{ .compatible = "xlnx,zynqmp-dpdma",},
1733 	{ /* end of table */ },
1734 };
1735 MODULE_DEVICE_TABLE(of, xilinx_dpdma_of_match);
1736 
1737 static struct platform_driver xilinx_dpdma_driver = {
1738 	.probe			= xilinx_dpdma_probe,
1739 	.remove			= xilinx_dpdma_remove,
1740 	.driver			= {
1741 		.name		= "xilinx-zynqmp-dpdma",
1742 		.of_match_table	= xilinx_dpdma_of_match,
1743 	},
1744 };
1745 
1746 module_platform_driver(xilinx_dpdma_driver);
1747 
1748 MODULE_AUTHOR("Xilinx, Inc.");
1749 MODULE_DESCRIPTION("Xilinx ZynqMP DPDMA driver");
1750 MODULE_LICENSE("GPL v2");
1751