// SPDX-License-Identifier: GPL-2.0
// (C) 2017-2018 Synopsys, Inc. (www.synopsys.com)

/*
 * Synopsys DesignWare AXI DMA Controller driver.
 *
 * Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "dw-axi-dmac.h"
#include "../dmaengine.h"
#include "../virt-dma.h"

/*
 * The set of bus widths supported by the DMA controller. DW AXI DMAC supports
 * master data bus width up to 512 bits (for both AXI master interfaces), but
 * it depends on IP block configuration.
 */
#define AXI_DMA_BUSWIDTHS		  \
	(DMA_SLAVE_BUSWIDTH_1_BYTE	| \
	DMA_SLAVE_BUSWIDTH_2_BYTES	| \
	DMA_SLAVE_BUSWIDTH_4_BYTES	| \
	DMA_SLAVE_BUSWIDTH_8_BYTES	| \
	DMA_SLAVE_BUSWIDTH_16_BYTES	| \
	DMA_SLAVE_BUSWIDTH_32_BYTES	| \
	DMA_SLAVE_BUSWIDTH_64_BYTES)

#define AXI_DMA_FLAG_HAS_APB_REGS	BIT(0)
#define AXI_DMA_FLAG_HAS_RESETS		BIT(1)
#define AXI_DMA_FLAG_USE_CFG2		BIT(2)
#define AXI_DMA_FLAG_ARG0_AS_CHAN	BIT(3)

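/*
 * These quirk flags are supplied through the .data field of the
 * of_device_id entries in dw_dma_of_id_table below and are decoded
 * in dw_probe().
 */
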
static inline void
axi_dma_iowrite32(struct axi_dma_chip *chip, u32 reg, u32 val)
{
	iowrite32(val, chip->regs + reg);
}

static inline u32 axi_dma_ioread32(struct axi_dma_chip *chip, u32 reg)
{
	return ioread32(chip->regs + reg);
}

static inline void
axi_dma_iowrite64(struct axi_dma_chip *chip, u32 reg, u64 val)
{
	iowrite64(val, chip->regs + reg);
}

static inline u64 axi_dma_ioread64(struct axi_dma_chip *chip, u32 reg)
{
	return ioread64(chip->regs + reg);
}

static inline void
axi_chan_iowrite32(struct axi_dma_chan *chan, u32 reg, u32 val)
{
	iowrite32(val, chan->chan_regs + reg);
}

static inline u32 axi_chan_ioread32(struct axi_dma_chan *chan, u32 reg)
{
	return ioread32(chan->chan_regs + reg);
}

static inline void
axi_chan_iowrite64(struct axi_dma_chan *chan, u32 reg, u64 val)
{
	/*
	 * We split one 64-bit write into two 32-bit writes, as some HW
	 * doesn't support 64-bit access.
	 */
	iowrite32(lower_32_bits(val), chan->chan_regs + reg);
	iowrite32(upper_32_bits(val), chan->chan_regs + reg + 4);
}

static inline void axi_chan_config_write(struct axi_dma_chan *chan,
					 struct axi_dma_chan_config *config)
{
	u32 cfg_lo, cfg_hi;

	cfg_lo = (config->dst_multblk_type << CH_CFG_L_DST_MULTBLK_TYPE_POS |
		  config->src_multblk_type << CH_CFG_L_SRC_MULTBLK_TYPE_POS);
	if (chan->chip->dw->hdata->reg_map_8_channels &&
	    !chan->chip->dw->hdata->use_cfg2) {
		cfg_hi = config->tt_fc << CH_CFG_H_TT_FC_POS |
			 config->hs_sel_src << CH_CFG_H_HS_SEL_SRC_POS |
			 config->hs_sel_dst << CH_CFG_H_HS_SEL_DST_POS |
			 config->src_per << CH_CFG_H_SRC_PER_POS |
			 config->dst_per << CH_CFG_H_DST_PER_POS |
			 config->prior << CH_CFG_H_PRIORITY_POS;
	} else {
		cfg_lo |= config->src_per << CH_CFG2_L_SRC_PER_POS |
			  config->dst_per << CH_CFG2_L_DST_PER_POS;
		cfg_hi = config->tt_fc << CH_CFG2_H_TT_FC_POS |
			 config->hs_sel_src << CH_CFG2_H_HS_SEL_SRC_POS |
			 config->hs_sel_dst << CH_CFG2_H_HS_SEL_DST_POS |
			 config->prior << CH_CFG2_H_PRIORITY_POS;
	}
	axi_chan_iowrite32(chan, CH_CFG_L, cfg_lo);
	axi_chan_iowrite32(chan, CH_CFG_H, cfg_hi);
}

static inline void axi_dma_disable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val &= ~DMAC_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_dma_enable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val |= DMAC_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_dma_irq_disable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val &= ~INT_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_dma_irq_enable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val |= INT_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_chan_irq_disable(struct axi_dma_chan *chan, u32 irq_mask)
{
	u32 val;

	if (likely(irq_mask == DWAXIDMAC_IRQ_ALL)) {
		axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, DWAXIDMAC_IRQ_NONE);
	} else {
		val = axi_chan_ioread32(chan, CH_INTSTATUS_ENA);
		val &= ~irq_mask;
		axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, val);
	}
}

static inline void axi_chan_irq_set(struct axi_dma_chan *chan, u32 irq_mask)
{
	axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, irq_mask);
}

static inline void axi_chan_irq_sig_set(struct axi_dma_chan *chan, u32 irq_mask)
{
	axi_chan_iowrite32(chan, CH_INTSIGNAL_ENA, irq_mask);
}

static inline void axi_chan_irq_clear(struct axi_dma_chan *chan, u32 irq_mask)
{
	axi_chan_iowrite32(chan, CH_INTCLEAR, irq_mask);
}

static inline u32 axi_chan_irq_read(struct axi_dma_chan *chan)
{
	return axi_chan_ioread32(chan, CH_INTSTATUS);
}

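/*
 * The channel-enable register pairs each enable bit with a write-enable
 * bit, so one channel can be toggled without a read-modify-write race
 * against the others. Illustrative example, assuming the 8-channel
 * register map: to disable channel 2, write the register with
 * BIT(2) << DMAC_CHAN_EN_SHIFT cleared and BIT(2) << DMAC_CHAN_EN_WE_SHIFT
 * set; fields whose write-enable bit is 0 are left unchanged.
 */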
static inline void axi_chan_disable(struct axi_dma_chan *chan)
{
	u64 val;

	if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) {
		val = axi_dma_ioread64(chan->chip, DMAC_CHEN);
		if (chan->id >= DMAC_CHAN_16) {
			val &= ~((u64)(BIT(chan->id) >> DMAC_CHAN_16)
				<< (DMAC_CHAN_EN_SHIFT + DMAC_CHAN_BLOCK_SHIFT));
			val |=   (u64)(BIT(chan->id) >> DMAC_CHAN_16)
				<< (DMAC_CHAN_EN2_WE_SHIFT + DMAC_CHAN_BLOCK_SHIFT);
		} else {
			val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);
			val |=   BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
		}
		axi_dma_iowrite64(chan->chip, DMAC_CHEN, val);
	} else {
		val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
		val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);
		if (chan->chip->dw->hdata->reg_map_8_channels)
			val |=   BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
		else
			val |=   BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
		axi_dma_iowrite32(chan->chip, DMAC_CHEN, (u32)val);
	}
}

static inline void axi_chan_enable(struct axi_dma_chan *chan)
{
	u64 val;

	if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) {
		val = axi_dma_ioread64(chan->chip, DMAC_CHEN);
		if (chan->id >= DMAC_CHAN_16) {
			val |= (u64)(BIT(chan->id) >> DMAC_CHAN_16)
				<< (DMAC_CHAN_EN_SHIFT + DMAC_CHAN_BLOCK_SHIFT) |
				(u64)(BIT(chan->id) >> DMAC_CHAN_16)
				<< (DMAC_CHAN_EN2_WE_SHIFT + DMAC_CHAN_BLOCK_SHIFT);
		} else {
			val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
			       BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
		}
		axi_dma_iowrite64(chan->chip, DMAC_CHEN, val);
	} else {
		val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
		if (chan->chip->dw->hdata->reg_map_8_channels) {
			val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
			       BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
		} else {
			val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
			       BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
		}
		axi_dma_iowrite32(chan->chip, DMAC_CHEN, (u32)val);
	}
}

static inline bool axi_chan_is_hw_enable(struct axi_dma_chan *chan)
{
	u64 val;

	if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16)
		val = axi_dma_ioread64(chan->chip, DMAC_CHEN);
	else
		val = axi_dma_ioread32(chan->chip, DMAC_CHEN);

	if (chan->id >= DMAC_CHAN_16)
		return !!(val & ((u64)(BIT(chan->id) >> DMAC_CHAN_16) << DMAC_CHAN_BLOCK_SHIFT));
	else
		return !!(val & (BIT(chan->id) << DMAC_CHAN_EN_SHIFT));
}

static void axi_dma_hw_init(struct axi_dma_chip *chip)
{
	int ret;
	u32 i;

	for (i = 0; i < chip->dw->hdata->nr_channels; i++) {
		axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
		axi_chan_disable(&chip->dw->chan[i]);
	}
	ret = dma_set_mask_and_coherent(chip->dev, DMA_BIT_MASK(64));
	if (ret)
		dev_warn(chip->dev, "Unable to set coherent mask\n");
}

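/*
 * The transfer width is the largest power of two that divides the source
 * address, destination address and length, capped by the master data bus
 * width. Worked example: src = 0x1008, dst = 0x2010, len = 64 gives
 * __ffs(0x1008 | 0x2010 | 0x40 | BIT(max_width)) = 3, i.e. 8-byte beats
 * (assuming max_width >= 3).
 */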
static u32 axi_chan_get_xfer_width(struct axi_dma_chan *chan, dma_addr_t src,
				   dma_addr_t dst, size_t len)
{
	u32 max_width = chan->chip->dw->hdata->m_data_width;

	return __ffs(src | dst | len | BIT(max_width));
}

static inline const char *axi_chan_name(struct axi_dma_chan *chan)
{
	return dma_chan_name(&chan->vc.chan);
}

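/*
 * Descriptor storage is two-level: a software axi_dma_desc describes the
 * whole transfer, and each axi_dma_hw_desc in its array wraps one hardware
 * LLI allocated from the channel's dma_pool (see axi_desc_get() below) so
 * the DMAC can fetch it by bus address.
 */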
static struct axi_dma_desc *axi_desc_alloc(u32 num)
{
	struct axi_dma_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->hw_desc = kcalloc(num, sizeof(*desc->hw_desc), GFP_NOWAIT);
	if (!desc->hw_desc) {
		kfree(desc);
		return NULL;
	}
	desc->nr_hw_descs = num;

	return desc;
}

static struct axi_dma_lli *axi_desc_get(struct axi_dma_chan *chan,
					dma_addr_t *addr)
{
	struct axi_dma_lli *lli;
	dma_addr_t phys;

	lli = dma_pool_zalloc(chan->desc_pool, GFP_NOWAIT, &phys);
	if (unlikely(!lli)) {
		dev_err(chan2dev(chan), "%s: not enough descriptors available\n",
			axi_chan_name(chan));
		return NULL;
	}

	atomic_inc(&chan->descs_allocated);
	*addr = phys;

	return lli;
}

static void axi_desc_put(struct axi_dma_desc *desc)
{
	struct axi_dma_chan *chan = desc->chan;
	int count = desc->nr_hw_descs;
	struct axi_dma_hw_desc *hw_desc;
	int descs_put;

	for (descs_put = 0; descs_put < count; descs_put++) {
		hw_desc = &desc->hw_desc[descs_put];
		dma_pool_free(chan->desc_pool, hw_desc->lli, hw_desc->llp);
	}

	kfree(desc->hw_desc);
	kfree(desc);
	atomic_sub(descs_put, &chan->descs_allocated);
	dev_vdbg(chan2dev(chan), "%s: %d descs put, %d still allocated\n",
		 axi_chan_name(chan), descs_put,
		 atomic_read(&chan->descs_allocated));
}

static void vchan_desc_put(struct virt_dma_desc *vdesc)
{
	axi_desc_put(vd_to_axi_desc(vdesc));
}

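/*
 * The residue below is estimated as the total length minus
 * completed_blocks times the first segment's length. This assumes all
 * segments of a descriptor are equally sized, which is exact for the
 * cyclic case and an approximation otherwise.
 */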
static enum dma_status
dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	u32 completed_length;
	unsigned long flags;
	u32 completed_blocks;
	size_t bytes = 0;
	u32 length;
	u32 len;

	status = dma_cookie_status(dchan, cookie, txstate);
	if (status == DMA_COMPLETE || !txstate)
		return status;

	spin_lock_irqsave(&chan->vc.lock, flags);

	vdesc = vchan_find_desc(&chan->vc, cookie);
	if (vdesc) {
		length = vd_to_axi_desc(vdesc)->length;
		completed_blocks = vd_to_axi_desc(vdesc)->completed_blocks;
		len = vd_to_axi_desc(vdesc)->hw_desc[0].len;
		completed_length = completed_blocks * len;
		bytes = length - completed_length;
	}

	spin_unlock_irqrestore(&chan->vc.lock, flags);
	dma_set_residue(txstate, bytes);

	return status;
}

static void write_desc_llp(struct axi_dma_hw_desc *desc, dma_addr_t adr)
{
	desc->lli->llp = cpu_to_le64(adr);
}

static void write_chan_llp(struct axi_dma_chan *chan, dma_addr_t adr)
{
	axi_chan_iowrite64(chan, CH_LLP, adr);
}

static void dw_axi_dma_set_byte_halfword(struct axi_dma_chan *chan, bool set)
{
	u32 offset = DMAC_APB_BYTE_WR_CH_EN;
	u32 reg_width, val;

	if (!chan->chip->apb_regs) {
		dev_dbg(chan->chip->dev, "apb_regs not initialized\n");
		return;
	}

	reg_width = __ffs(chan->config.dst_addr_width);
	if (reg_width == DWAXIDMAC_TRANS_WIDTH_16)
		offset = DMAC_APB_HALFWORD_WR_CH_EN;

	val = ioread32(chan->chip->apb_regs + offset);

	if (set)
		val |= BIT(chan->id);
	else
		val &= ~BIT(chan->id);

	iowrite32(val, chan->chip->apb_regs + offset);
}

/* Called in chan locked context */
static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
				      struct axi_dma_desc *first)
{
	u32 priority = chan->chip->dw->hdata->priority[chan->id];
	struct axi_dma_chan_config config = {};
	u32 irq_mask;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	if (unlikely(axi_chan_is_hw_enable(chan))) {
		dev_err(chan2dev(chan), "%s is non-idle!\n",
			axi_chan_name(chan));

		return;
	}

	axi_dma_enable(chan->chip);

	config.dst_multblk_type = DWAXIDMAC_MBLK_TYPE_LL;
	config.src_multblk_type = DWAXIDMAC_MBLK_TYPE_LL;
	config.tt_fc = DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC;
	config.prior = priority;
	config.hs_sel_dst = DWAXIDMAC_HS_SEL_HW;
	config.hs_sel_src = DWAXIDMAC_HS_SEL_HW;
	switch (chan->direction) {
	case DMA_MEM_TO_DEV:
		dw_axi_dma_set_byte_halfword(chan, true);
		config.tt_fc = chan->config.device_fc ?
				DWAXIDMAC_TT_FC_MEM_TO_PER_DST :
				DWAXIDMAC_TT_FC_MEM_TO_PER_DMAC;
		if (chan->chip->apb_regs)
			config.dst_per = chan->id;
		else
			config.dst_per = chan->hw_handshake_num;
		break;
	case DMA_DEV_TO_MEM:
		config.tt_fc = chan->config.device_fc ?
				DWAXIDMAC_TT_FC_PER_TO_MEM_SRC :
				DWAXIDMAC_TT_FC_PER_TO_MEM_DMAC;
		if (chan->chip->apb_regs)
			config.src_per = chan->id;
		else
			config.src_per = chan->hw_handshake_num;
		break;
	default:
		break;
	}
	axi_chan_config_write(chan, &config);

	write_chan_llp(chan, first->hw_desc[0].llp | lms);

	irq_mask = DWAXIDMAC_IRQ_DMA_TRF | DWAXIDMAC_IRQ_ALL_ERR;
	axi_chan_irq_sig_set(chan, irq_mask);

	/* Generate 'suspend' status but don't generate interrupt */
	irq_mask |= DWAXIDMAC_IRQ_SUSPENDED;
	axi_chan_irq_set(chan, irq_mask);

	axi_chan_enable(chan);
}

static void axi_chan_start_first_queued(struct axi_dma_chan *chan)
{
	struct axi_dma_desc *desc;
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&chan->vc);
	if (!vd)
		return;

	desc = vd_to_axi_desc(vd);
	dev_vdbg(chan2dev(chan), "%s: started %u\n", axi_chan_name(chan),
		 vd->tx.cookie);
	axi_chan_block_xfer_start(chan, desc);
}

static void dma_chan_issue_pending(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (vchan_issue_pending(&chan->vc))
		axi_chan_start_first_queued(chan);
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static void dw_axi_dma_synchronize(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

	vchan_synchronize(&chan->vc);
}

static int dma_chan_alloc_chan_resources(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

	/* ASSERT: channel is idle */
	if (axi_chan_is_hw_enable(chan)) {
		dev_err(chan2dev(chan), "%s is non-idle!\n",
			axi_chan_name(chan));
		return -EBUSY;
	}

	/* LLI address must be aligned to a 64-byte boundary */
	chan->desc_pool = dma_pool_create(dev_name(chan2dev(chan)),
					  chan->chip->dev,
					  sizeof(struct axi_dma_lli),
					  64, 0);
	if (!chan->desc_pool) {
		dev_err(chan2dev(chan), "No memory for descriptors\n");
		return -ENOMEM;
	}
	dev_vdbg(dchan2dev(dchan), "%s: allocating\n", axi_chan_name(chan));

	pm_runtime_get(chan->chip->dev);

	return 0;
}

static void dma_chan_free_chan_resources(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

	/* ASSERT: channel is idle */
	if (axi_chan_is_hw_enable(chan))
		dev_err(dchan2dev(dchan), "%s is non-idle!\n",
			axi_chan_name(chan));

	axi_chan_disable(chan);
	axi_chan_irq_disable(chan, DWAXIDMAC_IRQ_ALL);

	vchan_free_chan_resources(&chan->vc);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
	dev_vdbg(dchan2dev(dchan),
		 "%s: free resources, descriptor still allocated: %u\n",
		 axi_chan_name(chan), atomic_read(&chan->descs_allocated));

	pm_runtime_put(chan->chip->dev);
}

static void dw_axi_dma_set_hw_channel(struct axi_dma_chan *chan, bool set)
{
	struct axi_dma_chip *chip = chan->chip;
	unsigned long reg_value, val;

	if (!chip->apb_regs) {
		dev_err(chip->dev, "apb_regs not initialized\n");
		return;
	}

	/*
	 * An unused DMA channel has a default value of 0x3F.
	 * Lock a DMA channel by assigning a handshake number to it;
	 * unlock it by assigning 0x3F (UNUSED_CHANNEL) again.
	 */
	if (set)
		val = chan->hw_handshake_num;
	else
		val = UNUSED_CHANNEL;

	reg_value = lo_hi_readq(chip->apb_regs + DMAC_APB_HW_HS_SEL_0);

	/*
	 * The channel is already allocated; set the handshake as per the
	 * channel ID. A single 64-bit write covers the handshake fields of
	 * all 8 channels.
	 */
	reg_value &= ~(DMA_APB_HS_SEL_MASK <<
			(chan->id * DMA_APB_HS_SEL_BIT_SIZE));
	reg_value |= (val << (chan->id * DMA_APB_HS_SEL_BIT_SIZE));
	lo_hi_writeq(reg_value, chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
}

/*
 * If DW_axi_dmac sees the CHx_CTL.ShadowReg_Or_LLI_Last bit of the fetched
 * LLI as 1, it understands that the current block is the final block in the
 * transfer and completes the DMA transfer operation at the end of the
 * current block transfer.
 */
static void set_desc_last(struct axi_dma_hw_desc *desc)
{
	u32 val;

	val = le32_to_cpu(desc->lli->ctl_hi);
	val |= CH_CTL_H_LLI_LAST;
	desc->lli->ctl_hi = cpu_to_le32(val);
}

static void write_desc_sar(struct axi_dma_hw_desc *desc, dma_addr_t adr)
{
	desc->lli->sar = cpu_to_le64(adr);
}

static void write_desc_dar(struct axi_dma_hw_desc *desc, dma_addr_t adr)
{
	desc->lli->dar = cpu_to_le64(adr);
}

static void set_desc_src_master(struct axi_dma_hw_desc *desc)
{
	u32 val;

	/* Select AXI0 for source master */
	val = le32_to_cpu(desc->lli->ctl_lo);
	val &= ~CH_CTL_L_SRC_MAST;
	desc->lli->ctl_lo = cpu_to_le32(val);
}

static void set_desc_dest_master(struct axi_dma_hw_desc *hw_desc,
				 struct axi_dma_desc *desc)
{
	u32 val;

	/* Select AXI1 as the destination master if a second master is available */
	val = le32_to_cpu(hw_desc->lli->ctl_lo);
	if (desc->chan->chip->dw->hdata->nr_masters > 1)
		val |= CH_CTL_L_DST_MAST;
	else
		val &= ~CH_CTL_L_DST_MAST;

	hw_desc->lli->ctl_lo = cpu_to_le32(val);
}

static int dw_axi_dma_set_hw_desc(struct axi_dma_chan *chan,
				  struct axi_dma_hw_desc *hw_desc,
				  dma_addr_t mem_addr, size_t len)
{
	unsigned int data_width = BIT(chan->chip->dw->hdata->m_data_width);
	unsigned int reg_width;
	unsigned int mem_width;
	dma_addr_t device_addr;
	size_t axi_block_ts;
	size_t block_ts;
	u32 ctllo, ctlhi;
	u32 burst_len;

	axi_block_ts = chan->chip->dw->hdata->block_size[chan->id];

	mem_width = __ffs(data_width | mem_addr | len);
	if (mem_width > DWAXIDMAC_TRANS_WIDTH_32)
		mem_width = DWAXIDMAC_TRANS_WIDTH_32;

	if (!IS_ALIGNED(mem_addr, 4)) {
		dev_err(chan->chip->dev, "invalid buffer alignment\n");
		return -EINVAL;
	}

	switch (chan->direction) {
	case DMA_MEM_TO_DEV:
		reg_width = __ffs(chan->config.dst_addr_width);
		device_addr = chan->config.dst_addr;
		ctllo = reg_width << CH_CTL_L_DST_WIDTH_POS |
			mem_width << CH_CTL_L_SRC_WIDTH_POS |
			DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_DST_INC_POS |
			DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS;
		block_ts = len >> mem_width;
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __ffs(chan->config.src_addr_width);
		device_addr = chan->config.src_addr;
		ctllo = reg_width << CH_CTL_L_SRC_WIDTH_POS |
			mem_width << CH_CTL_L_DST_WIDTH_POS |
			DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
			DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_SRC_INC_POS;
		block_ts = len >> reg_width;
		break;
	default:
		return -EINVAL;
	}

	if (block_ts > axi_block_ts)
		return -EINVAL;

	hw_desc->lli = axi_desc_get(chan, &hw_desc->llp);
	if (unlikely(!hw_desc->lli))
		return -ENOMEM;

	ctlhi = CH_CTL_H_LLI_VALID;

	if (chan->chip->dw->hdata->restrict_axi_burst_len) {
		burst_len = chan->chip->dw->hdata->axi_rw_burst_len;
		ctlhi |= CH_CTL_H_ARLEN_EN | CH_CTL_H_AWLEN_EN |
			 burst_len << CH_CTL_H_ARLEN_POS |
			 burst_len << CH_CTL_H_AWLEN_POS;
	}

	hw_desc->lli->ctl_hi = cpu_to_le32(ctlhi);

	if (chan->direction == DMA_MEM_TO_DEV) {
		write_desc_sar(hw_desc, mem_addr);
		write_desc_dar(hw_desc, device_addr);
	} else {
		write_desc_sar(hw_desc, device_addr);
		write_desc_dar(hw_desc, mem_addr);
	}

	hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1);

	ctllo |= DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
		 DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS;
	hw_desc->lli->ctl_lo = cpu_to_le32(ctllo);

	set_desc_src_master(hw_desc);

	hw_desc->len = len;
	return 0;
}

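/*
 * The maximum contiguous block length in bytes is the per-channel
 * BLOCK_TS limit scaled by the transfer width. Worked example: a
 * block_size of 1024 with a 32-bit memory width (mem_width = 2) allows
 * 1024 << 2 = 4096 bytes per block.
 */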
static size_t calculate_block_len(struct axi_dma_chan *chan,
				  dma_addr_t dma_addr, size_t buf_len,
				  enum dma_transfer_direction direction)
{
	u32 data_width, reg_width, mem_width;
	size_t axi_block_ts, block_len;

	axi_block_ts = chan->chip->dw->hdata->block_size[chan->id];

	switch (direction) {
	case DMA_MEM_TO_DEV:
		data_width = BIT(chan->chip->dw->hdata->m_data_width);
		mem_width = __ffs(data_width | dma_addr | buf_len);
		if (mem_width > DWAXIDMAC_TRANS_WIDTH_32)
			mem_width = DWAXIDMAC_TRANS_WIDTH_32;

		block_len = axi_block_ts << mem_width;
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __ffs(chan->config.src_addr_width);
		block_len = axi_block_ts << reg_width;
		break;
	default:
		block_len = 0;
	}

	return block_len;
}

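/*
 * Each period is split into equally sized segments that do not exceed the
 * hardware block limit. Worked example: period_len = 6000 bytes against a
 * 4096-byte block limit gives num_segments = 2 and segment_len = 3000, so
 * a buffer holding 4 such periods needs 8 LLIs in total.
 */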
static struct dma_async_tx_descriptor *
dw_axi_dma_chan_prep_cyclic(struct dma_chan *dchan, dma_addr_t dma_addr,
			    size_t buf_len, size_t period_len,
			    enum dma_transfer_direction direction,
			    unsigned long flags)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	struct axi_dma_hw_desc *hw_desc = NULL;
	struct axi_dma_desc *desc = NULL;
	dma_addr_t src_addr = dma_addr;
	u32 num_periods, num_segments;
	size_t axi_block_len;
	u32 total_segments;
	u32 segment_len;
	unsigned int i;
	int status;
	u64 llp = 0;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	num_periods = buf_len / period_len;

	axi_block_len = calculate_block_len(chan, dma_addr, buf_len, direction);
	if (axi_block_len == 0)
		return NULL;

	num_segments = DIV_ROUND_UP(period_len, axi_block_len);
	segment_len = DIV_ROUND_UP(period_len, num_segments);

	total_segments = num_periods * num_segments;

	desc = axi_desc_alloc(total_segments);
	if (unlikely(!desc))
		goto err_desc_get;

	chan->direction = direction;
	desc->chan = chan;
	chan->cyclic = true;
	desc->length = 0;
	desc->period_len = period_len;

	for (i = 0; i < total_segments; i++) {
		hw_desc = &desc->hw_desc[i];

		status = dw_axi_dma_set_hw_desc(chan, hw_desc, src_addr,
						segment_len);
		if (status < 0)
			goto err_desc_get;

		desc->length += hw_desc->len;
		/*
		 * Set end-of-link on each linked descriptor, so that the
		 * cyclic callback can be triggered from the interrupt
		 * handler.
		 */
		set_desc_last(hw_desc);

		src_addr += segment_len;
	}

	llp = desc->hw_desc[0].llp;

	/*
	 * Build the transfer list backwards: each descriptor's llp points to
	 * the next one, and the last descriptor wraps around to the first,
	 * closing the cyclic ring.
	 */
	do {
		hw_desc = &desc->hw_desc[--total_segments];
		write_desc_llp(hw_desc, llp | lms);
		llp = hw_desc->llp;
	} while (total_segments);

	dw_axi_dma_set_hw_channel(chan, true);

	return vchan_tx_prep(&chan->vc, &desc->vd, flags);

err_desc_get:
	if (desc)
		axi_desc_put(desc);

	return NULL;
}

static struct dma_async_tx_descriptor *
dw_axi_dma_chan_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
			      unsigned int sg_len,
			      enum dma_transfer_direction direction,
			      unsigned long flags, void *context)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	struct axi_dma_hw_desc *hw_desc = NULL;
	struct axi_dma_desc *desc = NULL;
	u32 num_segments, segment_len;
	unsigned int loop = 0;
	struct scatterlist *sg;
	size_t axi_block_len;
	u32 len, num_sgs;
	unsigned int i;
	dma_addr_t mem;
	int status;
	u64 llp = 0;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	if (unlikely(!is_slave_direction(direction) || !sg_len))
		return NULL;

	mem = sg_dma_address(sgl);
	len = sg_dma_len(sgl);

	axi_block_len = calculate_block_len(chan, mem, len, direction);
	if (axi_block_len == 0)
		return NULL;

	num_sgs = sg_nents_for_dma(sgl, sg_len, axi_block_len);
	desc = axi_desc_alloc(num_sgs);
	if (unlikely(!desc))
		goto err_desc_get;

	desc->chan = chan;
	desc->length = 0;
	chan->direction = direction;

	for_each_sg(sgl, sg, sg_len, i) {
		mem = sg_dma_address(sg);
		len = sg_dma_len(sg);
		num_segments = DIV_ROUND_UP(sg_dma_len(sg), axi_block_len);
		segment_len = DIV_ROUND_UP(sg_dma_len(sg), num_segments);

		do {
			hw_desc = &desc->hw_desc[loop++];
			status = dw_axi_dma_set_hw_desc(chan, hw_desc, mem, segment_len);
			if (status < 0)
				goto err_desc_get;

			desc->length += hw_desc->len;
			len -= segment_len;
			mem += segment_len;
		} while (len >= segment_len);
	}

	/* Set end-of-link on the last link descriptor of the list */
	set_desc_last(&desc->hw_desc[num_sgs - 1]);

	/*
	 * Chain the LLIs backwards: each descriptor's llp points to the next
	 * one; the last descriptor keeps llp = 0 and terminates the list.
	 */
	do {
		hw_desc = &desc->hw_desc[--num_sgs];
		write_desc_llp(hw_desc, llp | lms);
		llp = hw_desc->llp;
	} while (num_sgs);

	dw_axi_dma_set_hw_channel(chan, true);

	return vchan_tx_prep(&chan->vc, &desc->vd, flags);

err_desc_get:
	if (desc)
		axi_desc_put(desc);

	return NULL;
}

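/*
 * Memcpy transfers are chopped into chunks of at most
 * max_block_ts << xfer_width bytes. The transfer width is re-evaluated
 * for every chunk, since advancing the source and destination addresses
 * can change their common alignment.
 */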
static struct dma_async_tx_descriptor *
dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
			 dma_addr_t src_adr, size_t len, unsigned long flags)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	size_t block_ts, max_block_ts, xfer_len;
	struct axi_dma_hw_desc *hw_desc = NULL;
	struct axi_dma_desc *desc = NULL;
	u32 xfer_width, reg, num;
	u64 llp = 0;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	dev_dbg(chan2dev(chan), "%s: memcpy: src: %pad dst: %pad length: %zd flags: %#lx",
		axi_chan_name(chan), &src_adr, &dst_adr, len, flags);

	max_block_ts = chan->chip->dw->hdata->block_size[chan->id];
	xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, len);
	num = DIV_ROUND_UP(len, max_block_ts << xfer_width);
	desc = axi_desc_alloc(num);
	if (unlikely(!desc))
		goto err_desc_get;

	desc->chan = chan;
	num = 0;
	desc->length = 0;
	while (len) {
		xfer_len = len;

		hw_desc = &desc->hw_desc[num];
		/*
		 * Take care of the alignment. The source and destination
		 * widths could differ, but keeping them the same is simpler.
		 */
		xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, xfer_len);

		/*
		 * block_ts indicates the total number of data items of width
		 * xfer_width to be transferred in a DMA block transfer.
		 * The BLOCK_TS register should be set to block_ts - 1.
		 */
		block_ts = xfer_len >> xfer_width;
		if (block_ts > max_block_ts) {
			block_ts = max_block_ts;
			xfer_len = max_block_ts << xfer_width;
		}

		hw_desc->lli = axi_desc_get(chan, &hw_desc->llp);
		if (unlikely(!hw_desc->lli))
			goto err_desc_get;

		write_desc_sar(hw_desc, src_adr);
		write_desc_dar(hw_desc, dst_adr);
		hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1);

		reg = CH_CTL_H_LLI_VALID;
		if (chan->chip->dw->hdata->restrict_axi_burst_len) {
			u32 burst_len = chan->chip->dw->hdata->axi_rw_burst_len;

			reg |= (CH_CTL_H_ARLEN_EN |
				burst_len << CH_CTL_H_ARLEN_POS |
				CH_CTL_H_AWLEN_EN |
				burst_len << CH_CTL_H_AWLEN_POS);
		}
		hw_desc->lli->ctl_hi = cpu_to_le32(reg);

		reg = (DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
		       DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS |
		       xfer_width << CH_CTL_L_DST_WIDTH_POS |
		       xfer_width << CH_CTL_L_SRC_WIDTH_POS |
		       DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
		       DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS);
		hw_desc->lli->ctl_lo = cpu_to_le32(reg);

		set_desc_src_master(hw_desc);
		set_desc_dest_master(hw_desc, desc);

		hw_desc->len = xfer_len;
		desc->length += hw_desc->len;
		/* Update the length and addresses for the next loop cycle */
		len -= xfer_len;
		dst_adr += xfer_len;
		src_adr += xfer_len;
		num++;
	}

	/* Set end-of-link on the last link descriptor of the list */
	set_desc_last(&desc->hw_desc[num - 1]);
	/* Chain the LLIs backwards; the last descriptor keeps llp = 0 */
	do {
		hw_desc = &desc->hw_desc[--num];
		write_desc_llp(hw_desc, llp | lms);
		llp = hw_desc->llp;
	} while (num);

	return vchan_tx_prep(&chan->vc, &desc->vd, flags);

err_desc_get:
	if (desc)
		axi_desc_put(desc);
	return NULL;
}

static int dw_axi_dma_chan_slave_config(struct dma_chan *dchan,
					struct dma_slave_config *config)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

	memcpy(&chan->config, config, sizeof(*config));

	return 0;
}

static void axi_chan_dump_lli(struct axi_dma_chan *chan,
			      struct axi_dma_hw_desc *desc)
{
	if (!desc->lli) {
		dev_err(dchan2dev(&chan->vc.chan), "NULL LLI\n");
		return;
	}

	dev_err(dchan2dev(&chan->vc.chan),
		"SAR: 0x%llx DAR: 0x%llx LLP: 0x%llx BTS 0x%x CTL: 0x%x:%08x",
		le64_to_cpu(desc->lli->sar),
		le64_to_cpu(desc->lli->dar),
		le64_to_cpu(desc->lli->llp),
		le32_to_cpu(desc->lli->block_ts_lo),
		le32_to_cpu(desc->lli->ctl_hi),
		le32_to_cpu(desc->lli->ctl_lo));
}

static void axi_chan_list_dump_lli(struct axi_dma_chan *chan,
				   struct axi_dma_desc *desc_head)
{
	int count = atomic_read(&chan->descs_allocated);
	int i;

	for (i = 0; i < count; i++)
		axi_chan_dump_lli(chan, &desc_head->hw_desc[i]);
}

static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
{
	struct virt_dma_desc *vd;
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);

	axi_chan_disable(chan);

	/* The bad descriptor currently is in the head of vc list */
	vd = vchan_next_desc(&chan->vc);
	if (!vd) {
		dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",
			axi_chan_name(chan));
		goto out;
	}
	/* Remove the completed descriptor from issued list */
	list_del(&vd->node);

	/* WARN about bad descriptor */
	dev_err(chan2dev(chan),
		"Bad descriptor submitted for %s, cookie: %d, irq: 0x%08x\n",
		axi_chan_name(chan), vd->tx.cookie, status);
	axi_chan_list_dump_lli(chan, vd_to_axi_desc(vd));

	vchan_cookie_complete(vd);

	/* Try to restart the controller */
	axi_chan_start_first_queued(chan);

out:
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
{
	int count = atomic_read(&chan->descs_allocated);
	struct axi_dma_hw_desc *hw_desc;
	struct axi_dma_desc *desc;
	struct virt_dma_desc *vd;
	unsigned long flags;
	u64 llp;
	int i;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (unlikely(axi_chan_is_hw_enable(chan))) {
		dev_err(chan2dev(chan), "BUG: %s caught DWAXIDMAC_IRQ_DMA_TRF, but channel not idle!\n",
			axi_chan_name(chan));
		axi_chan_disable(chan);
	}

	/* The completed descriptor currently is in the head of vc list */
	vd = vchan_next_desc(&chan->vc);
	if (!vd) {
		dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",
			axi_chan_name(chan));
		goto out;
	}

	if (chan->cyclic) {
		desc = vd_to_axi_desc(vd);
		if (desc) {
			llp = lo_hi_readq(chan->chan_regs + CH_LLP);
			for (i = 0; i < count; i++) {
				hw_desc = &desc->hw_desc[i];
				if (hw_desc->llp == llp) {
					axi_chan_irq_clear(chan, hw_desc->lli->status_lo);
					hw_desc->lli->ctl_hi |= CH_CTL_H_LLI_VALID;
					desc->completed_blocks = i;

					if (((hw_desc->len * (i + 1)) % desc->period_len) == 0)
						vchan_cyclic_callback(vd);
					break;
				}
			}

			axi_chan_enable(chan);
		}
	} else {
		/* Remove the completed descriptor from issued list before completing */
		list_del(&vd->node);
		vchan_cookie_complete(vd);
	}

out:
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static irqreturn_t dw_axi_dma_interrupt(int irq, void *dev_id)
{
	struct axi_dma_chip *chip = dev_id;
	struct dw_axi_dma *dw = chip->dw;
	struct axi_dma_chan *chan;

	u32 status, i;

	/* Disable DMAC interrupts. We'll enable them after processing channels */
	axi_dma_irq_disable(chip);

	/* Poll, clear and process every channel interrupt status */
	for (i = 0; i < dw->hdata->nr_channels; i++) {
		chan = &dw->chan[i];
		status = axi_chan_irq_read(chan);
		axi_chan_irq_clear(chan, status);

		dev_vdbg(chip->dev, "%s %u IRQ status: 0x%08x\n",
			 axi_chan_name(chan), i, status);

		if (status & DWAXIDMAC_IRQ_ALL_ERR)
			axi_chan_handle_err(chan, status);
		else if (status & DWAXIDMAC_IRQ_DMA_TRF)
			axi_chan_block_xfer_complete(chan);
	}

	/* Re-enable interrupts */
	axi_dma_irq_enable(chip);

	return IRQ_HANDLED;
}

static int dma_chan_terminate_all(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	u32 chan_active = BIT(chan->id) << DMAC_CHAN_EN_SHIFT;
	unsigned long flags;
	u32 val;
	int ret;
	LIST_HEAD(head);

	axi_chan_disable(chan);

	ret = readl_poll_timeout_atomic(chan->chip->regs + DMAC_CHEN, val,
					!(val & chan_active), 1000, 50000);
	if (ret == -ETIMEDOUT)
		dev_warn(dchan2dev(dchan),
			 "%s failed to stop\n", axi_chan_name(chan));

	if (chan->direction != DMA_MEM_TO_MEM)
		dw_axi_dma_set_hw_channel(chan, false);
	if (chan->direction == DMA_MEM_TO_DEV)
		dw_axi_dma_set_byte_halfword(chan, false);

	spin_lock_irqsave(&chan->vc.lock, flags);

	vchan_get_all_descriptors(&chan->vc, &head);

	chan->cyclic = false;
	spin_unlock_irqrestore(&chan->vc.lock, flags);

	vchan_dma_desc_free_list(&chan->vc, &head);

	dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan));

	return 0;
}

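/*
 * Pausing is a two-step handshake: request suspension through the
 * SUSP/SUSP2 bits (together with their write-enable bits), then poll the
 * channel's DWAXIDMAC_IRQ_SUSPENDED status for up to 20 iterations of
 * 2 us before giving up with -EAGAIN.
 */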
static int dma_chan_pause(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	unsigned long flags;
	unsigned int timeout = 20; /* timeout iterations */
	u64 val;

	spin_lock_irqsave(&chan->vc.lock, flags);

	if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) {
		val = axi_dma_ioread64(chan->chip, DMAC_CHSUSPREG);
		if (chan->id >= DMAC_CHAN_16) {
			val |= (u64)(BIT(chan->id) >> DMAC_CHAN_16)
				<< (DMAC_CHAN_SUSP2_SHIFT + DMAC_CHAN_BLOCK_SHIFT) |
				(u64)(BIT(chan->id) >> DMAC_CHAN_16)
				<< (DMAC_CHAN_SUSP2_WE_SHIFT + DMAC_CHAN_BLOCK_SHIFT);
		} else {
			val |= BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT |
			       BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT;
		}
		axi_dma_iowrite64(chan->chip, DMAC_CHSUSPREG, val);
	} else {
		if (chan->chip->dw->hdata->reg_map_8_channels) {
			val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
			val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT |
			       BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT;
			axi_dma_iowrite32(chan->chip, DMAC_CHEN, (u32)val);
		} else {
			val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
			val |= BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT |
			       BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT;
			axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, (u32)val);
		}
	}

	do {
		if (axi_chan_irq_read(chan) & DWAXIDMAC_IRQ_SUSPENDED)
			break;

		udelay(2);
	} while (--timeout);

	axi_chan_irq_clear(chan, DWAXIDMAC_IRQ_SUSPENDED);

	chan->is_paused = true;

	spin_unlock_irqrestore(&chan->vc.lock, flags);

	return timeout ? 0 : -EAGAIN;
}

/* Called in chan locked context */
static inline void axi_chan_resume(struct axi_dma_chan *chan)
{
	u64 val;

	if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) {
		val = axi_dma_ioread64(chan->chip, DMAC_CHSUSPREG);
		if (chan->id >= DMAC_CHAN_16) {
			val &= ~((u64)(BIT(chan->id) >> DMAC_CHAN_16)
				<< (DMAC_CHAN_SUSP2_SHIFT + DMAC_CHAN_BLOCK_SHIFT));
			val |=  ((u64)(BIT(chan->id) >> DMAC_CHAN_16)
				<< (DMAC_CHAN_SUSP2_WE_SHIFT + DMAC_CHAN_BLOCK_SHIFT));
		} else {
			val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT);
			val |=  (BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT);
		}
		axi_dma_iowrite64(chan->chip, DMAC_CHSUSPREG, val);
	} else {
		if (chan->chip->dw->hdata->reg_map_8_channels) {
			val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
			val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT);
			val |=  (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT);
			axi_dma_iowrite32(chan->chip, DMAC_CHEN, (u32)val);
		} else {
			val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
			val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT);
			val |=  (BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT);
			axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, (u32)val);
		}
	}

	chan->is_paused = false;
}

static int dma_chan_resume(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);

	if (chan->is_paused)
		axi_chan_resume(chan);

	spin_unlock_irqrestore(&chan->vc.lock, flags);

	return 0;
}

static int axi_dma_suspend(struct axi_dma_chip *chip)
{
	axi_dma_irq_disable(chip);
	axi_dma_disable(chip);

	clk_disable_unprepare(chip->core_clk);
	clk_disable_unprepare(chip->cfgr_clk);

	return 0;
}

static int axi_dma_resume(struct axi_dma_chip *chip)
{
	int ret;

	ret = clk_prepare_enable(chip->cfgr_clk);
	if (ret < 0)
		return ret;

	ret = clk_prepare_enable(chip->core_clk);
	if (ret < 0)
		return ret;

	axi_dma_enable(chip);
	axi_dma_irq_enable(chip);

	return 0;
}

static int __maybe_unused axi_dma_runtime_suspend(struct device *dev)
{
	struct axi_dma_chip *chip = dev_get_drvdata(dev);

	return axi_dma_suspend(chip);
}

static int __maybe_unused axi_dma_runtime_resume(struct device *dev)
{
	struct axi_dma_chip *chip = dev_get_drvdata(dev);

	return axi_dma_resume(chip);
}

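/*
 * The single DMA specifier cell is the hardware handshake number, or the
 * channel index itself when use_handshake_as_channel_number is set.
 * Hypothetical client node fragment, assuming a controller labelled
 * "axidma":
 *
 *	dmas = <&axidma 4>;
 *	dma-names = "rx";
 */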
static struct dma_chan *dw_axi_dma_of_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	unsigned int handshake = dma_spec->args[0];
	struct dw_axi_dma *dw = ofdma->of_dma_data;
	struct axi_dma_chan *chan = NULL;
	struct dma_chan *dchan;

	if (dw->hdata->use_handshake_as_channel_number) {
		if (handshake >= dw->hdata->nr_channels)
			return NULL;

		chan = &dw->chan[handshake];
		dchan = dma_get_slave_channel(&chan->vc.chan);
	} else {
		dchan = dma_get_any_slave_channel(&dw->dma);
	}

	if (!dchan)
		return NULL;

	if (!chan)
		chan = dchan_to_axi_dma_chan(dchan);
	chan->hw_handshake_num = handshake;
	return dchan;
}

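/*
 * Illustrative (hypothetical) device-tree fragment showing the properties
 * parsed below; the values are examples only, not taken from a real board:
 *
 *	dma-channels = <4>;
 *	snps,dma-masters = <2>;
 *	snps,data-width = <4>;
 *	snps,block-size = <4096 4096 4096 4096>;
 *	snps,priority = <0 1 2 3>;
 *	snps,axi-max-burst-len = <16>;
 */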
static int parse_device_properties(struct axi_dma_chip *chip)
{
	struct device *dev = chip->dev;
	u32 tmp, carr[DMAC_MAX_CHANNELS];
	int ret;

	ret = device_property_read_u32(dev, "dma-channels", &tmp);
	if (ret)
		return ret;
	if (tmp == 0 || tmp > DMAC_MAX_CHANNELS)
		return -EINVAL;

	chip->dw->hdata->nr_channels = tmp;
	if (tmp <= DMA_REG_MAP_CH_REF)
		chip->dw->hdata->reg_map_8_channels = true;

	ret = device_property_read_u32(dev, "snps,dma-masters", &tmp);
	if (ret)
		return ret;
	if (tmp == 0 || tmp > DMAC_MAX_MASTERS)
		return -EINVAL;

	chip->dw->hdata->nr_masters = tmp;

	ret = device_property_read_u32(dev, "snps,data-width", &tmp);
	if (ret)
		return ret;
	if (tmp > DWAXIDMAC_TRANS_WIDTH_MAX)
		return -EINVAL;

	chip->dw->hdata->m_data_width = tmp;

	ret = device_property_read_u32_array(dev, "snps,block-size", carr,
					     chip->dw->hdata->nr_channels);
	if (ret)
		return ret;
	for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
		if (carr[tmp] == 0 || carr[tmp] > DMAC_MAX_BLK_SIZE)
			return -EINVAL;

		chip->dw->hdata->block_size[tmp] = carr[tmp];
	}

	ret = device_property_read_u32_array(dev, "snps,priority", carr,
					     chip->dw->hdata->nr_channels);
	if (ret)
		return ret;
	/* Priority values must be programmed within the [0:nr_channels-1] range */
	for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
		if (carr[tmp] >= chip->dw->hdata->nr_channels)
			return -EINVAL;

		chip->dw->hdata->priority[tmp] = carr[tmp];
	}

	/* axi-max-burst-len is an optional property */
	ret = device_property_read_u32(dev, "snps,axi-max-burst-len", &tmp);
	if (!ret) {
		if (tmp > DWAXIDMAC_ARWLEN_MAX + 1)
			return -EINVAL;
		if (tmp < DWAXIDMAC_ARWLEN_MIN + 1)
			return -EINVAL;

		chip->dw->hdata->restrict_axi_burst_len = true;
		chip->dw->hdata->axi_rw_burst_len = tmp;
	}

	return 0;
}

static int axi_req_irqs(struct platform_device *pdev, struct axi_dma_chip *chip)
{
	int irq_count = platform_irq_count(pdev);
	int ret;

	for (int i = 0; i < irq_count; i++) {
		chip->irq[i] = platform_get_irq(pdev, i);
		if (chip->irq[i] < 0)
			return chip->irq[i];
		ret = devm_request_irq(chip->dev, chip->irq[i], dw_axi_dma_interrupt,
				       IRQF_SHARED, KBUILD_MODNAME, chip);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static int dw_probe(struct platform_device *pdev)
{
	struct axi_dma_chip *chip;
	struct dw_axi_dma *dw;
	struct dw_axi_dma_hcfg *hdata;
	struct reset_control *resets;
	unsigned int flags;
	u32 i;
	int ret;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	dw = devm_kzalloc(&pdev->dev, sizeof(*dw), GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	hdata = devm_kzalloc(&pdev->dev, sizeof(*hdata), GFP_KERNEL);
	if (!hdata)
		return -ENOMEM;

	chip->dw = dw;
	chip->dev = &pdev->dev;
	chip->dw->hdata = hdata;

	chip->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	flags = (uintptr_t)of_device_get_match_data(&pdev->dev);
	if (flags & AXI_DMA_FLAG_HAS_APB_REGS) {
		chip->apb_regs = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(chip->apb_regs))
			return PTR_ERR(chip->apb_regs);
	}

	if (flags & AXI_DMA_FLAG_HAS_RESETS) {
		resets = devm_reset_control_array_get_exclusive(&pdev->dev);
		if (IS_ERR(resets))
			return PTR_ERR(resets);

		ret = reset_control_deassert(resets);
		if (ret)
			return ret;
	}

	chip->dw->hdata->use_handshake_as_channel_number = !!(flags & AXI_DMA_FLAG_ARG0_AS_CHAN);

	chip->dw->hdata->use_cfg2 = !!(flags & AXI_DMA_FLAG_USE_CFG2);

	chip->core_clk = devm_clk_get(chip->dev, "core-clk");
	if (IS_ERR(chip->core_clk))
		return PTR_ERR(chip->core_clk);

	chip->cfgr_clk = devm_clk_get(chip->dev, "cfgr-clk");
	if (IS_ERR(chip->cfgr_clk))
		return PTR_ERR(chip->cfgr_clk);

	ret = parse_device_properties(chip);
	if (ret)
		return ret;

	dw->chan = devm_kcalloc(chip->dev, hdata->nr_channels,
				sizeof(*dw->chan), GFP_KERNEL);
	if (!dw->chan)
		return -ENOMEM;

	ret = axi_req_irqs(pdev, chip);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < hdata->nr_channels; i++) {
		struct axi_dma_chan *chan = &dw->chan[i];

		chan->chip = chip;
		chan->id = i;
		chan->chan_regs = chip->regs + COMMON_REG_LEN + i * CHAN_REG_LEN;
		atomic_set(&chan->descs_allocated, 0);

		chan->vc.desc_free = vchan_desc_put;
		vchan_init(&chan->vc, &dw->dma);
	}

	/* Set capabilities */
	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	dma_cap_set(DMA_CYCLIC, dw->dma.cap_mask);

	/* DMA capabilities */
	dw->dma.max_burst = hdata->axi_rw_burst_len;
	dw->dma.src_addr_widths = AXI_DMA_BUSWIDTHS;
	dw->dma.dst_addr_widths = AXI_DMA_BUSWIDTHS;
	dw->dma.directions = BIT(DMA_MEM_TO_MEM);
	dw->dma.directions |= BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	dw->dma.dev = chip->dev;
	dw->dma.device_tx_status = dma_chan_tx_status;
	dw->dma.device_issue_pending = dma_chan_issue_pending;
	dw->dma.device_terminate_all = dma_chan_terminate_all;
	dw->dma.device_pause = dma_chan_pause;
	dw->dma.device_resume = dma_chan_resume;

	dw->dma.device_alloc_chan_resources = dma_chan_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dma_chan_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dma_chan_prep_dma_memcpy;
	dw->dma.device_synchronize = dw_axi_dma_synchronize;
	dw->dma.device_config = dw_axi_dma_chan_slave_config;
	dw->dma.device_prep_slave_sg = dw_axi_dma_chan_prep_slave_sg;
	dw->dma.device_prep_dma_cyclic = dw_axi_dma_chan_prep_cyclic;

	/*
	 * The Synopsys DesignWare AxiDMA datasheet states that the maximum
	 * supported block size is 1024. The device register width is 4
	 * bytes, so set the constraint to 1024 * 4.
	 */
	dw->dma.dev->dma_parms = &dw->dma_parms;
	dma_set_max_seg_size(&pdev->dev, MAX_BLOCK_SIZE);
	platform_set_drvdata(pdev, chip);

	pm_runtime_enable(chip->dev);

	/*
	 * We can't just call pm_runtime_get here instead of
	 * pm_runtime_get_noresume + axi_dma_resume because we need the
	 * driver to work even without Runtime PM.
	 */
	pm_runtime_get_noresume(chip->dev);
	ret = axi_dma_resume(chip);
	if (ret < 0)
		goto err_pm_disable;

	axi_dma_hw_init(chip);

	pm_runtime_put(chip->dev);

	ret = dmaenginem_async_device_register(&dw->dma);
	if (ret)
		goto err_pm_disable;

	/* Register with OF helpers for DMA lookups */
	ret = of_dma_controller_register(pdev->dev.of_node,
					 dw_axi_dma_of_xlate, dw);
	if (ret < 0)
		dev_warn(&pdev->dev,
			 "Failed to register OF DMA controller, fallback to MEM_TO_MEM mode\n");

	dev_info(chip->dev, "DesignWare AXI DMA Controller, %d channels\n",
		 dw->hdata->nr_channels);

	return 0;

err_pm_disable:
	pm_runtime_disable(chip->dev);

	return ret;
}

static void dw_remove(struct platform_device *pdev)
{
	struct axi_dma_chip *chip = platform_get_drvdata(pdev);
	struct dw_axi_dma *dw = chip->dw;
	struct axi_dma_chan *chan, *_chan;
	u32 i;

	/* Enable the clocks before accessing the registers */
	clk_prepare_enable(chip->cfgr_clk);
	clk_prepare_enable(chip->core_clk);
	axi_dma_irq_disable(chip);
	for (i = 0; i < dw->hdata->nr_channels; i++) {
		axi_chan_disable(&chip->dw->chan[i]);
		axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
	}
	axi_dma_disable(chip);

	pm_runtime_disable(chip->dev);
	axi_dma_suspend(chip);

	for (i = 0; i < DMAC_MAX_CHANNELS; i++)
		if (chip->irq[i] > 0)
			devm_free_irq(chip->dev, chip->irq[i], chip);

	of_dma_controller_free(chip->dev->of_node);

	list_for_each_entry_safe(chan, _chan, &dw->dma.channels,
				 vc.chan.device_node) {
		list_del(&chan->vc.chan.device_node);
		tasklet_kill(&chan->vc.task);
	}
}

static const struct dev_pm_ops dw_axi_dma_pm_ops = {
	SET_RUNTIME_PM_OPS(axi_dma_runtime_suspend, axi_dma_runtime_resume, NULL)
};

static const struct of_device_id dw_dma_of_id_table[] = {
	{
		.compatible = "snps,axi-dma-1.01a"
	}, {
		.compatible = "intel,kmb-axi-dma",
		.data = (void *)AXI_DMA_FLAG_HAS_APB_REGS,
	}, {
		.compatible = "sophgo,cv1800b-axi-dma",
		.data = (void *)AXI_DMA_FLAG_ARG0_AS_CHAN,
	}, {
		.compatible = "starfive,jh7110-axi-dma",
		.data = (void *)(AXI_DMA_FLAG_HAS_RESETS | AXI_DMA_FLAG_USE_CFG2),
	}, {
		.compatible = "starfive,jh8100-axi-dma",
		.data = (void *)AXI_DMA_FLAG_HAS_RESETS,
	},
	{}
};
MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);

static struct platform_driver dw_driver = {
	.probe		= dw_probe,
	.remove		= dw_remove,
	.driver = {
		.name	= KBUILD_MODNAME,
		.of_match_table = dw_dma_of_id_table,
		.pm = &dw_axi_dma_pm_ops,
	},
};
module_platform_driver(dw_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare AXI DMA Controller platform driver");
MODULE_AUTHOR("Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>");