xref: /linux/drivers/dma/ti/k3-udma.c (revision c88fb897c1fb5a590dc6353ac4b01c8f46a347b3)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
4  *  Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
5  */
6 
7 #include <linux/kernel.h>
8 #include <linux/delay.h>
9 #include <linux/dmaengine.h>
10 #include <linux/dma-mapping.h>
11 #include <linux/dmapool.h>
12 #include <linux/err.h>
13 #include <linux/init.h>
14 #include <linux/interrupt.h>
15 #include <linux/list.h>
16 #include <linux/platform_device.h>
17 #include <linux/slab.h>
18 #include <linux/spinlock.h>
19 #include <linux/sys_soc.h>
20 #include <linux/of.h>
21 #include <linux/of_dma.h>
22 #include <linux/of_device.h>
23 #include <linux/of_irq.h>
24 #include <linux/workqueue.h>
25 #include <linux/completion.h>
26 #include <linux/soc/ti/k3-ringacc.h>
27 #include <linux/soc/ti/ti_sci_protocol.h>
28 #include <linux/soc/ti/ti_sci_inta_msi.h>
29 #include <linux/dma/k3-event-router.h>
30 #include <linux/dma/ti-cppi5.h>
31 
32 #include "../virt-dma.h"
33 #include "k3-udma.h"
34 #include "k3-psil-priv.h"
35 
36 struct udma_static_tr {
37 	u8 elsize; /* RPSTR0 */
38 	u16 elcnt; /* RPSTR0 */
39 	u16 bstcnt; /* RPSTR1 */
40 };
41 
42 #define K3_UDMA_MAX_RFLOWS		1024
43 #define K3_UDMA_DEFAULT_RING_SIZE	16
44 
45 /* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */
46 #define UDMA_RFLOW_SRCTAG_NONE		0
47 #define UDMA_RFLOW_SRCTAG_CFG_TAG	1
48 #define UDMA_RFLOW_SRCTAG_FLOW_ID	2
49 #define UDMA_RFLOW_SRCTAG_SRC_TAG	4
50 
51 #define UDMA_RFLOW_DSTTAG_NONE		0
52 #define UDMA_RFLOW_DSTTAG_CFG_TAG	1
53 #define UDMA_RFLOW_DSTTAG_FLOW_ID	2
54 #define UDMA_RFLOW_DSTTAG_DST_TAG_LO	4
55 #define UDMA_RFLOW_DSTTAG_DST_TAG_HI	5
56 
57 struct udma_chan;
58 
59 enum k3_dma_type {
60 	DMA_TYPE_UDMA = 0,
61 	DMA_TYPE_BCDMA,
62 	DMA_TYPE_PKTDMA,
63 };
64 
65 enum udma_mmr {
66 	MMR_GCFG = 0,
67 	MMR_BCHANRT,
68 	MMR_RCHANRT,
69 	MMR_TCHANRT,
70 	MMR_LAST,
71 };
72 
73 static const char * const mmr_names[] = {
74 	[MMR_GCFG] = "gcfg",
75 	[MMR_BCHANRT] = "bchanrt",
76 	[MMR_RCHANRT] = "rchanrt",
77 	[MMR_TCHANRT] = "tchanrt",
78 };
79 
80 struct udma_tchan {
81 	void __iomem *reg_rt;
82 
83 	int id;
84 	struct k3_ring *t_ring; /* Transmit ring */
85 	struct k3_ring *tc_ring; /* Transmit Completion ring */
86 	int tflow_id; /* applicable only for PKTDMA */
87 
88 };
89 
90 #define udma_bchan udma_tchan
91 
92 struct udma_rflow {
93 	int id;
94 	struct k3_ring *fd_ring; /* Free Descriptor ring */
95 	struct k3_ring *r_ring; /* Receive ring */
96 };
97 
98 struct udma_rchan {
99 	void __iomem *reg_rt;
100 
101 	int id;
102 };
103 
104 struct udma_oes_offsets {
105 	/* K3 UDMA Output Event Offset */
106 	u32 udma_rchan;
107 
108 	/* BCDMA Output Event Offsets */
109 	u32 bcdma_bchan_data;
110 	u32 bcdma_bchan_ring;
111 	u32 bcdma_tchan_data;
112 	u32 bcdma_tchan_ring;
113 	u32 bcdma_rchan_data;
114 	u32 bcdma_rchan_ring;
115 
116 	/* PKTDMA Output Event Offsets */
117 	u32 pktdma_tchan_flow;
118 	u32 pktdma_rchan_flow;
119 };
120 
121 #define UDMA_FLAG_PDMA_ACC32		BIT(0)
122 #define UDMA_FLAG_PDMA_BURST		BIT(1)
123 #define UDMA_FLAG_TDTYPE		BIT(2)
124 
125 struct udma_match_data {
126 	enum k3_dma_type type;
127 	u32 psil_base;
128 	bool enable_memcpy_support;
129 	u32 flags;
130 	u32 statictr_z_mask;
131 };
132 
133 struct udma_soc_data {
134 	struct udma_oes_offsets oes;
135 	u32 bcdma_trigger_event_offset;
136 };
137 
138 struct udma_hwdesc {
139 	size_t cppi5_desc_size;
140 	void *cppi5_desc_vaddr;
141 	dma_addr_t cppi5_desc_paddr;
142 
143 	/* TR descriptor internal pointers */
144 	void *tr_req_base;
145 	struct cppi5_tr_resp_t *tr_resp_base;
146 };
147 
148 struct udma_rx_flush {
149 	struct udma_hwdesc hwdescs[2];
150 
151 	size_t buffer_size;
152 	void *buffer_vaddr;
153 	dma_addr_t buffer_paddr;
154 };
155 
156 struct udma_tpl {
157 	u8 levels;
158 	u32 start_idx[3];
159 };
160 
161 struct udma_dev {
162 	struct dma_device ddev;
163 	struct device *dev;
164 	void __iomem *mmrs[MMR_LAST];
165 	const struct udma_match_data *match_data;
166 	const struct udma_soc_data *soc_data;
167 
168 	struct udma_tpl bchan_tpl;
169 	struct udma_tpl tchan_tpl;
170 	struct udma_tpl rchan_tpl;
171 
172 	size_t desc_align; /* alignment to use for descriptors */
173 
174 	struct udma_tisci_rm tisci_rm;
175 
176 	struct k3_ringacc *ringacc;
177 
178 	struct work_struct purge_work;
179 	struct list_head desc_to_purge;
180 	spinlock_t lock;
181 
182 	struct udma_rx_flush rx_flush;
183 
184 	int bchan_cnt;
185 	int tchan_cnt;
186 	int echan_cnt;
187 	int rchan_cnt;
188 	int rflow_cnt;
189 	int tflow_cnt;
190 	unsigned long *bchan_map;
191 	unsigned long *tchan_map;
192 	unsigned long *rchan_map;
193 	unsigned long *rflow_gp_map;
194 	unsigned long *rflow_gp_map_allocated;
195 	unsigned long *rflow_in_use;
196 	unsigned long *tflow_map;
197 
198 	struct udma_bchan *bchans;
199 	struct udma_tchan *tchans;
200 	struct udma_rchan *rchans;
201 	struct udma_rflow *rflows;
202 
203 	struct udma_chan *channels;
204 	u32 psil_base;
205 	u32 atype;
206 	u32 asel;
207 };
208 
209 struct udma_desc {
210 	struct virt_dma_desc vd;
211 
212 	bool terminated;
213 
214 	enum dma_transfer_direction dir;
215 
216 	struct udma_static_tr static_tr;
217 	u32 residue;
218 
219 	unsigned int sglen;
220 	unsigned int desc_idx; /* Only used for cyclic in packet mode */
221 	unsigned int tr_idx;
222 
223 	u32 metadata_size;
224 	void *metadata; /* pointer to provided metadata buffer (EPIP, PSdata) */
225 
226 	unsigned int hwdesc_count;
227 	struct udma_hwdesc hwdesc[];
228 };
229 
230 enum udma_chan_state {
231 	UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */
232 	UDMA_CHAN_IS_ACTIVE, /* Normal operation */
233 	UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */
234 };
235 
236 struct udma_tx_drain {
237 	struct delayed_work work;
238 	ktime_t tstamp;
239 	u32 residue;
240 };
241 
242 struct udma_chan_config {
243 	bool pkt_mode; /* TR or packet */
244 	bool needs_epib; /* EPIB is needed for the communication or not */
245 	u32 psd_size; /* size of Protocol Specific Data */
246 	u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
247 	u32 hdesc_size; /* Size of a packet descriptor in packet mode */
248 	bool notdpkt; /* Suppress sending TDC packet */
249 	int remote_thread_id;
250 	u32 atype;
251 	u32 asel;
252 	u32 src_thread;
253 	u32 dst_thread;
254 	enum psil_endpoint_type ep_type;
255 	bool enable_acc32;
256 	bool enable_burst;
257 	enum udma_tp_level channel_tpl; /* Channel Throughput Level */
258 
259 	u32 tr_trigger_type;
260 
261 	/* PKDMA mapped channel */
262 	int mapped_channel_id;
263 	/* PKTDMA default tflow or rflow for mapped channel */
264 	int default_flow_id;
265 
266 	enum dma_transfer_direction dir;
267 };
268 
269 struct udma_chan {
270 	struct virt_dma_chan vc;
271 	struct dma_slave_config	cfg;
272 	struct udma_dev *ud;
273 	struct device *dma_dev;
274 	struct udma_desc *desc;
275 	struct udma_desc *terminated_desc;
276 	struct udma_static_tr static_tr;
277 	char *name;
278 
279 	struct udma_bchan *bchan;
280 	struct udma_tchan *tchan;
281 	struct udma_rchan *rchan;
282 	struct udma_rflow *rflow;
283 
284 	bool psil_paired;
285 
286 	int irq_num_ring;
287 	int irq_num_udma;
288 
289 	bool cyclic;
290 	bool paused;
291 
292 	enum udma_chan_state state;
293 	struct completion teardown_completed;
294 
295 	struct udma_tx_drain tx_drain;
296 
297 	u32 bcnt; /* number of bytes completed since the start of the channel */
298 
299 	/* Channel configuration parameters */
300 	struct udma_chan_config config;
301 
302 	/* dmapool for packet mode descriptors */
303 	bool use_dma_pool;
304 	struct dma_pool *hdesc_pool;
305 
306 	u32 id;
307 };
308 
309 static inline struct udma_dev *to_udma_dev(struct dma_device *d)
310 {
311 	return container_of(d, struct udma_dev, ddev);
312 }
313 
314 static inline struct udma_chan *to_udma_chan(struct dma_chan *c)
315 {
316 	return container_of(c, struct udma_chan, vc.chan);
317 }
318 
319 static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t)
320 {
321 	return container_of(t, struct udma_desc, vd.tx);
322 }
323 
324 /* Generic register access functions */
325 static inline u32 udma_read(void __iomem *base, int reg)
326 {
327 	return readl(base + reg);
328 }
329 
330 static inline void udma_write(void __iomem *base, int reg, u32 val)
331 {
332 	writel(val, base + reg);
333 }
334 
335 static inline void udma_update_bits(void __iomem *base, int reg,
336 				    u32 mask, u32 val)
337 {
338 	u32 tmp, orig;
339 
340 	orig = readl(base + reg);
341 	tmp = orig & ~mask;
342 	tmp |= (val & mask);
343 
344 	if (tmp != orig)
345 		writel(tmp, base + reg);
346 }
347 
348 /* TCHANRT */
349 static inline u32 udma_tchanrt_read(struct udma_chan *uc, int reg)
350 {
351 	if (!uc->tchan)
352 		return 0;
353 	return udma_read(uc->tchan->reg_rt, reg);
354 }
355 
356 static inline void udma_tchanrt_write(struct udma_chan *uc, int reg, u32 val)
357 {
358 	if (!uc->tchan)
359 		return;
360 	udma_write(uc->tchan->reg_rt, reg, val);
361 }
362 
363 static inline void udma_tchanrt_update_bits(struct udma_chan *uc, int reg,
364 					    u32 mask, u32 val)
365 {
366 	if (!uc->tchan)
367 		return;
368 	udma_update_bits(uc->tchan->reg_rt, reg, mask, val);
369 }
370 
371 /* RCHANRT */
372 static inline u32 udma_rchanrt_read(struct udma_chan *uc, int reg)
373 {
374 	if (!uc->rchan)
375 		return 0;
376 	return udma_read(uc->rchan->reg_rt, reg);
377 }
378 
379 static inline void udma_rchanrt_write(struct udma_chan *uc, int reg, u32 val)
380 {
381 	if (!uc->rchan)
382 		return;
383 	udma_write(uc->rchan->reg_rt, reg, val);
384 }
385 
386 static inline void udma_rchanrt_update_bits(struct udma_chan *uc, int reg,
387 					    u32 mask, u32 val)
388 {
389 	if (!uc->rchan)
390 		return;
391 	udma_update_bits(uc->rchan->reg_rt, reg, mask, val);
392 }
393 
394 static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
395 {
396 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
397 
398 	dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
399 	return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
400 					      tisci_rm->tisci_navss_dev_id,
401 					      src_thread, dst_thread);
402 }
403 
404 static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
405 			     u32 dst_thread)
406 {
407 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
408 
409 	dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
410 	return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
411 						tisci_rm->tisci_navss_dev_id,
412 						src_thread, dst_thread);
413 }
414 
415 static void k3_configure_chan_coherency(struct dma_chan *chan, u32 asel)
416 {
417 	struct device *chan_dev = &chan->dev->device;
418 
419 	if (asel == 0) {
420 		/* No special handling for the channel */
421 		chan->dev->chan_dma_dev = false;
422 
423 		chan_dev->dma_coherent = false;
424 		chan_dev->dma_parms = NULL;
425 	} else if (asel == 14 || asel == 15) {
426 		chan->dev->chan_dma_dev = true;
427 
428 		chan_dev->dma_coherent = true;
429 		dma_coerce_mask_and_coherent(chan_dev, DMA_BIT_MASK(48));
430 		chan_dev->dma_parms = chan_dev->parent->dma_parms;
431 	} else {
432 		dev_warn(chan->device->dev, "Invalid ASEL value: %u\n", asel);
433 
434 		chan_dev->dma_coherent = false;
435 		chan_dev->dma_parms = NULL;
436 	}
437 }
438 
439 static void udma_reset_uchan(struct udma_chan *uc)
440 {
441 	memset(&uc->config, 0, sizeof(uc->config));
442 	uc->config.remote_thread_id = -1;
443 	uc->config.mapped_channel_id = -1;
444 	uc->config.default_flow_id = -1;
445 	uc->state = UDMA_CHAN_IS_IDLE;
446 }
447 
448 static void udma_dump_chan_stdata(struct udma_chan *uc)
449 {
450 	struct device *dev = uc->ud->dev;
451 	u32 offset;
452 	int i;
453 
454 	if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) {
455 		dev_dbg(dev, "TCHAN State data:\n");
456 		for (i = 0; i < 32; i++) {
457 			offset = UDMA_CHAN_RT_STDATA_REG + i * 4;
458 			dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i,
459 				udma_tchanrt_read(uc, offset));
460 		}
461 	}
462 
463 	if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) {
464 		dev_dbg(dev, "RCHAN State data:\n");
465 		for (i = 0; i < 32; i++) {
466 			offset = UDMA_CHAN_RT_STDATA_REG + i * 4;
467 			dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i,
468 				udma_rchanrt_read(uc, offset));
469 		}
470 	}
471 }
472 
473 static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d,
474 						    int idx)
475 {
476 	return d->hwdesc[idx].cppi5_desc_paddr;
477 }
478 
479 static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx)
480 {
481 	return d->hwdesc[idx].cppi5_desc_vaddr;
482 }
483 
484 static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc,
485 						   dma_addr_t paddr)
486 {
487 	struct udma_desc *d = uc->terminated_desc;
488 
489 	if (d) {
490 		dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
491 								   d->desc_idx);
492 
493 		if (desc_paddr != paddr)
494 			d = NULL;
495 	}
496 
497 	if (!d) {
498 		d = uc->desc;
499 		if (d) {
500 			dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
501 								d->desc_idx);
502 
503 			if (desc_paddr != paddr)
504 				d = NULL;
505 		}
506 	}
507 
508 	return d;
509 }
510 
511 static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d)
512 {
513 	if (uc->use_dma_pool) {
514 		int i;
515 
516 		for (i = 0; i < d->hwdesc_count; i++) {
517 			if (!d->hwdesc[i].cppi5_desc_vaddr)
518 				continue;
519 
520 			dma_pool_free(uc->hdesc_pool,
521 				      d->hwdesc[i].cppi5_desc_vaddr,
522 				      d->hwdesc[i].cppi5_desc_paddr);
523 
524 			d->hwdesc[i].cppi5_desc_vaddr = NULL;
525 		}
526 	} else if (d->hwdesc[0].cppi5_desc_vaddr) {
527 		dma_free_coherent(uc->dma_dev, d->hwdesc[0].cppi5_desc_size,
528 				  d->hwdesc[0].cppi5_desc_vaddr,
529 				  d->hwdesc[0].cppi5_desc_paddr);
530 
531 		d->hwdesc[0].cppi5_desc_vaddr = NULL;
532 	}
533 }
534 
535 static void udma_purge_desc_work(struct work_struct *work)
536 {
537 	struct udma_dev *ud = container_of(work, typeof(*ud), purge_work);
538 	struct virt_dma_desc *vd, *_vd;
539 	unsigned long flags;
540 	LIST_HEAD(head);
541 
542 	spin_lock_irqsave(&ud->lock, flags);
543 	list_splice_tail_init(&ud->desc_to_purge, &head);
544 	spin_unlock_irqrestore(&ud->lock, flags);
545 
546 	list_for_each_entry_safe(vd, _vd, &head, node) {
547 		struct udma_chan *uc = to_udma_chan(vd->tx.chan);
548 		struct udma_desc *d = to_udma_desc(&vd->tx);
549 
550 		udma_free_hwdesc(uc, d);
551 		list_del(&vd->node);
552 		kfree(d);
553 	}
554 
555 	/* If more to purge, schedule the work again */
556 	if (!list_empty(&ud->desc_to_purge))
557 		schedule_work(&ud->purge_work);
558 }
559 
560 static void udma_desc_free(struct virt_dma_desc *vd)
561 {
562 	struct udma_dev *ud = to_udma_dev(vd->tx.chan->device);
563 	struct udma_chan *uc = to_udma_chan(vd->tx.chan);
564 	struct udma_desc *d = to_udma_desc(&vd->tx);
565 	unsigned long flags;
566 
567 	if (uc->terminated_desc == d)
568 		uc->terminated_desc = NULL;
569 
570 	if (uc->use_dma_pool) {
571 		udma_free_hwdesc(uc, d);
572 		kfree(d);
573 		return;
574 	}
575 
576 	spin_lock_irqsave(&ud->lock, flags);
577 	list_add_tail(&vd->node, &ud->desc_to_purge);
578 	spin_unlock_irqrestore(&ud->lock, flags);
579 
580 	schedule_work(&ud->purge_work);
581 }
582 
583 static bool udma_is_chan_running(struct udma_chan *uc)
584 {
585 	u32 trt_ctl = 0;
586 	u32 rrt_ctl = 0;
587 
588 	if (uc->tchan)
589 		trt_ctl = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
590 	if (uc->rchan)
591 		rrt_ctl = udma_rchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
592 
593 	if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
594 		return true;
595 
596 	return false;
597 }
598 
599 static bool udma_is_chan_paused(struct udma_chan *uc)
600 {
601 	u32 val, pause_mask;
602 
603 	switch (uc->config.dir) {
604 	case DMA_DEV_TO_MEM:
605 		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG);
606 		pause_mask = UDMA_PEER_RT_EN_PAUSE;
607 		break;
608 	case DMA_MEM_TO_DEV:
609 		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG);
610 		pause_mask = UDMA_PEER_RT_EN_PAUSE;
611 		break;
612 	case DMA_MEM_TO_MEM:
613 		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
614 		pause_mask = UDMA_CHAN_RT_CTL_PAUSE;
615 		break;
616 	default:
617 		return false;
618 	}
619 
620 	if (val & pause_mask)
621 		return true;
622 
623 	return false;
624 }
625 
626 static inline dma_addr_t udma_get_rx_flush_hwdesc_paddr(struct udma_chan *uc)
627 {
628 	return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr;
629 }
630 
631 static int udma_push_to_ring(struct udma_chan *uc, int idx)
632 {
633 	struct udma_desc *d = uc->desc;
634 	struct k3_ring *ring = NULL;
635 	dma_addr_t paddr;
636 
637 	switch (uc->config.dir) {
638 	case DMA_DEV_TO_MEM:
639 		ring = uc->rflow->fd_ring;
640 		break;
641 	case DMA_MEM_TO_DEV:
642 	case DMA_MEM_TO_MEM:
643 		ring = uc->tchan->t_ring;
644 		break;
645 	default:
646 		return -EINVAL;
647 	}
648 
649 	/* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */
650 	if (idx == -1) {
651 		paddr = udma_get_rx_flush_hwdesc_paddr(uc);
652 	} else {
653 		paddr = udma_curr_cppi5_desc_paddr(d, idx);
654 
655 		wmb(); /* Ensure that writes are not moved over this point */
656 	}
657 
658 	return k3_ringacc_ring_push(ring, &paddr);
659 }
660 
661 static bool udma_desc_is_rx_flush(struct udma_chan *uc, dma_addr_t addr)
662 {
663 	if (uc->config.dir != DMA_DEV_TO_MEM)
664 		return false;
665 
666 	if (addr == udma_get_rx_flush_hwdesc_paddr(uc))
667 		return true;
668 
669 	return false;
670 }
671 
672 static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
673 {
674 	struct k3_ring *ring = NULL;
675 	int ret;
676 
677 	switch (uc->config.dir) {
678 	case DMA_DEV_TO_MEM:
679 		ring = uc->rflow->r_ring;
680 		break;
681 	case DMA_MEM_TO_DEV:
682 	case DMA_MEM_TO_MEM:
683 		ring = uc->tchan->tc_ring;
684 		break;
685 	default:
686 		return -ENOENT;
687 	}
688 
689 	ret = k3_ringacc_ring_pop(ring, addr);
690 	if (ret)
691 		return ret;
692 
693 	rmb(); /* Ensure that reads are not moved before this point */
694 
695 	/* Teardown completion */
696 	if (cppi5_desc_is_tdcm(*addr))
697 		return 0;
698 
699 	/* Check for flush descriptor */
700 	if (udma_desc_is_rx_flush(uc, *addr))
701 		return -ENOENT;
702 
703 	return 0;
704 }
705 
706 static void udma_reset_rings(struct udma_chan *uc)
707 {
708 	struct k3_ring *ring1 = NULL;
709 	struct k3_ring *ring2 = NULL;
710 
711 	switch (uc->config.dir) {
712 	case DMA_DEV_TO_MEM:
713 		if (uc->rchan) {
714 			ring1 = uc->rflow->fd_ring;
715 			ring2 = uc->rflow->r_ring;
716 		}
717 		break;
718 	case DMA_MEM_TO_DEV:
719 	case DMA_MEM_TO_MEM:
720 		if (uc->tchan) {
721 			ring1 = uc->tchan->t_ring;
722 			ring2 = uc->tchan->tc_ring;
723 		}
724 		break;
725 	default:
726 		break;
727 	}
728 
729 	if (ring1)
730 		k3_ringacc_ring_reset_dma(ring1,
731 					  k3_ringacc_ring_get_occ(ring1));
732 	if (ring2)
733 		k3_ringacc_ring_reset(ring2);
734 
735 	/* make sure we are not leaking memory by stalled descriptor */
736 	if (uc->terminated_desc) {
737 		udma_desc_free(&uc->terminated_desc->vd);
738 		uc->terminated_desc = NULL;
739 	}
740 }
741 
742 static void udma_reset_counters(struct udma_chan *uc)
743 {
744 	u32 val;
745 
746 	if (uc->tchan) {
747 		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
748 		udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
749 
750 		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
751 		udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
752 
753 		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG);
754 		udma_tchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val);
755 
756 		if (!uc->bchan) {
757 			val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
758 			udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
759 		}
760 	}
761 
762 	if (uc->rchan) {
763 		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
764 		udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
765 
766 		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
767 		udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
768 
769 		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG);
770 		udma_rchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val);
771 
772 		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
773 		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
774 	}
775 
776 	uc->bcnt = 0;
777 }
778 
779 static int udma_reset_chan(struct udma_chan *uc, bool hard)
780 {
781 	switch (uc->config.dir) {
782 	case DMA_DEV_TO_MEM:
783 		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
784 		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
785 		break;
786 	case DMA_MEM_TO_DEV:
787 		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
788 		udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
789 		break;
790 	case DMA_MEM_TO_MEM:
791 		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
792 		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
793 		break;
794 	default:
795 		return -EINVAL;
796 	}
797 
798 	/* Reset all counters */
799 	udma_reset_counters(uc);
800 
801 	/* Hard reset: re-initialize the channel to reset */
802 	if (hard) {
803 		struct udma_chan_config ucc_backup;
804 		int ret;
805 
806 		memcpy(&ucc_backup, &uc->config, sizeof(uc->config));
807 		uc->ud->ddev.device_free_chan_resources(&uc->vc.chan);
808 
809 		/* restore the channel configuration */
810 		memcpy(&uc->config, &ucc_backup, sizeof(uc->config));
811 		ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan);
812 		if (ret)
813 			return ret;
814 
815 		/*
816 		 * Setting forced teardown after forced reset helps recovering
817 		 * the rchan.
818 		 */
819 		if (uc->config.dir == DMA_DEV_TO_MEM)
820 			udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
821 					   UDMA_CHAN_RT_CTL_EN |
822 					   UDMA_CHAN_RT_CTL_TDOWN |
823 					   UDMA_CHAN_RT_CTL_FTDOWN);
824 	}
825 	uc->state = UDMA_CHAN_IS_IDLE;
826 
827 	return 0;
828 }
829 
830 static void udma_start_desc(struct udma_chan *uc)
831 {
832 	struct udma_chan_config *ucc = &uc->config;
833 
834 	if (uc->ud->match_data->type == DMA_TYPE_UDMA && ucc->pkt_mode &&
835 	    (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) {
836 		int i;
837 
838 		/*
839 		 * UDMA only: Push all descriptors to ring for packet mode
840 		 * cyclic or RX
841 		 * PKTDMA supports pre-linked descriptor and cyclic is not
842 		 * supported
843 		 */
844 		for (i = 0; i < uc->desc->sglen; i++)
845 			udma_push_to_ring(uc, i);
846 	} else {
847 		udma_push_to_ring(uc, 0);
848 	}
849 }
850 
851 static bool udma_chan_needs_reconfiguration(struct udma_chan *uc)
852 {
853 	/* Only PDMAs have staticTR */
854 	if (uc->config.ep_type == PSIL_EP_NATIVE)
855 		return false;
856 
857 	/* Check if the staticTR configuration has changed for TX */
858 	if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr)))
859 		return true;
860 
861 	return false;
862 }
863 
864 static int udma_start(struct udma_chan *uc)
865 {
866 	struct virt_dma_desc *vd = vchan_next_desc(&uc->vc);
867 
868 	if (!vd) {
869 		uc->desc = NULL;
870 		return -ENOENT;
871 	}
872 
873 	list_del(&vd->node);
874 
875 	uc->desc = to_udma_desc(&vd->tx);
876 
877 	/* Channel is already running and does not need reconfiguration */
878 	if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) {
879 		udma_start_desc(uc);
880 		goto out;
881 	}
882 
883 	/* Make sure that we clear the teardown bit, if it is set */
884 	udma_reset_chan(uc, false);
885 
886 	/* Push descriptors before we start the channel */
887 	udma_start_desc(uc);
888 
889 	switch (uc->desc->dir) {
890 	case DMA_DEV_TO_MEM:
891 		/* Config remote TR */
892 		if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
893 			u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
894 				  PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
895 			const struct udma_match_data *match_data =
896 							uc->ud->match_data;
897 
898 			if (uc->config.enable_acc32)
899 				val |= PDMA_STATIC_TR_XY_ACC32;
900 			if (uc->config.enable_burst)
901 				val |= PDMA_STATIC_TR_XY_BURST;
902 
903 			udma_rchanrt_write(uc,
904 					   UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG,
905 					   val);
906 
907 			udma_rchanrt_write(uc,
908 				UDMA_CHAN_RT_PEER_STATIC_TR_Z_REG,
909 				PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt,
910 						 match_data->statictr_z_mask));
911 
912 			/* save the current staticTR configuration */
913 			memcpy(&uc->static_tr, &uc->desc->static_tr,
914 			       sizeof(uc->static_tr));
915 		}
916 
917 		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
918 				   UDMA_CHAN_RT_CTL_EN);
919 
920 		/* Enable remote */
921 		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
922 				   UDMA_PEER_RT_EN_ENABLE);
923 
924 		break;
925 	case DMA_MEM_TO_DEV:
926 		/* Config remote TR */
927 		if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
928 			u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
929 				  PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
930 
931 			if (uc->config.enable_acc32)
932 				val |= PDMA_STATIC_TR_XY_ACC32;
933 			if (uc->config.enable_burst)
934 				val |= PDMA_STATIC_TR_XY_BURST;
935 
936 			udma_tchanrt_write(uc,
937 					   UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG,
938 					   val);
939 
940 			/* save the current staticTR configuration */
941 			memcpy(&uc->static_tr, &uc->desc->static_tr,
942 			       sizeof(uc->static_tr));
943 		}
944 
945 		/* Enable remote */
946 		udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
947 				   UDMA_PEER_RT_EN_ENABLE);
948 
949 		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
950 				   UDMA_CHAN_RT_CTL_EN);
951 
952 		break;
953 	case DMA_MEM_TO_MEM:
954 		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
955 				   UDMA_CHAN_RT_CTL_EN);
956 		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
957 				   UDMA_CHAN_RT_CTL_EN);
958 
959 		break;
960 	default:
961 		return -EINVAL;
962 	}
963 
964 	uc->state = UDMA_CHAN_IS_ACTIVE;
965 out:
966 
967 	return 0;
968 }
969 
970 static int udma_stop(struct udma_chan *uc)
971 {
972 	enum udma_chan_state old_state = uc->state;
973 
974 	uc->state = UDMA_CHAN_IS_TERMINATING;
975 	reinit_completion(&uc->teardown_completed);
976 
977 	switch (uc->config.dir) {
978 	case DMA_DEV_TO_MEM:
979 		if (!uc->cyclic && !uc->desc)
980 			udma_push_to_ring(uc, -1);
981 
982 		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
983 				   UDMA_PEER_RT_EN_ENABLE |
984 				   UDMA_PEER_RT_EN_TEARDOWN);
985 		break;
986 	case DMA_MEM_TO_DEV:
987 		udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
988 				   UDMA_PEER_RT_EN_ENABLE |
989 				   UDMA_PEER_RT_EN_FLUSH);
990 		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
991 				   UDMA_CHAN_RT_CTL_EN |
992 				   UDMA_CHAN_RT_CTL_TDOWN);
993 		break;
994 	case DMA_MEM_TO_MEM:
995 		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
996 				   UDMA_CHAN_RT_CTL_EN |
997 				   UDMA_CHAN_RT_CTL_TDOWN);
998 		break;
999 	default:
1000 		uc->state = old_state;
1001 		complete_all(&uc->teardown_completed);
1002 		return -EINVAL;
1003 	}
1004 
1005 	return 0;
1006 }
1007 
1008 static void udma_cyclic_packet_elapsed(struct udma_chan *uc)
1009 {
1010 	struct udma_desc *d = uc->desc;
1011 	struct cppi5_host_desc_t *h_desc;
1012 
1013 	h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr;
1014 	cppi5_hdesc_reset_to_original(h_desc);
1015 	udma_push_to_ring(uc, d->desc_idx);
1016 	d->desc_idx = (d->desc_idx + 1) % d->sglen;
1017 }
1018 
1019 static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d)
1020 {
1021 	struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr;
1022 
1023 	memcpy(d->metadata, h_desc->epib, d->metadata_size);
1024 }
1025 
1026 static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
1027 {
1028 	u32 peer_bcnt, bcnt;
1029 
1030 	/* Only TX towards PDMA is affected */
1031 	if (uc->config.ep_type == PSIL_EP_NATIVE ||
1032 	    uc->config.dir != DMA_MEM_TO_DEV)
1033 		return true;
1034 
1035 	peer_bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
1036 	bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
1037 
1038 	/* Transfer is incomplete, store current residue and time stamp */
1039 	if (peer_bcnt < bcnt) {
1040 		uc->tx_drain.residue = bcnt - peer_bcnt;
1041 		uc->tx_drain.tstamp = ktime_get();
1042 		return false;
1043 	}
1044 
1045 	return true;
1046 }
1047 
1048 static void udma_check_tx_completion(struct work_struct *work)
1049 {
1050 	struct udma_chan *uc = container_of(work, typeof(*uc),
1051 					    tx_drain.work.work);
1052 	bool desc_done = true;
1053 	u32 residue_diff;
1054 	ktime_t time_diff;
1055 	unsigned long delay;
1056 
1057 	while (1) {
1058 		if (uc->desc) {
1059 			/* Get previous residue and time stamp */
1060 			residue_diff = uc->tx_drain.residue;
1061 			time_diff = uc->tx_drain.tstamp;
1062 			/*
1063 			 * Get current residue and time stamp or see if
1064 			 * transfer is complete
1065 			 */
1066 			desc_done = udma_is_desc_really_done(uc, uc->desc);
1067 		}
1068 
1069 		if (!desc_done) {
1070 			/*
1071 			 * Find the time delta and residue delta w.r.t
1072 			 * previous poll
1073 			 */
1074 			time_diff = ktime_sub(uc->tx_drain.tstamp,
1075 					      time_diff) + 1;
1076 			residue_diff -= uc->tx_drain.residue;
1077 			if (residue_diff) {
1078 				/*
1079 				 * Try to guess when we should check
1080 				 * next time by calculating rate at
1081 				 * which data is being drained at the
1082 				 * peer device
1083 				 */
1084 				delay = (time_diff / residue_diff) *
1085 					uc->tx_drain.residue;
1086 			} else {
1087 				/* No progress, check again in 1 second  */
1088 				schedule_delayed_work(&uc->tx_drain.work, HZ);
1089 				break;
1090 			}
1091 
1092 			usleep_range(ktime_to_us(delay),
1093 				     ktime_to_us(delay) + 10);
1094 			continue;
1095 		}
1096 
1097 		if (uc->desc) {
1098 			struct udma_desc *d = uc->desc;
1099 
1100 			uc->bcnt += d->residue;
1101 			udma_start(uc);
1102 			vchan_cookie_complete(&d->vd);
1103 			break;
1104 		}
1105 
1106 		break;
1107 	}
1108 }
1109 
1110 static irqreturn_t udma_ring_irq_handler(int irq, void *data)
1111 {
1112 	struct udma_chan *uc = data;
1113 	struct udma_desc *d;
1114 	dma_addr_t paddr = 0;
1115 
1116 	if (udma_pop_from_ring(uc, &paddr) || !paddr)
1117 		return IRQ_HANDLED;
1118 
1119 	spin_lock(&uc->vc.lock);
1120 
1121 	/* Teardown completion message */
1122 	if (cppi5_desc_is_tdcm(paddr)) {
1123 		complete_all(&uc->teardown_completed);
1124 
1125 		if (uc->terminated_desc) {
1126 			udma_desc_free(&uc->terminated_desc->vd);
1127 			uc->terminated_desc = NULL;
1128 		}
1129 
1130 		if (!uc->desc)
1131 			udma_start(uc);
1132 
1133 		goto out;
1134 	}
1135 
1136 	d = udma_udma_desc_from_paddr(uc, paddr);
1137 
1138 	if (d) {
1139 		dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
1140 								   d->desc_idx);
1141 		if (desc_paddr != paddr) {
1142 			dev_err(uc->ud->dev, "not matching descriptors!\n");
1143 			goto out;
1144 		}
1145 
1146 		if (d == uc->desc) {
1147 			/* active descriptor */
1148 			if (uc->cyclic) {
1149 				udma_cyclic_packet_elapsed(uc);
1150 				vchan_cyclic_callback(&d->vd);
1151 			} else {
1152 				if (udma_is_desc_really_done(uc, d)) {
1153 					uc->bcnt += d->residue;
1154 					udma_start(uc);
1155 					vchan_cookie_complete(&d->vd);
1156 				} else {
1157 					schedule_delayed_work(&uc->tx_drain.work,
1158 							      0);
1159 				}
1160 			}
1161 		} else {
1162 			/*
1163 			 * terminated descriptor, mark the descriptor as
1164 			 * completed to update the channel's cookie marker
1165 			 */
1166 			dma_cookie_complete(&d->vd.tx);
1167 		}
1168 	}
1169 out:
1170 	spin_unlock(&uc->vc.lock);
1171 
1172 	return IRQ_HANDLED;
1173 }
1174 
1175 static irqreturn_t udma_udma_irq_handler(int irq, void *data)
1176 {
1177 	struct udma_chan *uc = data;
1178 	struct udma_desc *d;
1179 
1180 	spin_lock(&uc->vc.lock);
1181 	d = uc->desc;
1182 	if (d) {
1183 		d->tr_idx = (d->tr_idx + 1) % d->sglen;
1184 
1185 		if (uc->cyclic) {
1186 			vchan_cyclic_callback(&d->vd);
1187 		} else {
1188 			/* TODO: figure out the real amount of data */
1189 			uc->bcnt += d->residue;
1190 			udma_start(uc);
1191 			vchan_cookie_complete(&d->vd);
1192 		}
1193 	}
1194 
1195 	spin_unlock(&uc->vc.lock);
1196 
1197 	return IRQ_HANDLED;
1198 }
1199 
1200 /**
1201  * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
1202  * @ud: UDMA device
1203  * @from: Start the search from this flow id number
1204  * @cnt: Number of consecutive flow ids to allocate
1205  *
1206  * Allocate range of RX flow ids for future use, those flows can be requested
1207  * only using explicit flow id number. if @from is set to -1 it will try to find
1208  * first free range. if @from is positive value it will force allocation only
1209  * of the specified range of flows.
1210  *
1211  * Returns -ENOMEM if can't find free range.
1212  * -EEXIST if requested range is busy.
1213  * -EINVAL if wrong input values passed.
1214  * Returns flow id on success.
1215  */
1216 static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
1217 {
1218 	int start, tmp_from;
1219 	DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);
1220 
1221 	tmp_from = from;
1222 	if (tmp_from < 0)
1223 		tmp_from = ud->rchan_cnt;
1224 	/* default flows can't be allocated and accessible only by id */
1225 	if (tmp_from < ud->rchan_cnt)
1226 		return -EINVAL;
1227 
1228 	if (tmp_from + cnt > ud->rflow_cnt)
1229 		return -EINVAL;
1230 
1231 	bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
1232 		  ud->rflow_cnt);
1233 
1234 	start = bitmap_find_next_zero_area(tmp,
1235 					   ud->rflow_cnt,
1236 					   tmp_from, cnt, 0);
1237 	if (start >= ud->rflow_cnt)
1238 		return -ENOMEM;
1239 
1240 	if (from >= 0 && start != from)
1241 		return -EEXIST;
1242 
1243 	bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
1244 	return start;
1245 }
1246 
1247 static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
1248 {
1249 	if (from < ud->rchan_cnt)
1250 		return -EINVAL;
1251 	if (from + cnt > ud->rflow_cnt)
1252 		return -EINVAL;
1253 
1254 	bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
1255 	return 0;
1256 }
1257 
1258 static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
1259 {
1260 	/*
1261 	 * Attempt to request rflow by ID can be made for any rflow
1262 	 * if not in use with assumption that caller knows what's doing.
1263 	 * TI-SCI FW will perform additional permission check ant way, it's
1264 	 * safe
1265 	 */
1266 
1267 	if (id < 0 || id >= ud->rflow_cnt)
1268 		return ERR_PTR(-ENOENT);
1269 
1270 	if (test_bit(id, ud->rflow_in_use))
1271 		return ERR_PTR(-ENOENT);
1272 
1273 	if (ud->rflow_gp_map) {
1274 		/* GP rflow has to be allocated first */
1275 		if (!test_bit(id, ud->rflow_gp_map) &&
1276 		    !test_bit(id, ud->rflow_gp_map_allocated))
1277 			return ERR_PTR(-EINVAL);
1278 	}
1279 
1280 	dev_dbg(ud->dev, "get rflow%d\n", id);
1281 	set_bit(id, ud->rflow_in_use);
1282 	return &ud->rflows[id];
1283 }
1284 
1285 static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow)
1286 {
1287 	if (!test_bit(rflow->id, ud->rflow_in_use)) {
1288 		dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id);
1289 		return;
1290 	}
1291 
1292 	dev_dbg(ud->dev, "put rflow%d\n", rflow->id);
1293 	clear_bit(rflow->id, ud->rflow_in_use);
1294 }
1295 
1296 #define UDMA_RESERVE_RESOURCE(res)					\
1297 static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud,	\
1298 					       enum udma_tp_level tpl,	\
1299 					       int id)			\
1300 {									\
1301 	if (id >= 0) {							\
1302 		if (test_bit(id, ud->res##_map)) {			\
1303 			dev_err(ud->dev, "res##%d is in use\n", id);	\
1304 			return ERR_PTR(-ENOENT);			\
1305 		}							\
1306 	} else {							\
1307 		int start;						\
1308 									\
1309 		if (tpl >= ud->res##_tpl.levels)			\
1310 			tpl = ud->res##_tpl.levels - 1;			\
1311 									\
1312 		start = ud->res##_tpl.start_idx[tpl];			\
1313 									\
1314 		id = find_next_zero_bit(ud->res##_map, ud->res##_cnt,	\
1315 					start);				\
1316 		if (id == ud->res##_cnt) {				\
1317 			return ERR_PTR(-ENOENT);			\
1318 		}							\
1319 	}								\
1320 									\
1321 	set_bit(id, ud->res##_map);					\
1322 	return &ud->res##s[id];						\
1323 }
1324 
1325 UDMA_RESERVE_RESOURCE(bchan);
1326 UDMA_RESERVE_RESOURCE(tchan);
1327 UDMA_RESERVE_RESOURCE(rchan);
1328 
1329 static int bcdma_get_bchan(struct udma_chan *uc)
1330 {
1331 	struct udma_dev *ud = uc->ud;
1332 	enum udma_tp_level tpl;
1333 
1334 	if (uc->bchan) {
1335 		dev_dbg(ud->dev, "chan%d: already have bchan%d allocated\n",
1336 			uc->id, uc->bchan->id);
1337 		return 0;
1338 	}
1339 
1340 	/*
1341 	 * Use normal channels for peripherals, and highest TPL channel for
1342 	 * mem2mem
1343 	 */
1344 	if (uc->config.tr_trigger_type)
1345 		tpl = 0;
1346 	else
1347 		tpl = ud->bchan_tpl.levels - 1;
1348 
1349 	uc->bchan = __udma_reserve_bchan(ud, tpl, -1);
1350 	if (IS_ERR(uc->bchan))
1351 		return PTR_ERR(uc->bchan);
1352 
1353 	uc->tchan = uc->bchan;
1354 
1355 	return 0;
1356 }
1357 
1358 static int udma_get_tchan(struct udma_chan *uc)
1359 {
1360 	struct udma_dev *ud = uc->ud;
1361 
1362 	if (uc->tchan) {
1363 		dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
1364 			uc->id, uc->tchan->id);
1365 		return 0;
1366 	}
1367 
1368 	/*
1369 	 * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels.
1370 	 * For PKTDMA mapped channels it is configured to a channel which must
1371 	 * be used to service the peripheral.
1372 	 */
1373 	uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl,
1374 					 uc->config.mapped_channel_id);
1375 	if (IS_ERR(uc->tchan))
1376 		return PTR_ERR(uc->tchan);
1377 
1378 	if (ud->tflow_cnt) {
1379 		int tflow_id;
1380 
1381 		/* Only PKTDMA have support for tx flows */
1382 		if (uc->config.default_flow_id >= 0)
1383 			tflow_id = uc->config.default_flow_id;
1384 		else
1385 			tflow_id = uc->tchan->id;
1386 
1387 		if (test_bit(tflow_id, ud->tflow_map)) {
1388 			dev_err(ud->dev, "tflow%d is in use\n", tflow_id);
1389 			clear_bit(uc->tchan->id, ud->tchan_map);
1390 			uc->tchan = NULL;
1391 			return -ENOENT;
1392 		}
1393 
1394 		uc->tchan->tflow_id = tflow_id;
1395 		set_bit(tflow_id, ud->tflow_map);
1396 	} else {
1397 		uc->tchan->tflow_id = -1;
1398 	}
1399 
1400 	return 0;
1401 }
1402 
1403 static int udma_get_rchan(struct udma_chan *uc)
1404 {
1405 	struct udma_dev *ud = uc->ud;
1406 
1407 	if (uc->rchan) {
1408 		dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
1409 			uc->id, uc->rchan->id);
1410 		return 0;
1411 	}
1412 
1413 	/*
1414 	 * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels.
1415 	 * For PKTDMA mapped channels it is configured to a channel which must
1416 	 * be used to service the peripheral.
1417 	 */
1418 	uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl,
1419 					 uc->config.mapped_channel_id);
1420 
1421 	return PTR_ERR_OR_ZERO(uc->rchan);
1422 }
1423 
1424 static int udma_get_chan_pair(struct udma_chan *uc)
1425 {
1426 	struct udma_dev *ud = uc->ud;
1427 	int chan_id, end;
1428 
1429 	if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
1430 		dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
1431 			 uc->id, uc->tchan->id);
1432 		return 0;
1433 	}
1434 
1435 	if (uc->tchan) {
1436 		dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
1437 			uc->id, uc->tchan->id);
1438 		return -EBUSY;
1439 	} else if (uc->rchan) {
1440 		dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
1441 			uc->id, uc->rchan->id);
1442 		return -EBUSY;
1443 	}
1444 
1445 	/* Can be optimized, but let's have it like this for now */
1446 	end = min(ud->tchan_cnt, ud->rchan_cnt);
1447 	/*
1448 	 * Try to use the highest TPL channel pair for MEM_TO_MEM channels
1449 	 * Note: in UDMAP the channel TPL is symmetric between tchan and rchan
1450 	 */
1451 	chan_id = ud->tchan_tpl.start_idx[ud->tchan_tpl.levels - 1];
1452 	for (; chan_id < end; chan_id++) {
1453 		if (!test_bit(chan_id, ud->tchan_map) &&
1454 		    !test_bit(chan_id, ud->rchan_map))
1455 			break;
1456 	}
1457 
1458 	if (chan_id == end)
1459 		return -ENOENT;
1460 
1461 	set_bit(chan_id, ud->tchan_map);
1462 	set_bit(chan_id, ud->rchan_map);
1463 	uc->tchan = &ud->tchans[chan_id];
1464 	uc->rchan = &ud->rchans[chan_id];
1465 
1466 	/* UDMA does not use tx flows */
1467 	uc->tchan->tflow_id = -1;
1468 
1469 	return 0;
1470 }
1471 
1472 static int udma_get_rflow(struct udma_chan *uc, int flow_id)
1473 {
1474 	struct udma_dev *ud = uc->ud;
1475 
1476 	if (!uc->rchan) {
1477 		dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id);
1478 		return -EINVAL;
1479 	}
1480 
1481 	if (uc->rflow) {
1482 		dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
1483 			uc->id, uc->rflow->id);
1484 		return 0;
1485 	}
1486 
1487 	uc->rflow = __udma_get_rflow(ud, flow_id);
1488 
1489 	return PTR_ERR_OR_ZERO(uc->rflow);
1490 }
1491 
1492 static void bcdma_put_bchan(struct udma_chan *uc)
1493 {
1494 	struct udma_dev *ud = uc->ud;
1495 
1496 	if (uc->bchan) {
1497 		dev_dbg(ud->dev, "chan%d: put bchan%d\n", uc->id,
1498 			uc->bchan->id);
1499 		clear_bit(uc->bchan->id, ud->bchan_map);
1500 		uc->bchan = NULL;
1501 		uc->tchan = NULL;
1502 	}
1503 }
1504 
1505 static void udma_put_rchan(struct udma_chan *uc)
1506 {
1507 	struct udma_dev *ud = uc->ud;
1508 
1509 	if (uc->rchan) {
1510 		dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
1511 			uc->rchan->id);
1512 		clear_bit(uc->rchan->id, ud->rchan_map);
1513 		uc->rchan = NULL;
1514 	}
1515 }
1516 
1517 static void udma_put_tchan(struct udma_chan *uc)
1518 {
1519 	struct udma_dev *ud = uc->ud;
1520 
1521 	if (uc->tchan) {
1522 		dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
1523 			uc->tchan->id);
1524 		clear_bit(uc->tchan->id, ud->tchan_map);
1525 
1526 		if (uc->tchan->tflow_id >= 0)
1527 			clear_bit(uc->tchan->tflow_id, ud->tflow_map);
1528 
1529 		uc->tchan = NULL;
1530 	}
1531 }
1532 
1533 static void udma_put_rflow(struct udma_chan *uc)
1534 {
1535 	struct udma_dev *ud = uc->ud;
1536 
1537 	if (uc->rflow) {
1538 		dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
1539 			uc->rflow->id);
1540 		__udma_put_rflow(ud, uc->rflow);
1541 		uc->rflow = NULL;
1542 	}
1543 }
1544 
1545 static void bcdma_free_bchan_resources(struct udma_chan *uc)
1546 {
1547 	if (!uc->bchan)
1548 		return;
1549 
1550 	k3_ringacc_ring_free(uc->bchan->tc_ring);
1551 	k3_ringacc_ring_free(uc->bchan->t_ring);
1552 	uc->bchan->tc_ring = NULL;
1553 	uc->bchan->t_ring = NULL;
1554 	k3_configure_chan_coherency(&uc->vc.chan, 0);
1555 
1556 	bcdma_put_bchan(uc);
1557 }
1558 
1559 static int bcdma_alloc_bchan_resources(struct udma_chan *uc)
1560 {
1561 	struct k3_ring_cfg ring_cfg;
1562 	struct udma_dev *ud = uc->ud;
1563 	int ret;
1564 
1565 	ret = bcdma_get_bchan(uc);
1566 	if (ret)
1567 		return ret;
1568 
1569 	ret = k3_ringacc_request_rings_pair(ud->ringacc, uc->bchan->id, -1,
1570 					    &uc->bchan->t_ring,
1571 					    &uc->bchan->tc_ring);
1572 	if (ret) {
1573 		ret = -EBUSY;
1574 		goto err_ring;
1575 	}
1576 
1577 	memset(&ring_cfg, 0, sizeof(ring_cfg));
1578 	ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1579 	ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1580 	ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
1581 
1582 	k3_configure_chan_coherency(&uc->vc.chan, ud->asel);
1583 	ring_cfg.asel = ud->asel;
1584 	ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
1585 
1586 	ret = k3_ringacc_ring_cfg(uc->bchan->t_ring, &ring_cfg);
1587 	if (ret)
1588 		goto err_ringcfg;
1589 
1590 	return 0;
1591 
1592 err_ringcfg:
1593 	k3_ringacc_ring_free(uc->bchan->tc_ring);
1594 	uc->bchan->tc_ring = NULL;
1595 	k3_ringacc_ring_free(uc->bchan->t_ring);
1596 	uc->bchan->t_ring = NULL;
1597 	k3_configure_chan_coherency(&uc->vc.chan, 0);
1598 err_ring:
1599 	bcdma_put_bchan(uc);
1600 
1601 	return ret;
1602 }
1603 
1604 static void udma_free_tx_resources(struct udma_chan *uc)
1605 {
1606 	if (!uc->tchan)
1607 		return;
1608 
1609 	k3_ringacc_ring_free(uc->tchan->t_ring);
1610 	k3_ringacc_ring_free(uc->tchan->tc_ring);
1611 	uc->tchan->t_ring = NULL;
1612 	uc->tchan->tc_ring = NULL;
1613 
1614 	udma_put_tchan(uc);
1615 }
1616 
1617 static int udma_alloc_tx_resources(struct udma_chan *uc)
1618 {
1619 	struct k3_ring_cfg ring_cfg;
1620 	struct udma_dev *ud = uc->ud;
1621 	struct udma_tchan *tchan;
1622 	int ring_idx, ret;
1623 
1624 	ret = udma_get_tchan(uc);
1625 	if (ret)
1626 		return ret;
1627 
1628 	tchan = uc->tchan;
1629 	if (tchan->tflow_id >= 0)
1630 		ring_idx = tchan->tflow_id;
1631 	else
1632 		ring_idx = ud->bchan_cnt + tchan->id;
1633 
1634 	ret = k3_ringacc_request_rings_pair(ud->ringacc, ring_idx, -1,
1635 					    &tchan->t_ring,
1636 					    &tchan->tc_ring);
1637 	if (ret) {
1638 		ret = -EBUSY;
1639 		goto err_ring;
1640 	}
1641 
1642 	memset(&ring_cfg, 0, sizeof(ring_cfg));
1643 	ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1644 	ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1645 	if (ud->match_data->type == DMA_TYPE_UDMA) {
1646 		ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
1647 	} else {
1648 		ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
1649 
1650 		k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel);
1651 		ring_cfg.asel = uc->config.asel;
1652 		ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
1653 	}
1654 
1655 	ret = k3_ringacc_ring_cfg(tchan->t_ring, &ring_cfg);
1656 	ret |= k3_ringacc_ring_cfg(tchan->tc_ring, &ring_cfg);
1657 
1658 	if (ret)
1659 		goto err_ringcfg;
1660 
1661 	return 0;
1662 
1663 err_ringcfg:
1664 	k3_ringacc_ring_free(uc->tchan->tc_ring);
1665 	uc->tchan->tc_ring = NULL;
1666 	k3_ringacc_ring_free(uc->tchan->t_ring);
1667 	uc->tchan->t_ring = NULL;
1668 err_ring:
1669 	udma_put_tchan(uc);
1670 
1671 	return ret;
1672 }
1673 
1674 static void udma_free_rx_resources(struct udma_chan *uc)
1675 {
1676 	if (!uc->rchan)
1677 		return;
1678 
1679 	if (uc->rflow) {
1680 		struct udma_rflow *rflow = uc->rflow;
1681 
1682 		k3_ringacc_ring_free(rflow->fd_ring);
1683 		k3_ringacc_ring_free(rflow->r_ring);
1684 		rflow->fd_ring = NULL;
1685 		rflow->r_ring = NULL;
1686 
1687 		udma_put_rflow(uc);
1688 	}
1689 
1690 	udma_put_rchan(uc);
1691 }
1692 
1693 static int udma_alloc_rx_resources(struct udma_chan *uc)
1694 {
1695 	struct udma_dev *ud = uc->ud;
1696 	struct k3_ring_cfg ring_cfg;
1697 	struct udma_rflow *rflow;
1698 	int fd_ring_id;
1699 	int ret;
1700 
1701 	ret = udma_get_rchan(uc);
1702 	if (ret)
1703 		return ret;
1704 
1705 	/* For MEM_TO_MEM we don't need rflow or rings */
1706 	if (uc->config.dir == DMA_MEM_TO_MEM)
1707 		return 0;
1708 
1709 	if (uc->config.default_flow_id >= 0)
1710 		ret = udma_get_rflow(uc, uc->config.default_flow_id);
1711 	else
1712 		ret = udma_get_rflow(uc, uc->rchan->id);
1713 
1714 	if (ret) {
1715 		ret = -EBUSY;
1716 		goto err_rflow;
1717 	}
1718 
1719 	rflow = uc->rflow;
1720 	if (ud->tflow_cnt)
1721 		fd_ring_id = ud->tflow_cnt + rflow->id;
1722 	else
1723 		fd_ring_id = ud->bchan_cnt + ud->tchan_cnt + ud->echan_cnt +
1724 			     uc->rchan->id;
1725 
1726 	ret = k3_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
1727 					    &rflow->fd_ring, &rflow->r_ring);
1728 	if (ret) {
1729 		ret = -EBUSY;
1730 		goto err_ring;
1731 	}
1732 
1733 	memset(&ring_cfg, 0, sizeof(ring_cfg));
1734 
1735 	ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1736 	if (ud->match_data->type == DMA_TYPE_UDMA) {
1737 		if (uc->config.pkt_mode)
1738 			ring_cfg.size = SG_MAX_SEGMENTS;
1739 		else
1740 			ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1741 
1742 		ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
1743 	} else {
1744 		ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1745 		ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
1746 
1747 		k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel);
1748 		ring_cfg.asel = uc->config.asel;
1749 		ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
1750 	}
1751 
1752 	ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
1753 
1754 	ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1755 	ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
1756 
1757 	if (ret)
1758 		goto err_ringcfg;
1759 
1760 	return 0;
1761 
1762 err_ringcfg:
1763 	k3_ringacc_ring_free(rflow->r_ring);
1764 	rflow->r_ring = NULL;
1765 	k3_ringacc_ring_free(rflow->fd_ring);
1766 	rflow->fd_ring = NULL;
1767 err_ring:
1768 	udma_put_rflow(uc);
1769 err_rflow:
1770 	udma_put_rchan(uc);
1771 
1772 	return ret;
1773 }
1774 
1775 #define TISCI_BCDMA_BCHAN_VALID_PARAMS (			\
1776 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
1777 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_EXTENDED_CH_TYPE_VALID)
1778 
1779 #define TISCI_BCDMA_TCHAN_VALID_PARAMS (			\
1780 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
1781 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID)
1782 
1783 #define TISCI_BCDMA_RCHAN_VALID_PARAMS (			\
1784 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID)
1785 
1786 #define TISCI_UDMA_TCHAN_VALID_PARAMS (				\
1787 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
1788 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |	\
1789 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |	\
1790 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |		\
1791 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |	\
1792 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |		\
1793 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |		\
1794 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1795 
1796 #define TISCI_UDMA_RCHAN_VALID_PARAMS (				\
1797 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
1798 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |		\
1799 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |		\
1800 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |		\
1801 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID |	\
1802 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID |	\
1803 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |	\
1804 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID |	\
1805 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1806 
1807 static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
1808 {
1809 	struct udma_dev *ud = uc->ud;
1810 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1811 	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1812 	struct udma_tchan *tchan = uc->tchan;
1813 	struct udma_rchan *rchan = uc->rchan;
1814 	int ret = 0;
1815 
1816 	/* Non synchronized - mem to mem type of transfer */
1817 	int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
1818 	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1819 	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
1820 
1821 	req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
1822 	req_tx.nav_id = tisci_rm->tisci_dev_id;
1823 	req_tx.index = tchan->id;
1824 	req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1825 	req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1826 	req_tx.txcq_qnum = tc_ring;
1827 	req_tx.tx_atype = ud->atype;
1828 
1829 	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1830 	if (ret) {
1831 		dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1832 		return ret;
1833 	}
1834 
1835 	req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS;
1836 	req_rx.nav_id = tisci_rm->tisci_dev_id;
1837 	req_rx.index = rchan->id;
1838 	req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1839 	req_rx.rxcq_qnum = tc_ring;
1840 	req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1841 	req_rx.rx_atype = ud->atype;
1842 
1843 	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
1844 	if (ret)
1845 		dev_err(ud->dev, "rchan%d alloc failed %d\n", rchan->id, ret);
1846 
1847 	return ret;
1848 }
1849 
1850 static int bcdma_tisci_m2m_channel_config(struct udma_chan *uc)
1851 {
1852 	struct udma_dev *ud = uc->ud;
1853 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1854 	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1855 	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1856 	struct udma_bchan *bchan = uc->bchan;
1857 	int ret = 0;
1858 
1859 	req_tx.valid_params = TISCI_BCDMA_BCHAN_VALID_PARAMS;
1860 	req_tx.nav_id = tisci_rm->tisci_dev_id;
1861 	req_tx.extended_ch_type = TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN;
1862 	req_tx.index = bchan->id;
1863 
1864 	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1865 	if (ret)
1866 		dev_err(ud->dev, "bchan%d cfg failed %d\n", bchan->id, ret);
1867 
1868 	return ret;
1869 }
1870 
1871 static int udma_tisci_tx_channel_config(struct udma_chan *uc)
1872 {
1873 	struct udma_dev *ud = uc->ud;
1874 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1875 	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1876 	struct udma_tchan *tchan = uc->tchan;
1877 	int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
1878 	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1879 	u32 mode, fetch_size;
1880 	int ret = 0;
1881 
1882 	if (uc->config.pkt_mode) {
1883 		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
1884 		fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
1885 						   uc->config.psd_size, 0);
1886 	} else {
1887 		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
1888 		fetch_size = sizeof(struct cppi5_desc_hdr_t);
1889 	}
1890 
1891 	req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
1892 	req_tx.nav_id = tisci_rm->tisci_dev_id;
1893 	req_tx.index = tchan->id;
1894 	req_tx.tx_chan_type = mode;
1895 	req_tx.tx_supr_tdpkt = uc->config.notdpkt;
1896 	req_tx.tx_fetch_size = fetch_size >> 2;
1897 	req_tx.txcq_qnum = tc_ring;
1898 	req_tx.tx_atype = uc->config.atype;
1899 	if (uc->config.ep_type == PSIL_EP_PDMA_XY &&
1900 	    ud->match_data->flags & UDMA_FLAG_TDTYPE) {
1901 		/* wait for peer to complete the teardown for PDMAs */
1902 		req_tx.valid_params |=
1903 				TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
1904 		req_tx.tx_tdtype = 1;
1905 	}
1906 
1907 	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1908 	if (ret)
1909 		dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1910 
1911 	return ret;
1912 }
1913 
1914 static int bcdma_tisci_tx_channel_config(struct udma_chan *uc)
1915 {
1916 	struct udma_dev *ud = uc->ud;
1917 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1918 	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1919 	struct udma_tchan *tchan = uc->tchan;
1920 	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1921 	int ret = 0;
1922 
1923 	req_tx.valid_params = TISCI_BCDMA_TCHAN_VALID_PARAMS;
1924 	req_tx.nav_id = tisci_rm->tisci_dev_id;
1925 	req_tx.index = tchan->id;
1926 	req_tx.tx_supr_tdpkt = uc->config.notdpkt;
1927 	if (ud->match_data->flags & UDMA_FLAG_TDTYPE) {
1928 		/* wait for peer to complete the teardown for PDMAs */
1929 		req_tx.valid_params |=
1930 				TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
1931 		req_tx.tx_tdtype = 1;
1932 	}
1933 
1934 	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1935 	if (ret)
1936 		dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1937 
1938 	return ret;
1939 }
1940 
1941 #define pktdma_tisci_tx_channel_config bcdma_tisci_tx_channel_config
1942 
1943 static int udma_tisci_rx_channel_config(struct udma_chan *uc)
1944 {
1945 	struct udma_dev *ud = uc->ud;
1946 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1947 	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1948 	struct udma_rchan *rchan = uc->rchan;
1949 	int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring);
1950 	int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring);
1951 	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
1952 	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
1953 	u32 mode, fetch_size;
1954 	int ret = 0;
1955 
1956 	if (uc->config.pkt_mode) {
1957 		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
1958 		fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
1959 						   uc->config.psd_size, 0);
1960 	} else {
1961 		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
1962 		fetch_size = sizeof(struct cppi5_desc_hdr_t);
1963 	}
1964 
1965 	req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS;
1966 	req_rx.nav_id = tisci_rm->tisci_dev_id;
1967 	req_rx.index = rchan->id;
1968 	req_rx.rx_fetch_size =  fetch_size >> 2;
1969 	req_rx.rxcq_qnum = rx_ring;
1970 	req_rx.rx_chan_type = mode;
1971 	req_rx.rx_atype = uc->config.atype;
1972 
1973 	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
1974 	if (ret) {
1975 		dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
1976 		return ret;
1977 	}
1978 
1979 	flow_req.valid_params =
1980 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
1981 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
1982 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
1983 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
1984 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
1985 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
1986 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
1987 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
1988 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
1989 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
1990 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
1991 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
1992 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
1993 
1994 	flow_req.nav_id = tisci_rm->tisci_dev_id;
1995 	flow_req.flow_index = rchan->id;
1996 
1997 	if (uc->config.needs_epib)
1998 		flow_req.rx_einfo_present = 1;
1999 	else
2000 		flow_req.rx_einfo_present = 0;
2001 	if (uc->config.psd_size)
2002 		flow_req.rx_psinfo_present = 1;
2003 	else
2004 		flow_req.rx_psinfo_present = 0;
2005 	flow_req.rx_error_handling = 1;
2006 	flow_req.rx_dest_qnum = rx_ring;
2007 	flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE;
2008 	flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG;
2009 	flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI;
2010 	flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO;
2011 	flow_req.rx_fdq0_sz0_qnum = fd_ring;
2012 	flow_req.rx_fdq1_qnum = fd_ring;
2013 	flow_req.rx_fdq2_qnum = fd_ring;
2014 	flow_req.rx_fdq3_qnum = fd_ring;
2015 
2016 	ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
2017 
2018 	if (ret)
2019 		dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret);
2020 
2021 	return 0;
2022 }
2023 
2024 static int bcdma_tisci_rx_channel_config(struct udma_chan *uc)
2025 {
2026 	struct udma_dev *ud = uc->ud;
2027 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2028 	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2029 	struct udma_rchan *rchan = uc->rchan;
2030 	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
2031 	int ret = 0;
2032 
2033 	req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
2034 	req_rx.nav_id = tisci_rm->tisci_dev_id;
2035 	req_rx.index = rchan->id;
2036 
2037 	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
2038 	if (ret)
2039 		dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
2040 
2041 	return ret;
2042 }
2043 
2044 static int pktdma_tisci_rx_channel_config(struct udma_chan *uc)
2045 {
2046 	struct udma_dev *ud = uc->ud;
2047 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2048 	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2049 	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
2050 	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
2051 	int ret = 0;
2052 
2053 	req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
2054 	req_rx.nav_id = tisci_rm->tisci_dev_id;
2055 	req_rx.index = uc->rchan->id;
2056 
2057 	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
2058 	if (ret) {
2059 		dev_err(ud->dev, "rchan%d cfg failed %d\n", uc->rchan->id, ret);
2060 		return ret;
2061 	}
2062 
2063 	flow_req.valid_params =
2064 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
2065 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
2066 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID;
2067 
2068 	flow_req.nav_id = tisci_rm->tisci_dev_id;
2069 	flow_req.flow_index = uc->rflow->id;
2070 
2071 	if (uc->config.needs_epib)
2072 		flow_req.rx_einfo_present = 1;
2073 	else
2074 		flow_req.rx_einfo_present = 0;
2075 	if (uc->config.psd_size)
2076 		flow_req.rx_psinfo_present = 1;
2077 	else
2078 		flow_req.rx_psinfo_present = 0;
2079 	flow_req.rx_error_handling = 1;
2080 
2081 	ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
2082 
2083 	if (ret)
2084 		dev_err(ud->dev, "flow%d config failed: %d\n", uc->rflow->id,
2085 			ret);
2086 
2087 	return ret;
2088 }
2089 
2090 static int udma_alloc_chan_resources(struct dma_chan *chan)
2091 {
2092 	struct udma_chan *uc = to_udma_chan(chan);
2093 	struct udma_dev *ud = to_udma_dev(chan->device);
2094 	const struct udma_soc_data *soc_data = ud->soc_data;
2095 	struct k3_ring *irq_ring;
2096 	u32 irq_udma_idx;
2097 	int ret;
2098 
2099 	uc->dma_dev = ud->dev;
2100 
2101 	if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) {
2102 		uc->use_dma_pool = true;
2103 		/* in case of MEM_TO_MEM we have maximum of two TRs */
2104 		if (uc->config.dir == DMA_MEM_TO_MEM) {
2105 			uc->config.hdesc_size = cppi5_trdesc_calc_size(
2106 					sizeof(struct cppi5_tr_type15_t), 2);
2107 			uc->config.pkt_mode = false;
2108 		}
2109 	}
2110 
2111 	if (uc->use_dma_pool) {
2112 		uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
2113 						 uc->config.hdesc_size,
2114 						 ud->desc_align,
2115 						 0);
2116 		if (!uc->hdesc_pool) {
2117 			dev_err(ud->ddev.dev,
2118 				"Descriptor pool allocation failed\n");
2119 			uc->use_dma_pool = false;
2120 			ret = -ENOMEM;
2121 			goto err_cleanup;
2122 		}
2123 	}
2124 
2125 	/*
2126 	 * Make sure that the completion is in a known state:
2127 	 * No teardown, the channel is idle
2128 	 */
2129 	reinit_completion(&uc->teardown_completed);
2130 	complete_all(&uc->teardown_completed);
2131 	uc->state = UDMA_CHAN_IS_IDLE;
2132 
2133 	switch (uc->config.dir) {
2134 	case DMA_MEM_TO_MEM:
2135 		/* Non synchronized - mem to mem type of transfer */
2136 		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
2137 			uc->id);
2138 
2139 		ret = udma_get_chan_pair(uc);
2140 		if (ret)
2141 			goto err_cleanup;
2142 
2143 		ret = udma_alloc_tx_resources(uc);
2144 		if (ret) {
2145 			udma_put_rchan(uc);
2146 			goto err_cleanup;
2147 		}
2148 
2149 		ret = udma_alloc_rx_resources(uc);
2150 		if (ret) {
2151 			udma_free_tx_resources(uc);
2152 			goto err_cleanup;
2153 		}
2154 
2155 		uc->config.src_thread = ud->psil_base + uc->tchan->id;
2156 		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2157 					K3_PSIL_DST_THREAD_ID_OFFSET;
2158 
2159 		irq_ring = uc->tchan->tc_ring;
2160 		irq_udma_idx = uc->tchan->id;
2161 
2162 		ret = udma_tisci_m2m_channel_config(uc);
2163 		break;
2164 	case DMA_MEM_TO_DEV:
2165 		/* Slave transfer synchronized - mem to dev (TX) trasnfer */
2166 		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
2167 			uc->id);
2168 
2169 		ret = udma_alloc_tx_resources(uc);
2170 		if (ret)
2171 			goto err_cleanup;
2172 
2173 		uc->config.src_thread = ud->psil_base + uc->tchan->id;
2174 		uc->config.dst_thread = uc->config.remote_thread_id;
2175 		uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
2176 
2177 		irq_ring = uc->tchan->tc_ring;
2178 		irq_udma_idx = uc->tchan->id;
2179 
2180 		ret = udma_tisci_tx_channel_config(uc);
2181 		break;
2182 	case DMA_DEV_TO_MEM:
2183 		/* Slave transfer synchronized - dev to mem (RX) trasnfer */
2184 		dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
2185 			uc->id);
2186 
2187 		ret = udma_alloc_rx_resources(uc);
2188 		if (ret)
2189 			goto err_cleanup;
2190 
2191 		uc->config.src_thread = uc->config.remote_thread_id;
2192 		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2193 					K3_PSIL_DST_THREAD_ID_OFFSET;
2194 
2195 		irq_ring = uc->rflow->r_ring;
2196 		irq_udma_idx = soc_data->oes.udma_rchan + uc->rchan->id;
2197 
2198 		ret = udma_tisci_rx_channel_config(uc);
2199 		break;
2200 	default:
2201 		/* Can not happen */
2202 		dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2203 			__func__, uc->id, uc->config.dir);
2204 		ret = -EINVAL;
2205 		goto err_cleanup;
2206 
2207 	}
2208 
2209 	/* check if the channel configuration was successful */
2210 	if (ret)
2211 		goto err_res_free;
2212 
2213 	if (udma_is_chan_running(uc)) {
2214 		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
2215 		udma_reset_chan(uc, false);
2216 		if (udma_is_chan_running(uc)) {
2217 			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
2218 			ret = -EBUSY;
2219 			goto err_res_free;
2220 		}
2221 	}
2222 
2223 	/* PSI-L pairing */
2224 	ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
2225 	if (ret) {
2226 		dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2227 			uc->config.src_thread, uc->config.dst_thread);
2228 		goto err_res_free;
2229 	}
2230 
2231 	uc->psil_paired = true;
2232 
2233 	uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring);
2234 	if (uc->irq_num_ring <= 0) {
2235 		dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
2236 			k3_ringacc_get_ring_id(irq_ring));
2237 		ret = -EINVAL;
2238 		goto err_psi_free;
2239 	}
2240 
2241 	ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
2242 			  IRQF_TRIGGER_HIGH, uc->name, uc);
2243 	if (ret) {
2244 		dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
2245 		goto err_irq_free;
2246 	}
2247 
2248 	/* Event from UDMA (TR events) only needed for slave TR mode channels */
2249 	if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) {
2250 		uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev,
2251 							    irq_udma_idx);
2252 		if (uc->irq_num_udma <= 0) {
2253 			dev_err(ud->dev, "Failed to get udma irq (index: %u)\n",
2254 				irq_udma_idx);
2255 			free_irq(uc->irq_num_ring, uc);
2256 			ret = -EINVAL;
2257 			goto err_irq_free;
2258 		}
2259 
2260 		ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
2261 				  uc->name, uc);
2262 		if (ret) {
2263 			dev_err(ud->dev, "chan%d: UDMA irq request failed\n",
2264 				uc->id);
2265 			free_irq(uc->irq_num_ring, uc);
2266 			goto err_irq_free;
2267 		}
2268 	} else {
2269 		uc->irq_num_udma = 0;
2270 	}
2271 
2272 	udma_reset_rings(uc);
2273 
2274 	return 0;
2275 
2276 err_irq_free:
2277 	uc->irq_num_ring = 0;
2278 	uc->irq_num_udma = 0;
2279 err_psi_free:
2280 	navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
2281 	uc->psil_paired = false;
2282 err_res_free:
2283 	udma_free_tx_resources(uc);
2284 	udma_free_rx_resources(uc);
2285 err_cleanup:
2286 	udma_reset_uchan(uc);
2287 
2288 	if (uc->use_dma_pool) {
2289 		dma_pool_destroy(uc->hdesc_pool);
2290 		uc->use_dma_pool = false;
2291 	}
2292 
2293 	return ret;
2294 }
2295 
2296 static int bcdma_alloc_chan_resources(struct dma_chan *chan)
2297 {
2298 	struct udma_chan *uc = to_udma_chan(chan);
2299 	struct udma_dev *ud = to_udma_dev(chan->device);
2300 	const struct udma_oes_offsets *oes = &ud->soc_data->oes;
2301 	u32 irq_udma_idx, irq_ring_idx;
2302 	int ret;
2303 
2304 	/* Only TR mode is supported */
2305 	uc->config.pkt_mode = false;
2306 
2307 	/*
2308 	 * Make sure that the completion is in a known state:
2309 	 * No teardown, the channel is idle
2310 	 */
2311 	reinit_completion(&uc->teardown_completed);
2312 	complete_all(&uc->teardown_completed);
2313 	uc->state = UDMA_CHAN_IS_IDLE;
2314 
2315 	switch (uc->config.dir) {
2316 	case DMA_MEM_TO_MEM:
2317 		/* Non synchronized - mem to mem type of transfer */
2318 		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
2319 			uc->id);
2320 
2321 		ret = bcdma_alloc_bchan_resources(uc);
2322 		if (ret)
2323 			return ret;
2324 
2325 		irq_ring_idx = uc->bchan->id + oes->bcdma_bchan_ring;
2326 		irq_udma_idx = uc->bchan->id + oes->bcdma_bchan_data;
2327 
2328 		ret = bcdma_tisci_m2m_channel_config(uc);
2329 		break;
2330 	case DMA_MEM_TO_DEV:
2331 		/* Slave transfer synchronized - mem to dev (TX) trasnfer */
2332 		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
2333 			uc->id);
2334 
2335 		ret = udma_alloc_tx_resources(uc);
2336 		if (ret) {
2337 			uc->config.remote_thread_id = -1;
2338 			return ret;
2339 		}
2340 
2341 		uc->config.src_thread = ud->psil_base + uc->tchan->id;
2342 		uc->config.dst_thread = uc->config.remote_thread_id;
2343 		uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
2344 
2345 		irq_ring_idx = uc->tchan->id + oes->bcdma_tchan_ring;
2346 		irq_udma_idx = uc->tchan->id + oes->bcdma_tchan_data;
2347 
2348 		ret = bcdma_tisci_tx_channel_config(uc);
2349 		break;
2350 	case DMA_DEV_TO_MEM:
2351 		/* Slave transfer synchronized - dev to mem (RX) trasnfer */
2352 		dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
2353 			uc->id);
2354 
2355 		ret = udma_alloc_rx_resources(uc);
2356 		if (ret) {
2357 			uc->config.remote_thread_id = -1;
2358 			return ret;
2359 		}
2360 
2361 		uc->config.src_thread = uc->config.remote_thread_id;
2362 		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2363 					K3_PSIL_DST_THREAD_ID_OFFSET;
2364 
2365 		irq_ring_idx = uc->rchan->id + oes->bcdma_rchan_ring;
2366 		irq_udma_idx = uc->rchan->id + oes->bcdma_rchan_data;
2367 
2368 		ret = bcdma_tisci_rx_channel_config(uc);
2369 		break;
2370 	default:
2371 		/* Can not happen */
2372 		dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2373 			__func__, uc->id, uc->config.dir);
2374 		return -EINVAL;
2375 	}
2376 
2377 	/* check if the channel configuration was successful */
2378 	if (ret)
2379 		goto err_res_free;
2380 
2381 	if (udma_is_chan_running(uc)) {
2382 		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
2383 		udma_reset_chan(uc, false);
2384 		if (udma_is_chan_running(uc)) {
2385 			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
2386 			ret = -EBUSY;
2387 			goto err_res_free;
2388 		}
2389 	}
2390 
2391 	uc->dma_dev = dmaengine_get_dma_device(chan);
2392 	if (uc->config.dir == DMA_MEM_TO_MEM  && !uc->config.tr_trigger_type) {
2393 		uc->config.hdesc_size = cppi5_trdesc_calc_size(
2394 					sizeof(struct cppi5_tr_type15_t), 2);
2395 
2396 		uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
2397 						 uc->config.hdesc_size,
2398 						 ud->desc_align,
2399 						 0);
2400 		if (!uc->hdesc_pool) {
2401 			dev_err(ud->ddev.dev,
2402 				"Descriptor pool allocation failed\n");
2403 			uc->use_dma_pool = false;
2404 			ret = -ENOMEM;
2405 			goto err_res_free;
2406 		}
2407 
2408 		uc->use_dma_pool = true;
2409 	} else if (uc->config.dir != DMA_MEM_TO_MEM) {
2410 		/* PSI-L pairing */
2411 		ret = navss_psil_pair(ud, uc->config.src_thread,
2412 				      uc->config.dst_thread);
2413 		if (ret) {
2414 			dev_err(ud->dev,
2415 				"PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2416 				uc->config.src_thread, uc->config.dst_thread);
2417 			goto err_res_free;
2418 		}
2419 
2420 		uc->psil_paired = true;
2421 	}
2422 
2423 	uc->irq_num_ring = ti_sci_inta_msi_get_virq(ud->dev, irq_ring_idx);
2424 	if (uc->irq_num_ring <= 0) {
2425 		dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
2426 			irq_ring_idx);
2427 		ret = -EINVAL;
2428 		goto err_psi_free;
2429 	}
2430 
2431 	ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
2432 			  IRQF_TRIGGER_HIGH, uc->name, uc);
2433 	if (ret) {
2434 		dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
2435 		goto err_irq_free;
2436 	}
2437 
2438 	/* Event from BCDMA (TR events) only needed for slave channels */
2439 	if (is_slave_direction(uc->config.dir)) {
2440 		uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev,
2441 							    irq_udma_idx);
2442 		if (uc->irq_num_udma <= 0) {
2443 			dev_err(ud->dev, "Failed to get bcdma irq (index: %u)\n",
2444 				irq_udma_idx);
2445 			free_irq(uc->irq_num_ring, uc);
2446 			ret = -EINVAL;
2447 			goto err_irq_free;
2448 		}
2449 
2450 		ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
2451 				  uc->name, uc);
2452 		if (ret) {
2453 			dev_err(ud->dev, "chan%d: BCDMA irq request failed\n",
2454 				uc->id);
2455 			free_irq(uc->irq_num_ring, uc);
2456 			goto err_irq_free;
2457 		}
2458 	} else {
2459 		uc->irq_num_udma = 0;
2460 	}
2461 
2462 	udma_reset_rings(uc);
2463 
2464 	INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
2465 				  udma_check_tx_completion);
2466 	return 0;
2467 
2468 err_irq_free:
2469 	uc->irq_num_ring = 0;
2470 	uc->irq_num_udma = 0;
2471 err_psi_free:
2472 	if (uc->psil_paired)
2473 		navss_psil_unpair(ud, uc->config.src_thread,
2474 				  uc->config.dst_thread);
2475 	uc->psil_paired = false;
2476 err_res_free:
2477 	bcdma_free_bchan_resources(uc);
2478 	udma_free_tx_resources(uc);
2479 	udma_free_rx_resources(uc);
2480 
2481 	udma_reset_uchan(uc);
2482 
2483 	if (uc->use_dma_pool) {
2484 		dma_pool_destroy(uc->hdesc_pool);
2485 		uc->use_dma_pool = false;
2486 	}
2487 
2488 	return ret;
2489 }
2490 
2491 static int bcdma_router_config(struct dma_chan *chan)
2492 {
2493 	struct k3_event_route_data *router_data = chan->route_data;
2494 	struct udma_chan *uc = to_udma_chan(chan);
2495 	u32 trigger_event;
2496 
2497 	if (!uc->bchan)
2498 		return -EINVAL;
2499 
2500 	if (uc->config.tr_trigger_type != 1 && uc->config.tr_trigger_type != 2)
2501 		return -EINVAL;
2502 
2503 	trigger_event = uc->ud->soc_data->bcdma_trigger_event_offset;
2504 	trigger_event += (uc->bchan->id * 2) + uc->config.tr_trigger_type - 1;
2505 
2506 	return router_data->set_event(router_data->priv, trigger_event);
2507 }
2508 
2509 static int pktdma_alloc_chan_resources(struct dma_chan *chan)
2510 {
2511 	struct udma_chan *uc = to_udma_chan(chan);
2512 	struct udma_dev *ud = to_udma_dev(chan->device);
2513 	const struct udma_oes_offsets *oes = &ud->soc_data->oes;
2514 	u32 irq_ring_idx;
2515 	int ret;
2516 
2517 	/*
2518 	 * Make sure that the completion is in a known state:
2519 	 * No teardown, the channel is idle
2520 	 */
2521 	reinit_completion(&uc->teardown_completed);
2522 	complete_all(&uc->teardown_completed);
2523 	uc->state = UDMA_CHAN_IS_IDLE;
2524 
2525 	switch (uc->config.dir) {
2526 	case DMA_MEM_TO_DEV:
2527 		/* Slave transfer synchronized - mem to dev (TX) trasnfer */
2528 		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
2529 			uc->id);
2530 
2531 		ret = udma_alloc_tx_resources(uc);
2532 		if (ret) {
2533 			uc->config.remote_thread_id = -1;
2534 			return ret;
2535 		}
2536 
2537 		uc->config.src_thread = ud->psil_base + uc->tchan->id;
2538 		uc->config.dst_thread = uc->config.remote_thread_id;
2539 		uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
2540 
2541 		irq_ring_idx = uc->tchan->tflow_id + oes->pktdma_tchan_flow;
2542 
2543 		ret = pktdma_tisci_tx_channel_config(uc);
2544 		break;
2545 	case DMA_DEV_TO_MEM:
2546 		/* Slave transfer synchronized - dev to mem (RX) trasnfer */
2547 		dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
2548 			uc->id);
2549 
2550 		ret = udma_alloc_rx_resources(uc);
2551 		if (ret) {
2552 			uc->config.remote_thread_id = -1;
2553 			return ret;
2554 		}
2555 
2556 		uc->config.src_thread = uc->config.remote_thread_id;
2557 		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2558 					K3_PSIL_DST_THREAD_ID_OFFSET;
2559 
2560 		irq_ring_idx = uc->rflow->id + oes->pktdma_rchan_flow;
2561 
2562 		ret = pktdma_tisci_rx_channel_config(uc);
2563 		break;
2564 	default:
2565 		/* Can not happen */
2566 		dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2567 			__func__, uc->id, uc->config.dir);
2568 		return -EINVAL;
2569 	}
2570 
2571 	/* check if the channel configuration was successful */
2572 	if (ret)
2573 		goto err_res_free;
2574 
2575 	if (udma_is_chan_running(uc)) {
2576 		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
2577 		udma_reset_chan(uc, false);
2578 		if (udma_is_chan_running(uc)) {
2579 			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
2580 			ret = -EBUSY;
2581 			goto err_res_free;
2582 		}
2583 	}
2584 
2585 	uc->dma_dev = dmaengine_get_dma_device(chan);
2586 	uc->hdesc_pool = dma_pool_create(uc->name, uc->dma_dev,
2587 					 uc->config.hdesc_size, ud->desc_align,
2588 					 0);
2589 	if (!uc->hdesc_pool) {
2590 		dev_err(ud->ddev.dev,
2591 			"Descriptor pool allocation failed\n");
2592 		uc->use_dma_pool = false;
2593 		ret = -ENOMEM;
2594 		goto err_res_free;
2595 	}
2596 
2597 	uc->use_dma_pool = true;
2598 
2599 	/* PSI-L pairing */
2600 	ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
2601 	if (ret) {
2602 		dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2603 			uc->config.src_thread, uc->config.dst_thread);
2604 		goto err_res_free;
2605 	}
2606 
2607 	uc->psil_paired = true;
2608 
2609 	uc->irq_num_ring = ti_sci_inta_msi_get_virq(ud->dev, irq_ring_idx);
2610 	if (uc->irq_num_ring <= 0) {
2611 		dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
2612 			irq_ring_idx);
2613 		ret = -EINVAL;
2614 		goto err_psi_free;
2615 	}
2616 
2617 	ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
2618 			  IRQF_TRIGGER_HIGH, uc->name, uc);
2619 	if (ret) {
2620 		dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
2621 		goto err_irq_free;
2622 	}
2623 
2624 	uc->irq_num_udma = 0;
2625 
2626 	udma_reset_rings(uc);
2627 
2628 	INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
2629 				  udma_check_tx_completion);
2630 
2631 	if (uc->tchan)
2632 		dev_dbg(ud->dev,
2633 			"chan%d: tchan%d, tflow%d, Remote thread: 0x%04x\n",
2634 			uc->id, uc->tchan->id, uc->tchan->tflow_id,
2635 			uc->config.remote_thread_id);
2636 	else if (uc->rchan)
2637 		dev_dbg(ud->dev,
2638 			"chan%d: rchan%d, rflow%d, Remote thread: 0x%04x\n",
2639 			uc->id, uc->rchan->id, uc->rflow->id,
2640 			uc->config.remote_thread_id);
2641 	return 0;
2642 
2643 err_irq_free:
2644 	uc->irq_num_ring = 0;
2645 err_psi_free:
2646 	navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
2647 	uc->psil_paired = false;
2648 err_res_free:
2649 	udma_free_tx_resources(uc);
2650 	udma_free_rx_resources(uc);
2651 
2652 	udma_reset_uchan(uc);
2653 
2654 	dma_pool_destroy(uc->hdesc_pool);
2655 	uc->use_dma_pool = false;
2656 
2657 	return ret;
2658 }
2659 
2660 static int udma_slave_config(struct dma_chan *chan,
2661 			     struct dma_slave_config *cfg)
2662 {
2663 	struct udma_chan *uc = to_udma_chan(chan);
2664 
2665 	memcpy(&uc->cfg, cfg, sizeof(uc->cfg));
2666 
2667 	return 0;
2668 }
2669 
2670 static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc,
2671 					    size_t tr_size, int tr_count,
2672 					    enum dma_transfer_direction dir)
2673 {
2674 	struct udma_hwdesc *hwdesc;
2675 	struct cppi5_desc_hdr_t *tr_desc;
2676 	struct udma_desc *d;
2677 	u32 reload_count = 0;
2678 	u32 ring_id;
2679 
2680 	switch (tr_size) {
2681 	case 16:
2682 	case 32:
2683 	case 64:
2684 	case 128:
2685 		break;
2686 	default:
2687 		dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size);
2688 		return NULL;
2689 	}
2690 
2691 	/* We have only one descriptor containing multiple TRs */
2692 	d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT);
2693 	if (!d)
2694 		return NULL;
2695 
2696 	d->sglen = tr_count;
2697 
2698 	d->hwdesc_count = 1;
2699 	hwdesc = &d->hwdesc[0];
2700 
2701 	/* Allocate memory for DMA ring descriptor */
2702 	if (uc->use_dma_pool) {
2703 		hwdesc->cppi5_desc_size = uc->config.hdesc_size;
2704 		hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
2705 						GFP_NOWAIT,
2706 						&hwdesc->cppi5_desc_paddr);
2707 	} else {
2708 		hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size,
2709 								 tr_count);
2710 		hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
2711 						uc->ud->desc_align);
2712 		hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev,
2713 						hwdesc->cppi5_desc_size,
2714 						&hwdesc->cppi5_desc_paddr,
2715 						GFP_NOWAIT);
2716 	}
2717 
2718 	if (!hwdesc->cppi5_desc_vaddr) {
2719 		kfree(d);
2720 		return NULL;
2721 	}
2722 
2723 	/* Start of the TR req records */
2724 	hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
2725 	/* Start address of the TR response array */
2726 	hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count;
2727 
2728 	tr_desc = hwdesc->cppi5_desc_vaddr;
2729 
2730 	if (uc->cyclic)
2731 		reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE;
2732 
2733 	if (dir == DMA_DEV_TO_MEM)
2734 		ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2735 	else
2736 		ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
2737 
2738 	cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count);
2739 	cppi5_desc_set_pktids(tr_desc, uc->id,
2740 			      CPPI5_INFO1_DESC_FLOWID_DEFAULT);
2741 	cppi5_desc_set_retpolicy(tr_desc, 0, ring_id);
2742 
2743 	return d;
2744 }
2745 
2746 /**
2747  * udma_get_tr_counters - calculate TR counters for a given length
2748  * @len: Length of the trasnfer
2749  * @align_to: Preferred alignment
2750  * @tr0_cnt0: First TR icnt0
2751  * @tr0_cnt1: First TR icnt1
2752  * @tr1_cnt0: Second (if used) TR icnt0
2753  *
2754  * For len < SZ_64K only one TR is enough, tr1_cnt0 is not updated
2755  * For len >= SZ_64K two TRs are used in a simple way:
2756  * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1)
2757  * Second TR: the remaining length (tr1_cnt0)
2758  *
2759  * Returns the number of TRs the length needs (1 or 2)
2760  * -EINVAL if the length can not be supported
2761  */
2762 static int udma_get_tr_counters(size_t len, unsigned long align_to,
2763 				u16 *tr0_cnt0, u16 *tr0_cnt1, u16 *tr1_cnt0)
2764 {
2765 	if (len < SZ_64K) {
2766 		*tr0_cnt0 = len;
2767 		*tr0_cnt1 = 1;
2768 
2769 		return 1;
2770 	}
2771 
2772 	if (align_to > 3)
2773 		align_to = 3;
2774 
2775 realign:
2776 	*tr0_cnt0 = SZ_64K - BIT(align_to);
2777 	if (len / *tr0_cnt0 >= SZ_64K) {
2778 		if (align_to) {
2779 			align_to--;
2780 			goto realign;
2781 		}
2782 		return -EINVAL;
2783 	}
2784 
2785 	*tr0_cnt1 = len / *tr0_cnt0;
2786 	*tr1_cnt0 = len % *tr0_cnt0;
2787 
2788 	return 2;
2789 }
2790 
2791 static struct udma_desc *
2792 udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
2793 		      unsigned int sglen, enum dma_transfer_direction dir,
2794 		      unsigned long tx_flags, void *context)
2795 {
2796 	struct scatterlist *sgent;
2797 	struct udma_desc *d;
2798 	struct cppi5_tr_type1_t *tr_req = NULL;
2799 	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
2800 	unsigned int i;
2801 	size_t tr_size;
2802 	int num_tr = 0;
2803 	int tr_idx = 0;
2804 	u64 asel;
2805 
2806 	/* estimate the number of TRs we will need */
2807 	for_each_sg(sgl, sgent, sglen, i) {
2808 		if (sg_dma_len(sgent) < SZ_64K)
2809 			num_tr++;
2810 		else
2811 			num_tr += 2;
2812 	}
2813 
2814 	/* Now allocate and setup the descriptor. */
2815 	tr_size = sizeof(struct cppi5_tr_type1_t);
2816 	d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
2817 	if (!d)
2818 		return NULL;
2819 
2820 	d->sglen = sglen;
2821 
2822 	if (uc->ud->match_data->type == DMA_TYPE_UDMA)
2823 		asel = 0;
2824 	else
2825 		asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
2826 
2827 	tr_req = d->hwdesc[0].tr_req_base;
2828 	for_each_sg(sgl, sgent, sglen, i) {
2829 		dma_addr_t sg_addr = sg_dma_address(sgent);
2830 
2831 		num_tr = udma_get_tr_counters(sg_dma_len(sgent), __ffs(sg_addr),
2832 					      &tr0_cnt0, &tr0_cnt1, &tr1_cnt0);
2833 		if (num_tr < 0) {
2834 			dev_err(uc->ud->dev, "size %u is not supported\n",
2835 				sg_dma_len(sgent));
2836 			udma_free_hwdesc(uc, d);
2837 			kfree(d);
2838 			return NULL;
2839 		}
2840 
2841 		cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
2842 			      false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2843 		cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
2844 
2845 		sg_addr |= asel;
2846 		tr_req[tr_idx].addr = sg_addr;
2847 		tr_req[tr_idx].icnt0 = tr0_cnt0;
2848 		tr_req[tr_idx].icnt1 = tr0_cnt1;
2849 		tr_req[tr_idx].dim1 = tr0_cnt0;
2850 		tr_idx++;
2851 
2852 		if (num_tr == 2) {
2853 			cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
2854 				      false, false,
2855 				      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2856 			cppi5_tr_csf_set(&tr_req[tr_idx].flags,
2857 					 CPPI5_TR_CSF_SUPR_EVT);
2858 
2859 			tr_req[tr_idx].addr = sg_addr + tr0_cnt1 * tr0_cnt0;
2860 			tr_req[tr_idx].icnt0 = tr1_cnt0;
2861 			tr_req[tr_idx].icnt1 = 1;
2862 			tr_req[tr_idx].dim1 = tr1_cnt0;
2863 			tr_idx++;
2864 		}
2865 
2866 		d->residue += sg_dma_len(sgent);
2867 	}
2868 
2869 	cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags,
2870 			 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
2871 
2872 	return d;
2873 }
2874 
2875 static struct udma_desc *
2876 udma_prep_slave_sg_triggered_tr(struct udma_chan *uc, struct scatterlist *sgl,
2877 				unsigned int sglen,
2878 				enum dma_transfer_direction dir,
2879 				unsigned long tx_flags, void *context)
2880 {
2881 	struct scatterlist *sgent;
2882 	struct cppi5_tr_type15_t *tr_req = NULL;
2883 	enum dma_slave_buswidth dev_width;
2884 	u16 tr_cnt0, tr_cnt1;
2885 	dma_addr_t dev_addr;
2886 	struct udma_desc *d;
2887 	unsigned int i;
2888 	size_t tr_size, sg_len;
2889 	int num_tr = 0;
2890 	int tr_idx = 0;
2891 	u32 burst, trigger_size, port_window;
2892 	u64 asel;
2893 
2894 	if (dir == DMA_DEV_TO_MEM) {
2895 		dev_addr = uc->cfg.src_addr;
2896 		dev_width = uc->cfg.src_addr_width;
2897 		burst = uc->cfg.src_maxburst;
2898 		port_window = uc->cfg.src_port_window_size;
2899 	} else if (dir == DMA_MEM_TO_DEV) {
2900 		dev_addr = uc->cfg.dst_addr;
2901 		dev_width = uc->cfg.dst_addr_width;
2902 		burst = uc->cfg.dst_maxburst;
2903 		port_window = uc->cfg.dst_port_window_size;
2904 	} else {
2905 		dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
2906 		return NULL;
2907 	}
2908 
2909 	if (!burst)
2910 		burst = 1;
2911 
2912 	if (port_window) {
2913 		if (port_window != burst) {
2914 			dev_err(uc->ud->dev,
2915 				"The burst must be equal to port_window\n");
2916 			return NULL;
2917 		}
2918 
2919 		tr_cnt0 = dev_width * port_window;
2920 		tr_cnt1 = 1;
2921 	} else {
2922 		tr_cnt0 = dev_width;
2923 		tr_cnt1 = burst;
2924 	}
2925 	trigger_size = tr_cnt0 * tr_cnt1;
2926 
2927 	/* estimate the number of TRs we will need */
2928 	for_each_sg(sgl, sgent, sglen, i) {
2929 		sg_len = sg_dma_len(sgent);
2930 
2931 		if (sg_len % trigger_size) {
2932 			dev_err(uc->ud->dev,
2933 				"Not aligned SG entry (%zu for %u)\n", sg_len,
2934 				trigger_size);
2935 			return NULL;
2936 		}
2937 
2938 		if (sg_len / trigger_size < SZ_64K)
2939 			num_tr++;
2940 		else
2941 			num_tr += 2;
2942 	}
2943 
2944 	/* Now allocate and setup the descriptor. */
2945 	tr_size = sizeof(struct cppi5_tr_type15_t);
2946 	d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
2947 	if (!d)
2948 		return NULL;
2949 
2950 	d->sglen = sglen;
2951 
2952 	if (uc->ud->match_data->type == DMA_TYPE_UDMA) {
2953 		asel = 0;
2954 	} else {
2955 		asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
2956 		dev_addr |= asel;
2957 	}
2958 
2959 	tr_req = d->hwdesc[0].tr_req_base;
2960 	for_each_sg(sgl, sgent, sglen, i) {
2961 		u16 tr0_cnt2, tr0_cnt3, tr1_cnt2;
2962 		dma_addr_t sg_addr = sg_dma_address(sgent);
2963 
2964 		sg_len = sg_dma_len(sgent);
2965 		num_tr = udma_get_tr_counters(sg_len / trigger_size, 0,
2966 					      &tr0_cnt2, &tr0_cnt3, &tr1_cnt2);
2967 		if (num_tr < 0) {
2968 			dev_err(uc->ud->dev, "size %zu is not supported\n",
2969 				sg_len);
2970 			udma_free_hwdesc(uc, d);
2971 			kfree(d);
2972 			return NULL;
2973 		}
2974 
2975 		cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15, false,
2976 			      true, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2977 		cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
2978 		cppi5_tr_set_trigger(&tr_req[tr_idx].flags,
2979 				     uc->config.tr_trigger_type,
2980 				     CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC, 0, 0);
2981 
2982 		sg_addr |= asel;
2983 		if (dir == DMA_DEV_TO_MEM) {
2984 			tr_req[tr_idx].addr = dev_addr;
2985 			tr_req[tr_idx].icnt0 = tr_cnt0;
2986 			tr_req[tr_idx].icnt1 = tr_cnt1;
2987 			tr_req[tr_idx].icnt2 = tr0_cnt2;
2988 			tr_req[tr_idx].icnt3 = tr0_cnt3;
2989 			tr_req[tr_idx].dim1 = (-1) * tr_cnt0;
2990 
2991 			tr_req[tr_idx].daddr = sg_addr;
2992 			tr_req[tr_idx].dicnt0 = tr_cnt0;
2993 			tr_req[tr_idx].dicnt1 = tr_cnt1;
2994 			tr_req[tr_idx].dicnt2 = tr0_cnt2;
2995 			tr_req[tr_idx].dicnt3 = tr0_cnt3;
2996 			tr_req[tr_idx].ddim1 = tr_cnt0;
2997 			tr_req[tr_idx].ddim2 = trigger_size;
2998 			tr_req[tr_idx].ddim3 = trigger_size * tr0_cnt2;
2999 		} else {
3000 			tr_req[tr_idx].addr = sg_addr;
3001 			tr_req[tr_idx].icnt0 = tr_cnt0;
3002 			tr_req[tr_idx].icnt1 = tr_cnt1;
3003 			tr_req[tr_idx].icnt2 = tr0_cnt2;
3004 			tr_req[tr_idx].icnt3 = tr0_cnt3;
3005 			tr_req[tr_idx].dim1 = tr_cnt0;
3006 			tr_req[tr_idx].dim2 = trigger_size;
3007 			tr_req[tr_idx].dim3 = trigger_size * tr0_cnt2;
3008 
3009 			tr_req[tr_idx].daddr = dev_addr;
3010 			tr_req[tr_idx].dicnt0 = tr_cnt0;
3011 			tr_req[tr_idx].dicnt1 = tr_cnt1;
3012 			tr_req[tr_idx].dicnt2 = tr0_cnt2;
3013 			tr_req[tr_idx].dicnt3 = tr0_cnt3;
3014 			tr_req[tr_idx].ddim1 = (-1) * tr_cnt0;
3015 		}
3016 
3017 		tr_idx++;
3018 
3019 		if (num_tr == 2) {
3020 			cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15,
3021 				      false, true,
3022 				      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3023 			cppi5_tr_csf_set(&tr_req[tr_idx].flags,
3024 					 CPPI5_TR_CSF_SUPR_EVT);
3025 			cppi5_tr_set_trigger(&tr_req[tr_idx].flags,
3026 					     uc->config.tr_trigger_type,
3027 					     CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC,
3028 					     0, 0);
3029 
3030 			sg_addr += trigger_size * tr0_cnt2 * tr0_cnt3;
3031 			if (dir == DMA_DEV_TO_MEM) {
3032 				tr_req[tr_idx].addr = dev_addr;
3033 				tr_req[tr_idx].icnt0 = tr_cnt0;
3034 				tr_req[tr_idx].icnt1 = tr_cnt1;
3035 				tr_req[tr_idx].icnt2 = tr1_cnt2;
3036 				tr_req[tr_idx].icnt3 = 1;
3037 				tr_req[tr_idx].dim1 = (-1) * tr_cnt0;
3038 
3039 				tr_req[tr_idx].daddr = sg_addr;
3040 				tr_req[tr_idx].dicnt0 = tr_cnt0;
3041 				tr_req[tr_idx].dicnt1 = tr_cnt1;
3042 				tr_req[tr_idx].dicnt2 = tr1_cnt2;
3043 				tr_req[tr_idx].dicnt3 = 1;
3044 				tr_req[tr_idx].ddim1 = tr_cnt0;
3045 				tr_req[tr_idx].ddim2 = trigger_size;
3046 			} else {
3047 				tr_req[tr_idx].addr = sg_addr;
3048 				tr_req[tr_idx].icnt0 = tr_cnt0;
3049 				tr_req[tr_idx].icnt1 = tr_cnt1;
3050 				tr_req[tr_idx].icnt2 = tr1_cnt2;
3051 				tr_req[tr_idx].icnt3 = 1;
3052 				tr_req[tr_idx].dim1 = tr_cnt0;
3053 				tr_req[tr_idx].dim2 = trigger_size;
3054 
3055 				tr_req[tr_idx].daddr = dev_addr;
3056 				tr_req[tr_idx].dicnt0 = tr_cnt0;
3057 				tr_req[tr_idx].dicnt1 = tr_cnt1;
3058 				tr_req[tr_idx].dicnt2 = tr1_cnt2;
3059 				tr_req[tr_idx].dicnt3 = 1;
3060 				tr_req[tr_idx].ddim1 = (-1) * tr_cnt0;
3061 			}
3062 			tr_idx++;
3063 		}
3064 
3065 		d->residue += sg_len;
3066 	}
3067 
3068 	cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags,
3069 			 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
3070 
3071 	return d;
3072 }
3073 
3074 static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
3075 				   enum dma_slave_buswidth dev_width,
3076 				   u16 elcnt)
3077 {
3078 	if (uc->config.ep_type != PSIL_EP_PDMA_XY)
3079 		return 0;
3080 
3081 	/* Bus width translates to the element size (ES) */
3082 	switch (dev_width) {
3083 	case DMA_SLAVE_BUSWIDTH_1_BYTE:
3084 		d->static_tr.elsize = 0;
3085 		break;
3086 	case DMA_SLAVE_BUSWIDTH_2_BYTES:
3087 		d->static_tr.elsize = 1;
3088 		break;
3089 	case DMA_SLAVE_BUSWIDTH_3_BYTES:
3090 		d->static_tr.elsize = 2;
3091 		break;
3092 	case DMA_SLAVE_BUSWIDTH_4_BYTES:
3093 		d->static_tr.elsize = 3;
3094 		break;
3095 	case DMA_SLAVE_BUSWIDTH_8_BYTES:
3096 		d->static_tr.elsize = 4;
3097 		break;
3098 	default: /* not reached */
3099 		return -EINVAL;
3100 	}
3101 
3102 	d->static_tr.elcnt = elcnt;
3103 
3104 	/*
3105 	 * PDMA must to close the packet when the channel is in packet mode.
3106 	 * For TR mode when the channel is not cyclic we also need PDMA to close
3107 	 * the packet otherwise the transfer will stall because PDMA holds on
3108 	 * the data it has received from the peripheral.
3109 	 */
3110 	if (uc->config.pkt_mode || !uc->cyclic) {
3111 		unsigned int div = dev_width * elcnt;
3112 
3113 		if (uc->cyclic)
3114 			d->static_tr.bstcnt = d->residue / d->sglen / div;
3115 		else
3116 			d->static_tr.bstcnt = d->residue / div;
3117 
3118 		if (uc->config.dir == DMA_DEV_TO_MEM &&
3119 		    d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
3120 			return -EINVAL;
3121 	} else {
3122 		d->static_tr.bstcnt = 0;
3123 	}
3124 
3125 	return 0;
3126 }
3127 
3128 static struct udma_desc *
3129 udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
3130 		       unsigned int sglen, enum dma_transfer_direction dir,
3131 		       unsigned long tx_flags, void *context)
3132 {
3133 	struct scatterlist *sgent;
3134 	struct cppi5_host_desc_t *h_desc = NULL;
3135 	struct udma_desc *d;
3136 	u32 ring_id;
3137 	unsigned int i;
3138 	u64 asel;
3139 
3140 	d = kzalloc(struct_size(d, hwdesc, sglen), GFP_NOWAIT);
3141 	if (!d)
3142 		return NULL;
3143 
3144 	d->sglen = sglen;
3145 	d->hwdesc_count = sglen;
3146 
3147 	if (dir == DMA_DEV_TO_MEM)
3148 		ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
3149 	else
3150 		ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
3151 
3152 	if (uc->ud->match_data->type == DMA_TYPE_UDMA)
3153 		asel = 0;
3154 	else
3155 		asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
3156 
3157 	for_each_sg(sgl, sgent, sglen, i) {
3158 		struct udma_hwdesc *hwdesc = &d->hwdesc[i];
3159 		dma_addr_t sg_addr = sg_dma_address(sgent);
3160 		struct cppi5_host_desc_t *desc;
3161 		size_t sg_len = sg_dma_len(sgent);
3162 
3163 		hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
3164 						GFP_NOWAIT,
3165 						&hwdesc->cppi5_desc_paddr);
3166 		if (!hwdesc->cppi5_desc_vaddr) {
3167 			dev_err(uc->ud->dev,
3168 				"descriptor%d allocation failed\n", i);
3169 
3170 			udma_free_hwdesc(uc, d);
3171 			kfree(d);
3172 			return NULL;
3173 		}
3174 
3175 		d->residue += sg_len;
3176 		hwdesc->cppi5_desc_size = uc->config.hdesc_size;
3177 		desc = hwdesc->cppi5_desc_vaddr;
3178 
3179 		if (i == 0) {
3180 			cppi5_hdesc_init(desc, 0, 0);
3181 			/* Flow and Packed ID */
3182 			cppi5_desc_set_pktids(&desc->hdr, uc->id,
3183 					      CPPI5_INFO1_DESC_FLOWID_DEFAULT);
3184 			cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id);
3185 		} else {
3186 			cppi5_hdesc_reset_hbdesc(desc);
3187 			cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff);
3188 		}
3189 
3190 		/* attach the sg buffer to the descriptor */
3191 		sg_addr |= asel;
3192 		cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len);
3193 
3194 		/* Attach link as host buffer descriptor */
3195 		if (h_desc)
3196 			cppi5_hdesc_link_hbdesc(h_desc,
3197 						hwdesc->cppi5_desc_paddr | asel);
3198 
3199 		if (uc->ud->match_data->type == DMA_TYPE_PKTDMA ||
3200 		    dir == DMA_MEM_TO_DEV)
3201 			h_desc = desc;
3202 	}
3203 
3204 	if (d->residue >= SZ_4M) {
3205 		dev_err(uc->ud->dev,
3206 			"%s: Transfer size %u is over the supported 4M range\n",
3207 			__func__, d->residue);
3208 		udma_free_hwdesc(uc, d);
3209 		kfree(d);
3210 		return NULL;
3211 	}
3212 
3213 	h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3214 	cppi5_hdesc_set_pktlen(h_desc, d->residue);
3215 
3216 	return d;
3217 }
3218 
3219 static int udma_attach_metadata(struct dma_async_tx_descriptor *desc,
3220 				void *data, size_t len)
3221 {
3222 	struct udma_desc *d = to_udma_desc(desc);
3223 	struct udma_chan *uc = to_udma_chan(desc->chan);
3224 	struct cppi5_host_desc_t *h_desc;
3225 	u32 psd_size = len;
3226 	u32 flags = 0;
3227 
3228 	if (!uc->config.pkt_mode || !uc->config.metadata_size)
3229 		return -ENOTSUPP;
3230 
3231 	if (!data || len > uc->config.metadata_size)
3232 		return -EINVAL;
3233 
3234 	if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE)
3235 		return -EINVAL;
3236 
3237 	h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3238 	if (d->dir == DMA_MEM_TO_DEV)
3239 		memcpy(h_desc->epib, data, len);
3240 
3241 	if (uc->config.needs_epib)
3242 		psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
3243 
3244 	d->metadata = data;
3245 	d->metadata_size = len;
3246 	if (uc->config.needs_epib)
3247 		flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
3248 
3249 	cppi5_hdesc_update_flags(h_desc, flags);
3250 	cppi5_hdesc_update_psdata_size(h_desc, psd_size);
3251 
3252 	return 0;
3253 }
3254 
3255 static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
3256 				   size_t *payload_len, size_t *max_len)
3257 {
3258 	struct udma_desc *d = to_udma_desc(desc);
3259 	struct udma_chan *uc = to_udma_chan(desc->chan);
3260 	struct cppi5_host_desc_t *h_desc;
3261 
3262 	if (!uc->config.pkt_mode || !uc->config.metadata_size)
3263 		return ERR_PTR(-ENOTSUPP);
3264 
3265 	h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3266 
3267 	*max_len = uc->config.metadata_size;
3268 
3269 	*payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ?
3270 		       CPPI5_INFO0_HDESC_EPIB_SIZE : 0;
3271 	*payload_len += cppi5_hdesc_get_psdata_size(h_desc);
3272 
3273 	return h_desc->epib;
3274 }
3275 
3276 static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc,
3277 				 size_t payload_len)
3278 {
3279 	struct udma_desc *d = to_udma_desc(desc);
3280 	struct udma_chan *uc = to_udma_chan(desc->chan);
3281 	struct cppi5_host_desc_t *h_desc;
3282 	u32 psd_size = payload_len;
3283 	u32 flags = 0;
3284 
3285 	if (!uc->config.pkt_mode || !uc->config.metadata_size)
3286 		return -ENOTSUPP;
3287 
3288 	if (payload_len > uc->config.metadata_size)
3289 		return -EINVAL;
3290 
3291 	if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE)
3292 		return -EINVAL;
3293 
3294 	h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3295 
3296 	if (uc->config.needs_epib) {
3297 		psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
3298 		flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
3299 	}
3300 
3301 	cppi5_hdesc_update_flags(h_desc, flags);
3302 	cppi5_hdesc_update_psdata_size(h_desc, psd_size);
3303 
3304 	return 0;
3305 }
3306 
3307 static struct dma_descriptor_metadata_ops metadata_ops = {
3308 	.attach = udma_attach_metadata,
3309 	.get_ptr = udma_get_metadata_ptr,
3310 	.set_len = udma_set_metadata_len,
3311 };
3312 
3313 static struct dma_async_tx_descriptor *
3314 udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
3315 		   unsigned int sglen, enum dma_transfer_direction dir,
3316 		   unsigned long tx_flags, void *context)
3317 {
3318 	struct udma_chan *uc = to_udma_chan(chan);
3319 	enum dma_slave_buswidth dev_width;
3320 	struct udma_desc *d;
3321 	u32 burst;
3322 
3323 	if (dir != uc->config.dir &&
3324 	    (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type)) {
3325 		dev_err(chan->device->dev,
3326 			"%s: chan%d is for %s, not supporting %s\n",
3327 			__func__, uc->id,
3328 			dmaengine_get_direction_text(uc->config.dir),
3329 			dmaengine_get_direction_text(dir));
3330 		return NULL;
3331 	}
3332 
3333 	if (dir == DMA_DEV_TO_MEM) {
3334 		dev_width = uc->cfg.src_addr_width;
3335 		burst = uc->cfg.src_maxburst;
3336 	} else if (dir == DMA_MEM_TO_DEV) {
3337 		dev_width = uc->cfg.dst_addr_width;
3338 		burst = uc->cfg.dst_maxburst;
3339 	} else {
3340 		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
3341 		return NULL;
3342 	}
3343 
3344 	if (!burst)
3345 		burst = 1;
3346 
3347 	if (uc->config.pkt_mode)
3348 		d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags,
3349 					   context);
3350 	else if (is_slave_direction(uc->config.dir))
3351 		d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags,
3352 					  context);
3353 	else
3354 		d = udma_prep_slave_sg_triggered_tr(uc, sgl, sglen, dir,
3355 						    tx_flags, context);
3356 
3357 	if (!d)
3358 		return NULL;
3359 
3360 	d->dir = dir;
3361 	d->desc_idx = 0;
3362 	d->tr_idx = 0;
3363 
3364 	/* static TR for remote PDMA */
3365 	if (udma_configure_statictr(uc, d, dev_width, burst)) {
3366 		dev_err(uc->ud->dev,
3367 			"%s: StaticTR Z is limited to maximum 4095 (%u)\n",
3368 			__func__, d->static_tr.bstcnt);
3369 
3370 		udma_free_hwdesc(uc, d);
3371 		kfree(d);
3372 		return NULL;
3373 	}
3374 
3375 	if (uc->config.metadata_size)
3376 		d->vd.tx.metadata_ops = &metadata_ops;
3377 
3378 	return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
3379 }
3380 
3381 static struct udma_desc *
3382 udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
3383 			size_t buf_len, size_t period_len,
3384 			enum dma_transfer_direction dir, unsigned long flags)
3385 {
3386 	struct udma_desc *d;
3387 	size_t tr_size, period_addr;
3388 	struct cppi5_tr_type1_t *tr_req;
3389 	unsigned int periods = buf_len / period_len;
3390 	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
3391 	unsigned int i;
3392 	int num_tr;
3393 
3394 	num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0,
3395 				      &tr0_cnt1, &tr1_cnt0);
3396 	if (num_tr < 0) {
3397 		dev_err(uc->ud->dev, "size %zu is not supported\n",
3398 			period_len);
3399 		return NULL;
3400 	}
3401 
3402 	/* Now allocate and setup the descriptor. */
3403 	tr_size = sizeof(struct cppi5_tr_type1_t);
3404 	d = udma_alloc_tr_desc(uc, tr_size, periods * num_tr, dir);
3405 	if (!d)
3406 		return NULL;
3407 
3408 	tr_req = d->hwdesc[0].tr_req_base;
3409 	if (uc->ud->match_data->type == DMA_TYPE_UDMA)
3410 		period_addr = buf_addr;
3411 	else
3412 		period_addr = buf_addr |
3413 			((u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT);
3414 
3415 	for (i = 0; i < periods; i++) {
3416 		int tr_idx = i * num_tr;
3417 
3418 		cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
3419 			      false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3420 
3421 		tr_req[tr_idx].addr = period_addr;
3422 		tr_req[tr_idx].icnt0 = tr0_cnt0;
3423 		tr_req[tr_idx].icnt1 = tr0_cnt1;
3424 		tr_req[tr_idx].dim1 = tr0_cnt0;
3425 
3426 		if (num_tr == 2) {
3427 			cppi5_tr_csf_set(&tr_req[tr_idx].flags,
3428 					 CPPI5_TR_CSF_SUPR_EVT);
3429 			tr_idx++;
3430 
3431 			cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
3432 				      false, false,
3433 				      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3434 
3435 			tr_req[tr_idx].addr = period_addr + tr0_cnt1 * tr0_cnt0;
3436 			tr_req[tr_idx].icnt0 = tr1_cnt0;
3437 			tr_req[tr_idx].icnt1 = 1;
3438 			tr_req[tr_idx].dim1 = tr1_cnt0;
3439 		}
3440 
3441 		if (!(flags & DMA_PREP_INTERRUPT))
3442 			cppi5_tr_csf_set(&tr_req[tr_idx].flags,
3443 					 CPPI5_TR_CSF_SUPR_EVT);
3444 
3445 		period_addr += period_len;
3446 	}
3447 
3448 	return d;
3449 }
3450 
3451 static struct udma_desc *
3452 udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr,
3453 			 size_t buf_len, size_t period_len,
3454 			 enum dma_transfer_direction dir, unsigned long flags)
3455 {
3456 	struct udma_desc *d;
3457 	u32 ring_id;
3458 	int i;
3459 	int periods = buf_len / period_len;
3460 
3461 	if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1))
3462 		return NULL;
3463 
3464 	if (period_len >= SZ_4M)
3465 		return NULL;
3466 
3467 	d = kzalloc(struct_size(d, hwdesc, periods), GFP_NOWAIT);
3468 	if (!d)
3469 		return NULL;
3470 
3471 	d->hwdesc_count = periods;
3472 
3473 	/* TODO: re-check this... */
3474 	if (dir == DMA_DEV_TO_MEM)
3475 		ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
3476 	else
3477 		ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
3478 
3479 	if (uc->ud->match_data->type != DMA_TYPE_UDMA)
3480 		buf_addr |= (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
3481 
3482 	for (i = 0; i < periods; i++) {
3483 		struct udma_hwdesc *hwdesc = &d->hwdesc[i];
3484 		dma_addr_t period_addr = buf_addr + (period_len * i);
3485 		struct cppi5_host_desc_t *h_desc;
3486 
3487 		hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
3488 						GFP_NOWAIT,
3489 						&hwdesc->cppi5_desc_paddr);
3490 		if (!hwdesc->cppi5_desc_vaddr) {
3491 			dev_err(uc->ud->dev,
3492 				"descriptor%d allocation failed\n", i);
3493 
3494 			udma_free_hwdesc(uc, d);
3495 			kfree(d);
3496 			return NULL;
3497 		}
3498 
3499 		hwdesc->cppi5_desc_size = uc->config.hdesc_size;
3500 		h_desc = hwdesc->cppi5_desc_vaddr;
3501 
3502 		cppi5_hdesc_init(h_desc, 0, 0);
3503 		cppi5_hdesc_set_pktlen(h_desc, period_len);
3504 
3505 		/* Flow and Packed ID */
3506 		cppi5_desc_set_pktids(&h_desc->hdr, uc->id,
3507 				      CPPI5_INFO1_DESC_FLOWID_DEFAULT);
3508 		cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id);
3509 
3510 		/* attach each period to a new descriptor */
3511 		cppi5_hdesc_attach_buf(h_desc,
3512 				       period_addr, period_len,
3513 				       period_addr, period_len);
3514 	}
3515 
3516 	return d;
3517 }
3518 
3519 static struct dma_async_tx_descriptor *
3520 udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
3521 		     size_t period_len, enum dma_transfer_direction dir,
3522 		     unsigned long flags)
3523 {
3524 	struct udma_chan *uc = to_udma_chan(chan);
3525 	enum dma_slave_buswidth dev_width;
3526 	struct udma_desc *d;
3527 	u32 burst;
3528 
3529 	if (dir != uc->config.dir) {
3530 		dev_err(chan->device->dev,
3531 			"%s: chan%d is for %s, not supporting %s\n",
3532 			__func__, uc->id,
3533 			dmaengine_get_direction_text(uc->config.dir),
3534 			dmaengine_get_direction_text(dir));
3535 		return NULL;
3536 	}
3537 
3538 	uc->cyclic = true;
3539 
3540 	if (dir == DMA_DEV_TO_MEM) {
3541 		dev_width = uc->cfg.src_addr_width;
3542 		burst = uc->cfg.src_maxburst;
3543 	} else if (dir == DMA_MEM_TO_DEV) {
3544 		dev_width = uc->cfg.dst_addr_width;
3545 		burst = uc->cfg.dst_maxburst;
3546 	} else {
3547 		dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
3548 		return NULL;
3549 	}
3550 
3551 	if (!burst)
3552 		burst = 1;
3553 
3554 	if (uc->config.pkt_mode)
3555 		d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len,
3556 					     dir, flags);
3557 	else
3558 		d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len,
3559 					    dir, flags);
3560 
3561 	if (!d)
3562 		return NULL;
3563 
3564 	d->sglen = buf_len / period_len;
3565 
3566 	d->dir = dir;
3567 	d->residue = buf_len;
3568 
3569 	/* static TR for remote PDMA */
3570 	if (udma_configure_statictr(uc, d, dev_width, burst)) {
3571 		dev_err(uc->ud->dev,
3572 			"%s: StaticTR Z is limited to maximum 4095 (%u)\n",
3573 			__func__, d->static_tr.bstcnt);
3574 
3575 		udma_free_hwdesc(uc, d);
3576 		kfree(d);
3577 		return NULL;
3578 	}
3579 
3580 	if (uc->config.metadata_size)
3581 		d->vd.tx.metadata_ops = &metadata_ops;
3582 
3583 	return vchan_tx_prep(&uc->vc, &d->vd, flags);
3584 }
3585 
3586 static struct dma_async_tx_descriptor *
3587 udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
3588 		     size_t len, unsigned long tx_flags)
3589 {
3590 	struct udma_chan *uc = to_udma_chan(chan);
3591 	struct udma_desc *d;
3592 	struct cppi5_tr_type15_t *tr_req;
3593 	int num_tr;
3594 	size_t tr_size = sizeof(struct cppi5_tr_type15_t);
3595 	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
3596 
3597 	if (uc->config.dir != DMA_MEM_TO_MEM) {
3598 		dev_err(chan->device->dev,
3599 			"%s: chan%d is for %s, not supporting %s\n",
3600 			__func__, uc->id,
3601 			dmaengine_get_direction_text(uc->config.dir),
3602 			dmaengine_get_direction_text(DMA_MEM_TO_MEM));
3603 		return NULL;
3604 	}
3605 
3606 	num_tr = udma_get_tr_counters(len, __ffs(src | dest), &tr0_cnt0,
3607 				      &tr0_cnt1, &tr1_cnt0);
3608 	if (num_tr < 0) {
3609 		dev_err(uc->ud->dev, "size %zu is not supported\n",
3610 			len);
3611 		return NULL;
3612 	}
3613 
3614 	d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM);
3615 	if (!d)
3616 		return NULL;
3617 
3618 	d->dir = DMA_MEM_TO_MEM;
3619 	d->desc_idx = 0;
3620 	d->tr_idx = 0;
3621 	d->residue = len;
3622 
3623 	if (uc->ud->match_data->type != DMA_TYPE_UDMA) {
3624 		src |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
3625 		dest |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
3626 	}
3627 
3628 	tr_req = d->hwdesc[0].tr_req_base;
3629 
3630 	cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
3631 		      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3632 	cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);
3633 
3634 	tr_req[0].addr = src;
3635 	tr_req[0].icnt0 = tr0_cnt0;
3636 	tr_req[0].icnt1 = tr0_cnt1;
3637 	tr_req[0].icnt2 = 1;
3638 	tr_req[0].icnt3 = 1;
3639 	tr_req[0].dim1 = tr0_cnt0;
3640 
3641 	tr_req[0].daddr = dest;
3642 	tr_req[0].dicnt0 = tr0_cnt0;
3643 	tr_req[0].dicnt1 = tr0_cnt1;
3644 	tr_req[0].dicnt2 = 1;
3645 	tr_req[0].dicnt3 = 1;
3646 	tr_req[0].ddim1 = tr0_cnt0;
3647 
3648 	if (num_tr == 2) {
3649 		cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
3650 			      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3651 		cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);
3652 
3653 		tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
3654 		tr_req[1].icnt0 = tr1_cnt0;
3655 		tr_req[1].icnt1 = 1;
3656 		tr_req[1].icnt2 = 1;
3657 		tr_req[1].icnt3 = 1;
3658 
3659 		tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
3660 		tr_req[1].dicnt0 = tr1_cnt0;
3661 		tr_req[1].dicnt1 = 1;
3662 		tr_req[1].dicnt2 = 1;
3663 		tr_req[1].dicnt3 = 1;
3664 	}
3665 
3666 	cppi5_tr_csf_set(&tr_req[num_tr - 1].flags,
3667 			 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
3668 
3669 	if (uc->config.metadata_size)
3670 		d->vd.tx.metadata_ops = &metadata_ops;
3671 
3672 	return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
3673 }
3674 
3675 static void udma_issue_pending(struct dma_chan *chan)
3676 {
3677 	struct udma_chan *uc = to_udma_chan(chan);
3678 	unsigned long flags;
3679 
3680 	spin_lock_irqsave(&uc->vc.lock, flags);
3681 
3682 	/* If we have something pending and no active descriptor, then */
3683 	if (vchan_issue_pending(&uc->vc) && !uc->desc) {
3684 		/*
3685 		 * start a descriptor if the channel is NOT [marked as
3686 		 * terminating _and_ it is still running (teardown has not
3687 		 * completed yet)].
3688 		 */
3689 		if (!(uc->state == UDMA_CHAN_IS_TERMINATING &&
3690 		      udma_is_chan_running(uc)))
3691 			udma_start(uc);
3692 	}
3693 
3694 	spin_unlock_irqrestore(&uc->vc.lock, flags);
3695 }
3696 
3697 static enum dma_status udma_tx_status(struct dma_chan *chan,
3698 				      dma_cookie_t cookie,
3699 				      struct dma_tx_state *txstate)
3700 {
3701 	struct udma_chan *uc = to_udma_chan(chan);
3702 	enum dma_status ret;
3703 	unsigned long flags;
3704 
3705 	spin_lock_irqsave(&uc->vc.lock, flags);
3706 
3707 	ret = dma_cookie_status(chan, cookie, txstate);
3708 
3709 	if (!udma_is_chan_running(uc))
3710 		ret = DMA_COMPLETE;
3711 
3712 	if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc))
3713 		ret = DMA_PAUSED;
3714 
3715 	if (ret == DMA_COMPLETE || !txstate)
3716 		goto out;
3717 
3718 	if (uc->desc && uc->desc->vd.tx.cookie == cookie) {
3719 		u32 peer_bcnt = 0;
3720 		u32 bcnt = 0;
3721 		u32 residue = uc->desc->residue;
3722 		u32 delay = 0;
3723 
3724 		if (uc->desc->dir == DMA_MEM_TO_DEV) {
3725 			bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
3726 
3727 			if (uc->config.ep_type != PSIL_EP_NATIVE) {
3728 				peer_bcnt = udma_tchanrt_read(uc,
3729 						UDMA_CHAN_RT_PEER_BCNT_REG);
3730 
3731 				if (bcnt > peer_bcnt)
3732 					delay = bcnt - peer_bcnt;
3733 			}
3734 		} else if (uc->desc->dir == DMA_DEV_TO_MEM) {
3735 			bcnt = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
3736 
3737 			if (uc->config.ep_type != PSIL_EP_NATIVE) {
3738 				peer_bcnt = udma_rchanrt_read(uc,
3739 						UDMA_CHAN_RT_PEER_BCNT_REG);
3740 
3741 				if (peer_bcnt > bcnt)
3742 					delay = peer_bcnt - bcnt;
3743 			}
3744 		} else {
3745 			bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
3746 		}
3747 
3748 		bcnt -= uc->bcnt;
3749 		if (bcnt && !(bcnt % uc->desc->residue))
3750 			residue = 0;
3751 		else
3752 			residue -= bcnt % uc->desc->residue;
3753 
3754 		if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) {
3755 			ret = DMA_COMPLETE;
3756 			delay = 0;
3757 		}
3758 
3759 		dma_set_residue(txstate, residue);
3760 		dma_set_in_flight_bytes(txstate, delay);
3761 
3762 	} else {
3763 		ret = DMA_COMPLETE;
3764 	}
3765 
3766 out:
3767 	spin_unlock_irqrestore(&uc->vc.lock, flags);
3768 	return ret;
3769 }
3770 
3771 static int udma_pause(struct dma_chan *chan)
3772 {
3773 	struct udma_chan *uc = to_udma_chan(chan);
3774 
3775 	/* pause the channel */
3776 	switch (uc->config.dir) {
3777 	case DMA_DEV_TO_MEM:
3778 		udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3779 					 UDMA_PEER_RT_EN_PAUSE,
3780 					 UDMA_PEER_RT_EN_PAUSE);
3781 		break;
3782 	case DMA_MEM_TO_DEV:
3783 		udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3784 					 UDMA_PEER_RT_EN_PAUSE,
3785 					 UDMA_PEER_RT_EN_PAUSE);
3786 		break;
3787 	case DMA_MEM_TO_MEM:
3788 		udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG,
3789 					 UDMA_CHAN_RT_CTL_PAUSE,
3790 					 UDMA_CHAN_RT_CTL_PAUSE);
3791 		break;
3792 	default:
3793 		return -EINVAL;
3794 	}
3795 
3796 	return 0;
3797 }
3798 
3799 static int udma_resume(struct dma_chan *chan)
3800 {
3801 	struct udma_chan *uc = to_udma_chan(chan);
3802 
3803 	/* resume the channel */
3804 	switch (uc->config.dir) {
3805 	case DMA_DEV_TO_MEM:
3806 		udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3807 					 UDMA_PEER_RT_EN_PAUSE, 0);
3808 
3809 		break;
3810 	case DMA_MEM_TO_DEV:
3811 		udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3812 					 UDMA_PEER_RT_EN_PAUSE, 0);
3813 		break;
3814 	case DMA_MEM_TO_MEM:
3815 		udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG,
3816 					 UDMA_CHAN_RT_CTL_PAUSE, 0);
3817 		break;
3818 	default:
3819 		return -EINVAL;
3820 	}
3821 
3822 	return 0;
3823 }
3824 
3825 static int udma_terminate_all(struct dma_chan *chan)
3826 {
3827 	struct udma_chan *uc = to_udma_chan(chan);
3828 	unsigned long flags;
3829 	LIST_HEAD(head);
3830 
3831 	spin_lock_irqsave(&uc->vc.lock, flags);
3832 
3833 	if (udma_is_chan_running(uc))
3834 		udma_stop(uc);
3835 
3836 	if (uc->desc) {
3837 		uc->terminated_desc = uc->desc;
3838 		uc->desc = NULL;
3839 		uc->terminated_desc->terminated = true;
3840 		cancel_delayed_work(&uc->tx_drain.work);
3841 	}
3842 
3843 	uc->paused = false;
3844 
3845 	vchan_get_all_descriptors(&uc->vc, &head);
3846 	spin_unlock_irqrestore(&uc->vc.lock, flags);
3847 	vchan_dma_desc_free_list(&uc->vc, &head);
3848 
3849 	return 0;
3850 }
3851 
3852 static void udma_synchronize(struct dma_chan *chan)
3853 {
3854 	struct udma_chan *uc = to_udma_chan(chan);
3855 	unsigned long timeout = msecs_to_jiffies(1000);
3856 
3857 	vchan_synchronize(&uc->vc);
3858 
3859 	if (uc->state == UDMA_CHAN_IS_TERMINATING) {
3860 		timeout = wait_for_completion_timeout(&uc->teardown_completed,
3861 						      timeout);
3862 		if (!timeout) {
3863 			dev_warn(uc->ud->dev, "chan%d teardown timeout!\n",
3864 				 uc->id);
3865 			udma_dump_chan_stdata(uc);
3866 			udma_reset_chan(uc, true);
3867 		}
3868 	}
3869 
3870 	udma_reset_chan(uc, false);
3871 	if (udma_is_chan_running(uc))
3872 		dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id);
3873 
3874 	cancel_delayed_work_sync(&uc->tx_drain.work);
3875 	udma_reset_rings(uc);
3876 }
3877 
3878 static void udma_desc_pre_callback(struct virt_dma_chan *vc,
3879 				   struct virt_dma_desc *vd,
3880 				   struct dmaengine_result *result)
3881 {
3882 	struct udma_chan *uc = to_udma_chan(&vc->chan);
3883 	struct udma_desc *d;
3884 
3885 	if (!vd)
3886 		return;
3887 
3888 	d = to_udma_desc(&vd->tx);
3889 
3890 	if (d->metadata_size)
3891 		udma_fetch_epib(uc, d);
3892 
3893 	/* Provide residue information for the client */
3894 	if (result) {
3895 		void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);
3896 
3897 		if (cppi5_desc_get_type(desc_vaddr) ==
3898 		    CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
3899 			result->residue = d->residue -
3900 					  cppi5_hdesc_get_pktlen(desc_vaddr);
3901 			if (result->residue)
3902 				result->result = DMA_TRANS_ABORTED;
3903 			else
3904 				result->result = DMA_TRANS_NOERROR;
3905 		} else {
3906 			result->residue = 0;
3907 			result->result = DMA_TRANS_NOERROR;
3908 		}
3909 	}
3910 }
3911 
3912 /*
3913  * This tasklet handles the completion of a DMA descriptor by
3914  * calling its callback and freeing it.
3915  */
3916 static void udma_vchan_complete(struct tasklet_struct *t)
3917 {
3918 	struct virt_dma_chan *vc = from_tasklet(vc, t, task);
3919 	struct virt_dma_desc *vd, *_vd;
3920 	struct dmaengine_desc_callback cb;
3921 	LIST_HEAD(head);
3922 
3923 	spin_lock_irq(&vc->lock);
3924 	list_splice_tail_init(&vc->desc_completed, &head);
3925 	vd = vc->cyclic;
3926 	if (vd) {
3927 		vc->cyclic = NULL;
3928 		dmaengine_desc_get_callback(&vd->tx, &cb);
3929 	} else {
3930 		memset(&cb, 0, sizeof(cb));
3931 	}
3932 	spin_unlock_irq(&vc->lock);
3933 
3934 	udma_desc_pre_callback(vc, vd, NULL);
3935 	dmaengine_desc_callback_invoke(&cb, NULL);
3936 
3937 	list_for_each_entry_safe(vd, _vd, &head, node) {
3938 		struct dmaengine_result result;
3939 
3940 		dmaengine_desc_get_callback(&vd->tx, &cb);
3941 
3942 		list_del(&vd->node);
3943 
3944 		udma_desc_pre_callback(vc, vd, &result);
3945 		dmaengine_desc_callback_invoke(&cb, &result);
3946 
3947 		vchan_vdesc_fini(vd);
3948 	}
3949 }
3950 
3951 static void udma_free_chan_resources(struct dma_chan *chan)
3952 {
3953 	struct udma_chan *uc = to_udma_chan(chan);
3954 	struct udma_dev *ud = to_udma_dev(chan->device);
3955 
3956 	udma_terminate_all(chan);
3957 	if (uc->terminated_desc) {
3958 		udma_reset_chan(uc, false);
3959 		udma_reset_rings(uc);
3960 	}
3961 
3962 	cancel_delayed_work_sync(&uc->tx_drain.work);
3963 
3964 	if (uc->irq_num_ring > 0) {
3965 		free_irq(uc->irq_num_ring, uc);
3966 
3967 		uc->irq_num_ring = 0;
3968 	}
3969 	if (uc->irq_num_udma > 0) {
3970 		free_irq(uc->irq_num_udma, uc);
3971 
3972 		uc->irq_num_udma = 0;
3973 	}
3974 
3975 	/* Release PSI-L pairing */
3976 	if (uc->psil_paired) {
3977 		navss_psil_unpair(ud, uc->config.src_thread,
3978 				  uc->config.dst_thread);
3979 		uc->psil_paired = false;
3980 	}
3981 
3982 	vchan_free_chan_resources(&uc->vc);
3983 	tasklet_kill(&uc->vc.task);
3984 
3985 	bcdma_free_bchan_resources(uc);
3986 	udma_free_tx_resources(uc);
3987 	udma_free_rx_resources(uc);
3988 	udma_reset_uchan(uc);
3989 
3990 	if (uc->use_dma_pool) {
3991 		dma_pool_destroy(uc->hdesc_pool);
3992 		uc->use_dma_pool = false;
3993 	}
3994 }
3995 
3996 static struct platform_driver udma_driver;
3997 static struct platform_driver bcdma_driver;
3998 static struct platform_driver pktdma_driver;
3999 
4000 struct udma_filter_param {
4001 	int remote_thread_id;
4002 	u32 atype;
4003 	u32 asel;
4004 	u32 tr_trigger_type;
4005 };
4006 
4007 static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
4008 {
4009 	struct udma_chan_config *ucc;
4010 	struct psil_endpoint_config *ep_config;
4011 	struct udma_filter_param *filter_param;
4012 	struct udma_chan *uc;
4013 	struct udma_dev *ud;
4014 
4015 	if (chan->device->dev->driver != &udma_driver.driver &&
4016 	    chan->device->dev->driver != &bcdma_driver.driver &&
4017 	    chan->device->dev->driver != &pktdma_driver.driver)
4018 		return false;
4019 
4020 	uc = to_udma_chan(chan);
4021 	ucc = &uc->config;
4022 	ud = uc->ud;
4023 	filter_param = param;
4024 
4025 	if (filter_param->atype > 2) {
4026 		dev_err(ud->dev, "Invalid channel atype: %u\n",
4027 			filter_param->atype);
4028 		return false;
4029 	}
4030 
4031 	if (filter_param->asel > 15) {
4032 		dev_err(ud->dev, "Invalid channel asel: %u\n",
4033 			filter_param->asel);
4034 		return false;
4035 	}
4036 
4037 	ucc->remote_thread_id = filter_param->remote_thread_id;
4038 	ucc->atype = filter_param->atype;
4039 	ucc->asel = filter_param->asel;
4040 	ucc->tr_trigger_type = filter_param->tr_trigger_type;
4041 
4042 	if (ucc->tr_trigger_type) {
4043 		ucc->dir = DMA_MEM_TO_MEM;
4044 		goto triggered_bchan;
4045 	} else if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET) {
4046 		ucc->dir = DMA_MEM_TO_DEV;
4047 	} else {
4048 		ucc->dir = DMA_DEV_TO_MEM;
4049 	}
4050 
4051 	ep_config = psil_get_ep_config(ucc->remote_thread_id);
4052 	if (IS_ERR(ep_config)) {
4053 		dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
4054 			ucc->remote_thread_id);
4055 		ucc->dir = DMA_MEM_TO_MEM;
4056 		ucc->remote_thread_id = -1;
4057 		ucc->atype = 0;
4058 		ucc->asel = 0;
4059 		return false;
4060 	}
4061 
4062 	if (ud->match_data->type == DMA_TYPE_BCDMA &&
4063 	    ep_config->pkt_mode) {
4064 		dev_err(ud->dev,
4065 			"Only TR mode is supported (psi-l thread 0x%04x)\n",
4066 			ucc->remote_thread_id);
4067 		ucc->dir = DMA_MEM_TO_MEM;
4068 		ucc->remote_thread_id = -1;
4069 		ucc->atype = 0;
4070 		ucc->asel = 0;
4071 		return false;
4072 	}
4073 
4074 	ucc->pkt_mode = ep_config->pkt_mode;
4075 	ucc->channel_tpl = ep_config->channel_tpl;
4076 	ucc->notdpkt = ep_config->notdpkt;
4077 	ucc->ep_type = ep_config->ep_type;
4078 
4079 	if (ud->match_data->type == DMA_TYPE_PKTDMA &&
4080 	    ep_config->mapped_channel_id >= 0) {
4081 		ucc->mapped_channel_id = ep_config->mapped_channel_id;
4082 		ucc->default_flow_id = ep_config->default_flow_id;
4083 	} else {
4084 		ucc->mapped_channel_id = -1;
4085 		ucc->default_flow_id = -1;
4086 	}
4087 
4088 	if (ucc->ep_type != PSIL_EP_NATIVE) {
4089 		const struct udma_match_data *match_data = ud->match_data;
4090 
4091 		if (match_data->flags & UDMA_FLAG_PDMA_ACC32)
4092 			ucc->enable_acc32 = ep_config->pdma_acc32;
4093 		if (match_data->flags & UDMA_FLAG_PDMA_BURST)
4094 			ucc->enable_burst = ep_config->pdma_burst;
4095 	}
4096 
4097 	ucc->needs_epib = ep_config->needs_epib;
4098 	ucc->psd_size = ep_config->psd_size;
4099 	ucc->metadata_size =
4100 			(ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) +
4101 			ucc->psd_size;
4102 
4103 	if (ucc->pkt_mode)
4104 		ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
4105 				 ucc->metadata_size, ud->desc_align);
4106 
4107 	dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id,
4108 		ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir));
4109 
4110 	return true;
4111 
4112 triggered_bchan:
4113 	dev_dbg(ud->dev, "chan%d: triggered channel (type: %u)\n", uc->id,
4114 		ucc->tr_trigger_type);
4115 
4116 	return true;
4117 
4118 }
4119 
4120 static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
4121 				      struct of_dma *ofdma)
4122 {
4123 	struct udma_dev *ud = ofdma->of_dma_data;
4124 	dma_cap_mask_t mask = ud->ddev.cap_mask;
4125 	struct udma_filter_param filter_param;
4126 	struct dma_chan *chan;
4127 
4128 	if (ud->match_data->type == DMA_TYPE_BCDMA) {
4129 		if (dma_spec->args_count != 3)
4130 			return NULL;
4131 
4132 		filter_param.tr_trigger_type = dma_spec->args[0];
4133 		filter_param.remote_thread_id = dma_spec->args[1];
4134 		filter_param.asel = dma_spec->args[2];
4135 		filter_param.atype = 0;
4136 	} else {
4137 		if (dma_spec->args_count != 1 && dma_spec->args_count != 2)
4138 			return NULL;
4139 
4140 		filter_param.remote_thread_id = dma_spec->args[0];
4141 		filter_param.tr_trigger_type = 0;
4142 		if (dma_spec->args_count == 2) {
4143 			if (ud->match_data->type == DMA_TYPE_UDMA) {
4144 				filter_param.atype = dma_spec->args[1];
4145 				filter_param.asel = 0;
4146 			} else {
4147 				filter_param.atype = 0;
4148 				filter_param.asel = dma_spec->args[1];
4149 			}
4150 		} else {
4151 			filter_param.atype = 0;
4152 			filter_param.asel = 0;
4153 		}
4154 	}
4155 
4156 	chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
4157 				     ofdma->of_node);
4158 	if (!chan) {
4159 		dev_err(ud->dev, "get channel fail in %s.\n", __func__);
4160 		return ERR_PTR(-EINVAL);
4161 	}
4162 
4163 	return chan;
4164 }
4165 
4166 static struct udma_match_data am654_main_data = {
4167 	.type = DMA_TYPE_UDMA,
4168 	.psil_base = 0x1000,
4169 	.enable_memcpy_support = true,
4170 	.statictr_z_mask = GENMASK(11, 0),
4171 };
4172 
4173 static struct udma_match_data am654_mcu_data = {
4174 	.type = DMA_TYPE_UDMA,
4175 	.psil_base = 0x6000,
4176 	.enable_memcpy_support = false,
4177 	.statictr_z_mask = GENMASK(11, 0),
4178 };
4179 
4180 static struct udma_match_data j721e_main_data = {
4181 	.type = DMA_TYPE_UDMA,
4182 	.psil_base = 0x1000,
4183 	.enable_memcpy_support = true,
4184 	.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
4185 	.statictr_z_mask = GENMASK(23, 0),
4186 };
4187 
4188 static struct udma_match_data j721e_mcu_data = {
4189 	.type = DMA_TYPE_UDMA,
4190 	.psil_base = 0x6000,
4191 	.enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */
4192 	.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
4193 	.statictr_z_mask = GENMASK(23, 0),
4194 };
4195 
4196 static struct udma_match_data am64_bcdma_data = {
4197 	.type = DMA_TYPE_BCDMA,
4198 	.psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */
4199 	.enable_memcpy_support = true, /* Supported via bchan */
4200 	.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
4201 	.statictr_z_mask = GENMASK(23, 0),
4202 };
4203 
4204 static struct udma_match_data am64_pktdma_data = {
4205 	.type = DMA_TYPE_PKTDMA,
4206 	.psil_base = 0x1000,
4207 	.enable_memcpy_support = false, /* PKTDMA does not support MEM_TO_MEM */
4208 	.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
4209 	.statictr_z_mask = GENMASK(23, 0),
4210 };
4211 
4212 static const struct of_device_id udma_of_match[] = {
4213 	{
4214 		.compatible = "ti,am654-navss-main-udmap",
4215 		.data = &am654_main_data,
4216 	},
4217 	{
4218 		.compatible = "ti,am654-navss-mcu-udmap",
4219 		.data = &am654_mcu_data,
4220 	}, {
4221 		.compatible = "ti,j721e-navss-main-udmap",
4222 		.data = &j721e_main_data,
4223 	}, {
4224 		.compatible = "ti,j721e-navss-mcu-udmap",
4225 		.data = &j721e_mcu_data,
4226 	},
4227 	{ /* Sentinel */ },
4228 };
4229 
4230 static const struct of_device_id bcdma_of_match[] = {
4231 	{
4232 		.compatible = "ti,am64-dmss-bcdma",
4233 		.data = &am64_bcdma_data,
4234 	},
4235 	{ /* Sentinel */ },
4236 };
4237 
4238 static const struct of_device_id pktdma_of_match[] = {
4239 	{
4240 		.compatible = "ti,am64-dmss-pktdma",
4241 		.data = &am64_pktdma_data,
4242 	},
4243 	{ /* Sentinel */ },
4244 };
4245 
4246 static struct udma_soc_data am654_soc_data = {
4247 	.oes = {
4248 		.udma_rchan = 0x200,
4249 	},
4250 };
4251 
4252 static struct udma_soc_data j721e_soc_data = {
4253 	.oes = {
4254 		.udma_rchan = 0x400,
4255 	},
4256 };
4257 
4258 static struct udma_soc_data j7200_soc_data = {
4259 	.oes = {
4260 		.udma_rchan = 0x80,
4261 	},
4262 };
4263 
4264 static struct udma_soc_data am64_soc_data = {
4265 	.oes = {
4266 		.bcdma_bchan_data = 0x2200,
4267 		.bcdma_bchan_ring = 0x2400,
4268 		.bcdma_tchan_data = 0x2800,
4269 		.bcdma_tchan_ring = 0x2a00,
4270 		.bcdma_rchan_data = 0x2e00,
4271 		.bcdma_rchan_ring = 0x3000,
4272 		.pktdma_tchan_flow = 0x1200,
4273 		.pktdma_rchan_flow = 0x1600,
4274 	},
4275 	.bcdma_trigger_event_offset = 0xc400,
4276 };
4277 
4278 static const struct soc_device_attribute k3_soc_devices[] = {
4279 	{ .family = "AM65X", .data = &am654_soc_data },
4280 	{ .family = "J721E", .data = &j721e_soc_data },
4281 	{ .family = "J7200", .data = &j7200_soc_data },
4282 	{ .family = "AM64X", .data = &am64_soc_data },
4283 	{ /* sentinel */ }
4284 };
4285 
4286 static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
4287 {
4288 	u32 cap2, cap3, cap4;
4289 	int i;
4290 
4291 	ud->mmrs[MMR_GCFG] = devm_platform_ioremap_resource_byname(pdev, mmr_names[MMR_GCFG]);
4292 	if (IS_ERR(ud->mmrs[MMR_GCFG]))
4293 		return PTR_ERR(ud->mmrs[MMR_GCFG]);
4294 
4295 	cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
4296 	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4297 
4298 	switch (ud->match_data->type) {
4299 	case DMA_TYPE_UDMA:
4300 		ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
4301 		ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
4302 		ud->echan_cnt = UDMA_CAP2_ECHAN_CNT(cap2);
4303 		ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
4304 		break;
4305 	case DMA_TYPE_BCDMA:
4306 		ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2);
4307 		ud->tchan_cnt = BCDMA_CAP2_TCHAN_CNT(cap2);
4308 		ud->rchan_cnt = BCDMA_CAP2_RCHAN_CNT(cap2);
4309 		break;
4310 	case DMA_TYPE_PKTDMA:
4311 		cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30);
4312 		ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
4313 		ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
4314 		ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
4315 		ud->tflow_cnt = PKTDMA_CAP4_TFLOW_CNT(cap4);
4316 		break;
4317 	default:
4318 		return -EINVAL;
4319 	}
4320 
4321 	for (i = 1; i < MMR_LAST; i++) {
4322 		if (i == MMR_BCHANRT && ud->bchan_cnt == 0)
4323 			continue;
4324 		if (i == MMR_TCHANRT && ud->tchan_cnt == 0)
4325 			continue;
4326 		if (i == MMR_RCHANRT && ud->rchan_cnt == 0)
4327 			continue;
4328 
4329 		ud->mmrs[i] = devm_platform_ioremap_resource_byname(pdev, mmr_names[i]);
4330 		if (IS_ERR(ud->mmrs[i]))
4331 			return PTR_ERR(ud->mmrs[i]);
4332 	}
4333 
4334 	return 0;
4335 }
4336 
4337 static void udma_mark_resource_ranges(struct udma_dev *ud, unsigned long *map,
4338 				      struct ti_sci_resource_desc *rm_desc,
4339 				      char *name)
4340 {
4341 	bitmap_clear(map, rm_desc->start, rm_desc->num);
4342 	bitmap_clear(map, rm_desc->start_sec, rm_desc->num_sec);
4343 	dev_dbg(ud->dev, "ti_sci resource range for %s: %d:%d | %d:%d\n", name,
4344 		rm_desc->start, rm_desc->num, rm_desc->start_sec,
4345 		rm_desc->num_sec);
4346 }
4347 
4348 static const char * const range_names[] = {
4349 	[RM_RANGE_BCHAN] = "ti,sci-rm-range-bchan",
4350 	[RM_RANGE_TCHAN] = "ti,sci-rm-range-tchan",
4351 	[RM_RANGE_RCHAN] = "ti,sci-rm-range-rchan",
4352 	[RM_RANGE_RFLOW] = "ti,sci-rm-range-rflow",
4353 	[RM_RANGE_TFLOW] = "ti,sci-rm-range-tflow",
4354 };
4355 
4356 static int udma_setup_resources(struct udma_dev *ud)
4357 {
4358 	int ret, i, j;
4359 	struct device *dev = ud->dev;
4360 	struct ti_sci_resource *rm_res, irq_res;
4361 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
4362 	u32 cap3;
4363 
4364 	/* Set up the throughput level start indexes */
4365 	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4366 	if (of_device_is_compatible(dev->of_node,
4367 				    "ti,am654-navss-main-udmap")) {
4368 		ud->tchan_tpl.levels = 2;
4369 		ud->tchan_tpl.start_idx[0] = 8;
4370 	} else if (of_device_is_compatible(dev->of_node,
4371 					   "ti,am654-navss-mcu-udmap")) {
4372 		ud->tchan_tpl.levels = 2;
4373 		ud->tchan_tpl.start_idx[0] = 2;
4374 	} else if (UDMA_CAP3_UCHAN_CNT(cap3)) {
4375 		ud->tchan_tpl.levels = 3;
4376 		ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
4377 		ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
4378 	} else if (UDMA_CAP3_HCHAN_CNT(cap3)) {
4379 		ud->tchan_tpl.levels = 2;
4380 		ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
4381 	} else {
4382 		ud->tchan_tpl.levels = 1;
4383 	}
4384 
4385 	ud->rchan_tpl.levels = ud->tchan_tpl.levels;
4386 	ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
4387 	ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];
4388 
4389 	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
4390 					   sizeof(unsigned long), GFP_KERNEL);
4391 	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
4392 				  GFP_KERNEL);
4393 	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
4394 					   sizeof(unsigned long), GFP_KERNEL);
4395 	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
4396 				  GFP_KERNEL);
4397 	ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
4398 					      sizeof(unsigned long),
4399 					      GFP_KERNEL);
4400 	ud->rflow_gp_map_allocated = devm_kcalloc(dev,
4401 						  BITS_TO_LONGS(ud->rflow_cnt),
4402 						  sizeof(unsigned long),
4403 						  GFP_KERNEL);
4404 	ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
4405 					sizeof(unsigned long),
4406 					GFP_KERNEL);
4407 	ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
4408 				  GFP_KERNEL);
4409 
4410 	if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map ||
4411 	    !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans ||
4412 	    !ud->rflows || !ud->rflow_in_use)
4413 		return -ENOMEM;
4414 
4415 	/*
4416 	 * RX flows with the same Ids as RX channels are reserved to be used
4417 	 * as default flows if remote HW can't generate flow_ids. Those
4418 	 * RX flows can be requested only explicitly by id.
4419 	 */
4420 	bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt);
4421 
4422 	/* by default no GP rflows are assigned to Linux */
4423 	bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt);
4424 
4425 	/* Get resource ranges from tisci */
4426 	for (i = 0; i < RM_RANGE_LAST; i++) {
4427 		if (i == RM_RANGE_BCHAN || i == RM_RANGE_TFLOW)
4428 			continue;
4429 
4430 		tisci_rm->rm_ranges[i] =
4431 			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
4432 						    tisci_rm->tisci_dev_id,
4433 						    (char *)range_names[i]);
4434 	}
4435 
4436 	/* tchan ranges */
4437 	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4438 	if (IS_ERR(rm_res)) {
4439 		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
4440 	} else {
4441 		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
4442 		for (i = 0; i < rm_res->sets; i++)
4443 			udma_mark_resource_ranges(ud, ud->tchan_map,
4444 						  &rm_res->desc[i], "tchan");
4445 	}
4446 	irq_res.sets = rm_res->sets;
4447 
4448 	/* rchan and matching default flow ranges */
4449 	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
4450 	if (IS_ERR(rm_res)) {
4451 		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
4452 	} else {
4453 		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
4454 		for (i = 0; i < rm_res->sets; i++)
4455 			udma_mark_resource_ranges(ud, ud->rchan_map,
4456 						  &rm_res->desc[i], "rchan");
4457 	}
4458 
4459 	irq_res.sets += rm_res->sets;
4460 	irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
4461 	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4462 	for (i = 0; i < rm_res->sets; i++) {
4463 		irq_res.desc[i].start = rm_res->desc[i].start;
4464 		irq_res.desc[i].num = rm_res->desc[i].num;
4465 		irq_res.desc[i].start_sec = rm_res->desc[i].start_sec;
4466 		irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
4467 	}
4468 	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
4469 	for (j = 0; j < rm_res->sets; j++, i++) {
4470 		if (rm_res->desc[j].num) {
4471 			irq_res.desc[i].start = rm_res->desc[j].start +
4472 					ud->soc_data->oes.udma_rchan;
4473 			irq_res.desc[i].num = rm_res->desc[j].num;
4474 		}
4475 		if (rm_res->desc[j].num_sec) {
4476 			irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
4477 					ud->soc_data->oes.udma_rchan;
4478 			irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
4479 		}
4480 	}
4481 	ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
4482 	kfree(irq_res.desc);
4483 	if (ret) {
4484 		dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
4485 		return ret;
4486 	}
4487 
4488 	/* GP rflow ranges */
4489 	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
4490 	if (IS_ERR(rm_res)) {
4491 		/* all gp flows are assigned exclusively to Linux */
4492 		bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
4493 			     ud->rflow_cnt - ud->rchan_cnt);
4494 	} else {
4495 		for (i = 0; i < rm_res->sets; i++)
4496 			udma_mark_resource_ranges(ud, ud->rflow_gp_map,
4497 						  &rm_res->desc[i], "gp-rflow");
4498 	}
4499 
4500 	return 0;
4501 }
4502 
4503 static int bcdma_setup_resources(struct udma_dev *ud)
4504 {
4505 	int ret, i, j;
4506 	struct device *dev = ud->dev;
4507 	struct ti_sci_resource *rm_res, irq_res;
4508 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
4509 	const struct udma_oes_offsets *oes = &ud->soc_data->oes;
4510 	u32 cap;
4511 
4512 	/* Set up the throughput level start indexes */
4513 	cap = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4514 	if (BCDMA_CAP3_UBCHAN_CNT(cap)) {
4515 		ud->bchan_tpl.levels = 3;
4516 		ud->bchan_tpl.start_idx[1] = BCDMA_CAP3_UBCHAN_CNT(cap);
4517 		ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap);
4518 	} else if (BCDMA_CAP3_HBCHAN_CNT(cap)) {
4519 		ud->bchan_tpl.levels = 2;
4520 		ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap);
4521 	} else {
4522 		ud->bchan_tpl.levels = 1;
4523 	}
4524 
4525 	cap = udma_read(ud->mmrs[MMR_GCFG], 0x30);
4526 	if (BCDMA_CAP4_URCHAN_CNT(cap)) {
4527 		ud->rchan_tpl.levels = 3;
4528 		ud->rchan_tpl.start_idx[1] = BCDMA_CAP4_URCHAN_CNT(cap);
4529 		ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap);
4530 	} else if (BCDMA_CAP4_HRCHAN_CNT(cap)) {
4531 		ud->rchan_tpl.levels = 2;
4532 		ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap);
4533 	} else {
4534 		ud->rchan_tpl.levels = 1;
4535 	}
4536 
4537 	if (BCDMA_CAP4_UTCHAN_CNT(cap)) {
4538 		ud->tchan_tpl.levels = 3;
4539 		ud->tchan_tpl.start_idx[1] = BCDMA_CAP4_UTCHAN_CNT(cap);
4540 		ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap);
4541 	} else if (BCDMA_CAP4_HTCHAN_CNT(cap)) {
4542 		ud->tchan_tpl.levels = 2;
4543 		ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap);
4544 	} else {
4545 		ud->tchan_tpl.levels = 1;
4546 	}
4547 
4548 	ud->bchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->bchan_cnt),
4549 					   sizeof(unsigned long), GFP_KERNEL);
4550 	ud->bchans = devm_kcalloc(dev, ud->bchan_cnt, sizeof(*ud->bchans),
4551 				  GFP_KERNEL);
4552 	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
4553 					   sizeof(unsigned long), GFP_KERNEL);
4554 	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
4555 				  GFP_KERNEL);
4556 	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
4557 					   sizeof(unsigned long), GFP_KERNEL);
4558 	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
4559 				  GFP_KERNEL);
4560 	/* BCDMA do not really have flows, but the driver expect it */
4561 	ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rchan_cnt),
4562 					sizeof(unsigned long),
4563 					GFP_KERNEL);
4564 	ud->rflows = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rflows),
4565 				  GFP_KERNEL);
4566 
4567 	if (!ud->bchan_map || !ud->tchan_map || !ud->rchan_map ||
4568 	    !ud->rflow_in_use || !ud->bchans || !ud->tchans || !ud->rchans ||
4569 	    !ud->rflows)
4570 		return -ENOMEM;
4571 
4572 	/* Get resource ranges from tisci */
4573 	for (i = 0; i < RM_RANGE_LAST; i++) {
4574 		if (i == RM_RANGE_RFLOW || i == RM_RANGE_TFLOW)
4575 			continue;
4576 		if (i == RM_RANGE_BCHAN && ud->bchan_cnt == 0)
4577 			continue;
4578 		if (i == RM_RANGE_TCHAN && ud->tchan_cnt == 0)
4579 			continue;
4580 		if (i == RM_RANGE_RCHAN && ud->rchan_cnt == 0)
4581 			continue;
4582 
4583 		tisci_rm->rm_ranges[i] =
4584 			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
4585 						    tisci_rm->tisci_dev_id,
4586 						    (char *)range_names[i]);
4587 	}
4588 
4589 	irq_res.sets = 0;
4590 
4591 	/* bchan ranges */
4592 	if (ud->bchan_cnt) {
4593 		rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
4594 		if (IS_ERR(rm_res)) {
4595 			bitmap_zero(ud->bchan_map, ud->bchan_cnt);
4596 		} else {
4597 			bitmap_fill(ud->bchan_map, ud->bchan_cnt);
4598 			for (i = 0; i < rm_res->sets; i++)
4599 				udma_mark_resource_ranges(ud, ud->bchan_map,
4600 							  &rm_res->desc[i],
4601 							  "bchan");
4602 		}
4603 		irq_res.sets += rm_res->sets;
4604 	}
4605 
4606 	/* tchan ranges */
4607 	if (ud->tchan_cnt) {
4608 		rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4609 		if (IS_ERR(rm_res)) {
4610 			bitmap_zero(ud->tchan_map, ud->tchan_cnt);
4611 		} else {
4612 			bitmap_fill(ud->tchan_map, ud->tchan_cnt);
4613 			for (i = 0; i < rm_res->sets; i++)
4614 				udma_mark_resource_ranges(ud, ud->tchan_map,
4615 							  &rm_res->desc[i],
4616 							  "tchan");
4617 		}
4618 		irq_res.sets += rm_res->sets * 2;
4619 	}
4620 
4621 	/* rchan ranges */
4622 	if (ud->rchan_cnt) {
4623 		rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
4624 		if (IS_ERR(rm_res)) {
4625 			bitmap_zero(ud->rchan_map, ud->rchan_cnt);
4626 		} else {
4627 			bitmap_fill(ud->rchan_map, ud->rchan_cnt);
4628 			for (i = 0; i < rm_res->sets; i++)
4629 				udma_mark_resource_ranges(ud, ud->rchan_map,
4630 							  &rm_res->desc[i],
4631 							  "rchan");
4632 		}
4633 		irq_res.sets += rm_res->sets * 2;
4634 	}
4635 
4636 	irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
4637 	if (ud->bchan_cnt) {
4638 		rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
4639 		for (i = 0; i < rm_res->sets; i++) {
4640 			irq_res.desc[i].start = rm_res->desc[i].start +
4641 						oes->bcdma_bchan_ring;
4642 			irq_res.desc[i].num = rm_res->desc[i].num;
4643 		}
4644 	}
4645 	if (ud->tchan_cnt) {
4646 		rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4647 		for (j = 0; j < rm_res->sets; j++, i += 2) {
4648 			irq_res.desc[i].start = rm_res->desc[j].start +
4649 						oes->bcdma_tchan_data;
4650 			irq_res.desc[i].num = rm_res->desc[j].num;
4651 
4652 			irq_res.desc[i + 1].start = rm_res->desc[j].start +
4653 						oes->bcdma_tchan_ring;
4654 			irq_res.desc[i + 1].num = rm_res->desc[j].num;
4655 		}
4656 	}
4657 	if (ud->rchan_cnt) {
4658 		rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
4659 		for (j = 0; j < rm_res->sets; j++, i += 2) {
4660 			irq_res.desc[i].start = rm_res->desc[j].start +
4661 						oes->bcdma_rchan_data;
4662 			irq_res.desc[i].num = rm_res->desc[j].num;
4663 
4664 			irq_res.desc[i + 1].start = rm_res->desc[j].start +
4665 						oes->bcdma_rchan_ring;
4666 			irq_res.desc[i + 1].num = rm_res->desc[j].num;
4667 		}
4668 	}
4669 
4670 	ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
4671 	kfree(irq_res.desc);
4672 	if (ret) {
4673 		dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
4674 		return ret;
4675 	}
4676 
4677 	return 0;
4678 }
4679 
4680 static int pktdma_setup_resources(struct udma_dev *ud)
4681 {
4682 	int ret, i, j;
4683 	struct device *dev = ud->dev;
4684 	struct ti_sci_resource *rm_res, irq_res;
4685 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
4686 	const struct udma_oes_offsets *oes = &ud->soc_data->oes;
4687 	u32 cap3;
4688 
4689 	/* Set up the throughput level start indexes */
4690 	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4691 	if (UDMA_CAP3_UCHAN_CNT(cap3)) {
4692 		ud->tchan_tpl.levels = 3;
4693 		ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
4694 		ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
4695 	} else if (UDMA_CAP3_HCHAN_CNT(cap3)) {
4696 		ud->tchan_tpl.levels = 2;
4697 		ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
4698 	} else {
4699 		ud->tchan_tpl.levels = 1;
4700 	}
4701 
4702 	ud->rchan_tpl.levels = ud->tchan_tpl.levels;
4703 	ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
4704 	ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];
4705 
4706 	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
4707 					   sizeof(unsigned long), GFP_KERNEL);
4708 	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
4709 				  GFP_KERNEL);
4710 	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
4711 					   sizeof(unsigned long), GFP_KERNEL);
4712 	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
4713 				  GFP_KERNEL);
4714 	ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
4715 					sizeof(unsigned long),
4716 					GFP_KERNEL);
4717 	ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
4718 				  GFP_KERNEL);
4719 	ud->tflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tflow_cnt),
4720 					   sizeof(unsigned long), GFP_KERNEL);
4721 
4722 	if (!ud->tchan_map || !ud->rchan_map || !ud->tflow_map || !ud->tchans ||
4723 	    !ud->rchans || !ud->rflows || !ud->rflow_in_use)
4724 		return -ENOMEM;
4725 
4726 	/* Get resource ranges from tisci */
4727 	for (i = 0; i < RM_RANGE_LAST; i++) {
4728 		if (i == RM_RANGE_BCHAN)
4729 			continue;
4730 
4731 		tisci_rm->rm_ranges[i] =
4732 			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
4733 						    tisci_rm->tisci_dev_id,
4734 						    (char *)range_names[i]);
4735 	}
4736 
4737 	/* tchan ranges */
4738 	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4739 	if (IS_ERR(rm_res)) {
4740 		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
4741 	} else {
4742 		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
4743 		for (i = 0; i < rm_res->sets; i++)
4744 			udma_mark_resource_ranges(ud, ud->tchan_map,
4745 						  &rm_res->desc[i], "tchan");
4746 	}
4747 
4748 	/* rchan ranges */
4749 	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
4750 	if (IS_ERR(rm_res)) {
4751 		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
4752 	} else {
4753 		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
4754 		for (i = 0; i < rm_res->sets; i++)
4755 			udma_mark_resource_ranges(ud, ud->rchan_map,
4756 						  &rm_res->desc[i], "rchan");
4757 	}
4758 
4759 	/* rflow ranges */
4760 	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
4761 	if (IS_ERR(rm_res)) {
4762 		/* all rflows are assigned exclusively to Linux */
4763 		bitmap_zero(ud->rflow_in_use, ud->rflow_cnt);
4764 	} else {
4765 		bitmap_fill(ud->rflow_in_use, ud->rflow_cnt);
4766 		for (i = 0; i < rm_res->sets; i++)
4767 			udma_mark_resource_ranges(ud, ud->rflow_in_use,
4768 						  &rm_res->desc[i], "rflow");
4769 	}
4770 	irq_res.sets = rm_res->sets;
4771 
4772 	/* tflow ranges */
4773 	rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
4774 	if (IS_ERR(rm_res)) {
4775 		/* all tflows are assigned exclusively to Linux */
4776 		bitmap_zero(ud->tflow_map, ud->tflow_cnt);
4777 	} else {
4778 		bitmap_fill(ud->tflow_map, ud->tflow_cnt);
4779 		for (i = 0; i < rm_res->sets; i++)
4780 			udma_mark_resource_ranges(ud, ud->tflow_map,
4781 						  &rm_res->desc[i], "tflow");
4782 	}
4783 	irq_res.sets += rm_res->sets;
4784 
4785 	irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
4786 	rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
4787 	for (i = 0; i < rm_res->sets; i++) {
4788 		irq_res.desc[i].start = rm_res->desc[i].start +
4789 					oes->pktdma_tchan_flow;
4790 		irq_res.desc[i].num = rm_res->desc[i].num;
4791 	}
4792 	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
4793 	for (j = 0; j < rm_res->sets; j++, i++) {
4794 		irq_res.desc[i].start = rm_res->desc[j].start +
4795 					oes->pktdma_rchan_flow;
4796 		irq_res.desc[i].num = rm_res->desc[j].num;
4797 	}
4798 	ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
4799 	kfree(irq_res.desc);
4800 	if (ret) {
4801 		dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
4802 		return ret;
4803 	}
4804 
4805 	return 0;
4806 }
4807 
4808 static int setup_resources(struct udma_dev *ud)
4809 {
4810 	struct device *dev = ud->dev;
4811 	int ch_count, ret;
4812 
4813 	switch (ud->match_data->type) {
4814 	case DMA_TYPE_UDMA:
4815 		ret = udma_setup_resources(ud);
4816 		break;
4817 	case DMA_TYPE_BCDMA:
4818 		ret = bcdma_setup_resources(ud);
4819 		break;
4820 	case DMA_TYPE_PKTDMA:
4821 		ret = pktdma_setup_resources(ud);
4822 		break;
4823 	default:
4824 		return -EINVAL;
4825 	}
4826 
4827 	if (ret)
4828 		return ret;
4829 
4830 	ch_count  = ud->bchan_cnt + ud->tchan_cnt + ud->rchan_cnt;
4831 	if (ud->bchan_cnt)
4832 		ch_count -= bitmap_weight(ud->bchan_map, ud->bchan_cnt);
4833 	ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
4834 	ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
4835 	if (!ch_count)
4836 		return -ENODEV;
4837 
4838 	ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
4839 				    GFP_KERNEL);
4840 	if (!ud->channels)
4841 		return -ENOMEM;
4842 
4843 	switch (ud->match_data->type) {
4844 	case DMA_TYPE_UDMA:
4845 		dev_info(dev,
4846 			 "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
4847 			 ch_count,
4848 			 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
4849 						       ud->tchan_cnt),
4850 			 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
4851 						       ud->rchan_cnt),
4852 			 ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
4853 						       ud->rflow_cnt));
4854 		break;
4855 	case DMA_TYPE_BCDMA:
4856 		dev_info(dev,
4857 			 "Channels: %d (bchan: %u, tchan: %u, rchan: %u)\n",
4858 			 ch_count,
4859 			 ud->bchan_cnt - bitmap_weight(ud->bchan_map,
4860 						       ud->bchan_cnt),
4861 			 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
4862 						       ud->tchan_cnt),
4863 			 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
4864 						       ud->rchan_cnt));
4865 		break;
4866 	case DMA_TYPE_PKTDMA:
4867 		dev_info(dev,
4868 			 "Channels: %d (tchan: %u, rchan: %u)\n",
4869 			 ch_count,
4870 			 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
4871 						       ud->tchan_cnt),
4872 			 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
4873 						       ud->rchan_cnt));
4874 	default:
4875 		break;
4876 	}
4877 
4878 	return ch_count;
4879 }
4880 
4881 static int udma_setup_rx_flush(struct udma_dev *ud)
4882 {
4883 	struct udma_rx_flush *rx_flush = &ud->rx_flush;
4884 	struct cppi5_desc_hdr_t *tr_desc;
4885 	struct cppi5_tr_type1_t *tr_req;
4886 	struct cppi5_host_desc_t *desc;
4887 	struct device *dev = ud->dev;
4888 	struct udma_hwdesc *hwdesc;
4889 	size_t tr_size;
4890 
4891 	/* Allocate 1K buffer for discarded data on RX channel teardown */
4892 	rx_flush->buffer_size = SZ_1K;
4893 	rx_flush->buffer_vaddr = devm_kzalloc(dev, rx_flush->buffer_size,
4894 					      GFP_KERNEL);
4895 	if (!rx_flush->buffer_vaddr)
4896 		return -ENOMEM;
4897 
4898 	rx_flush->buffer_paddr = dma_map_single(dev, rx_flush->buffer_vaddr,
4899 						rx_flush->buffer_size,
4900 						DMA_TO_DEVICE);
4901 	if (dma_mapping_error(dev, rx_flush->buffer_paddr))
4902 		return -ENOMEM;
4903 
4904 	/* Set up descriptor to be used for TR mode */
4905 	hwdesc = &rx_flush->hwdescs[0];
4906 	tr_size = sizeof(struct cppi5_tr_type1_t);
4907 	hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 1);
4908 	hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
4909 					ud->desc_align);
4910 
4911 	hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
4912 						GFP_KERNEL);
4913 	if (!hwdesc->cppi5_desc_vaddr)
4914 		return -ENOMEM;
4915 
4916 	hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
4917 						  hwdesc->cppi5_desc_size,
4918 						  DMA_TO_DEVICE);
4919 	if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
4920 		return -ENOMEM;
4921 
4922 	/* Start of the TR req records */
4923 	hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
4924 	/* Start address of the TR response array */
4925 	hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size;
4926 
4927 	tr_desc = hwdesc->cppi5_desc_vaddr;
4928 	cppi5_trdesc_init(tr_desc, 1, tr_size, 0, 0);
4929 	cppi5_desc_set_pktids(tr_desc, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
4930 	cppi5_desc_set_retpolicy(tr_desc, 0, 0);
4931 
4932 	tr_req = hwdesc->tr_req_base;
4933 	cppi5_tr_init(&tr_req->flags, CPPI5_TR_TYPE1, false, false,
4934 		      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
4935 	cppi5_tr_csf_set(&tr_req->flags, CPPI5_TR_CSF_SUPR_EVT);
4936 
4937 	tr_req->addr = rx_flush->buffer_paddr;
4938 	tr_req->icnt0 = rx_flush->buffer_size;
4939 	tr_req->icnt1 = 1;
4940 
4941 	dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
4942 				   hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
4943 
4944 	/* Set up descriptor to be used for packet mode */
4945 	hwdesc = &rx_flush->hwdescs[1];
4946 	hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
4947 					CPPI5_INFO0_HDESC_EPIB_SIZE +
4948 					CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE,
4949 					ud->desc_align);
4950 
4951 	hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
4952 						GFP_KERNEL);
4953 	if (!hwdesc->cppi5_desc_vaddr)
4954 		return -ENOMEM;
4955 
4956 	hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
4957 						  hwdesc->cppi5_desc_size,
4958 						  DMA_TO_DEVICE);
4959 	if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
4960 		return -ENOMEM;
4961 
4962 	desc = hwdesc->cppi5_desc_vaddr;
4963 	cppi5_hdesc_init(desc, 0, 0);
4964 	cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
4965 	cppi5_desc_set_retpolicy(&desc->hdr, 0, 0);
4966 
4967 	cppi5_hdesc_attach_buf(desc,
4968 			       rx_flush->buffer_paddr, rx_flush->buffer_size,
4969 			       rx_flush->buffer_paddr, rx_flush->buffer_size);
4970 
4971 	dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
4972 				   hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
4973 	return 0;
4974 }
4975 
4976 #ifdef CONFIG_DEBUG_FS
4977 static void udma_dbg_summary_show_chan(struct seq_file *s,
4978 				       struct dma_chan *chan)
4979 {
4980 	struct udma_chan *uc = to_udma_chan(chan);
4981 	struct udma_chan_config *ucc = &uc->config;
4982 
4983 	seq_printf(s, " %-13s| %s", dma_chan_name(chan),
4984 		   chan->dbg_client_name ?: "in-use");
4985 	if (ucc->tr_trigger_type)
4986 		seq_puts(s, " (triggered, ");
4987 	else
4988 		seq_printf(s, " (%s, ",
4989 			   dmaengine_get_direction_text(uc->config.dir));
4990 
4991 	switch (uc->config.dir) {
4992 	case DMA_MEM_TO_MEM:
4993 		if (uc->ud->match_data->type == DMA_TYPE_BCDMA) {
4994 			seq_printf(s, "bchan%d)\n", uc->bchan->id);
4995 			return;
4996 		}
4997 
4998 		seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id,
4999 			   ucc->src_thread, ucc->dst_thread);
5000 		break;
5001 	case DMA_DEV_TO_MEM:
5002 		seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id,
5003 			   ucc->src_thread, ucc->dst_thread);
5004 		if (uc->ud->match_data->type == DMA_TYPE_PKTDMA)
5005 			seq_printf(s, "rflow%d, ", uc->rflow->id);
5006 		break;
5007 	case DMA_MEM_TO_DEV:
5008 		seq_printf(s, "tchan%d [0x%04x -> 0x%04x], ", uc->tchan->id,
5009 			   ucc->src_thread, ucc->dst_thread);
5010 		if (uc->ud->match_data->type == DMA_TYPE_PKTDMA)
5011 			seq_printf(s, "tflow%d, ", uc->tchan->tflow_id);
5012 		break;
5013 	default:
5014 		seq_printf(s, ")\n");
5015 		return;
5016 	}
5017 
5018 	if (ucc->ep_type == PSIL_EP_NATIVE) {
5019 		seq_printf(s, "PSI-L Native");
5020 		if (ucc->metadata_size) {
5021 			seq_printf(s, "[%s", ucc->needs_epib ? " EPIB" : "");
5022 			if (ucc->psd_size)
5023 				seq_printf(s, " PSDsize:%u", ucc->psd_size);
5024 			seq_printf(s, " ]");
5025 		}
5026 	} else {
5027 		seq_printf(s, "PDMA");
5028 		if (ucc->enable_acc32 || ucc->enable_burst)
5029 			seq_printf(s, "[%s%s ]",
5030 				   ucc->enable_acc32 ? " ACC32" : "",
5031 				   ucc->enable_burst ? " BURST" : "");
5032 	}
5033 
5034 	seq_printf(s, ", %s)\n", ucc->pkt_mode ? "Packet mode" : "TR mode");
5035 }
5036 
5037 static void udma_dbg_summary_show(struct seq_file *s,
5038 				  struct dma_device *dma_dev)
5039 {
5040 	struct dma_chan *chan;
5041 
5042 	list_for_each_entry(chan, &dma_dev->channels, device_node) {
5043 		if (chan->client_count)
5044 			udma_dbg_summary_show_chan(s, chan);
5045 	}
5046 }
5047 #endif /* CONFIG_DEBUG_FS */
5048 
5049 #define TI_UDMAC_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
5050 				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
5051 				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
5052 				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
5053 				 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
5054 
5055 static int udma_probe(struct platform_device *pdev)
5056 {
5057 	struct device_node *navss_node = pdev->dev.parent->of_node;
5058 	const struct soc_device_attribute *soc;
5059 	struct device *dev = &pdev->dev;
5060 	struct udma_dev *ud;
5061 	const struct of_device_id *match;
5062 	int i, ret;
5063 	int ch_count;
5064 
5065 	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
5066 	if (ret)
5067 		dev_err(dev, "failed to set dma mask stuff\n");
5068 
5069 	ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL);
5070 	if (!ud)
5071 		return -ENOMEM;
5072 
5073 	match = of_match_node(udma_of_match, dev->of_node);
5074 	if (!match)
5075 		match = of_match_node(bcdma_of_match, dev->of_node);
5076 	if (!match) {
5077 		match = of_match_node(pktdma_of_match, dev->of_node);
5078 		if (!match) {
5079 			dev_err(dev, "No compatible match found\n");
5080 			return -ENODEV;
5081 		}
5082 	}
5083 	ud->match_data = match->data;
5084 
5085 	soc = soc_device_match(k3_soc_devices);
5086 	if (!soc) {
5087 		dev_err(dev, "No compatible SoC found\n");
5088 		return -ENODEV;
5089 	}
5090 	ud->soc_data = soc->data;
5091 
5092 	ret = udma_get_mmrs(pdev, ud);
5093 	if (ret)
5094 		return ret;
5095 
5096 	ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci");
5097 	if (IS_ERR(ud->tisci_rm.tisci))
5098 		return PTR_ERR(ud->tisci_rm.tisci);
5099 
5100 	ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id",
5101 				   &ud->tisci_rm.tisci_dev_id);
5102 	if (ret) {
5103 		dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
5104 		return ret;
5105 	}
5106 	pdev->id = ud->tisci_rm.tisci_dev_id;
5107 
5108 	ret = of_property_read_u32(navss_node, "ti,sci-dev-id",
5109 				   &ud->tisci_rm.tisci_navss_dev_id);
5110 	if (ret) {
5111 		dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret);
5112 		return ret;
5113 	}
5114 
5115 	if (ud->match_data->type == DMA_TYPE_UDMA) {
5116 		ret = of_property_read_u32(dev->of_node, "ti,udma-atype",
5117 					   &ud->atype);
5118 		if (!ret && ud->atype > 2) {
5119 			dev_err(dev, "Invalid atype: %u\n", ud->atype);
5120 			return -EINVAL;
5121 		}
5122 	} else {
5123 		ret = of_property_read_u32(dev->of_node, "ti,asel",
5124 					   &ud->asel);
5125 		if (!ret && ud->asel > 15) {
5126 			dev_err(dev, "Invalid asel: %u\n", ud->asel);
5127 			return -EINVAL;
5128 		}
5129 	}
5130 
5131 	ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
5132 	ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;
5133 
5134 	if (ud->match_data->type == DMA_TYPE_UDMA) {
5135 		ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
5136 	} else {
5137 		struct k3_ringacc_init_data ring_init_data;
5138 
5139 		ring_init_data.tisci = ud->tisci_rm.tisci;
5140 		ring_init_data.tisci_dev_id = ud->tisci_rm.tisci_dev_id;
5141 		if (ud->match_data->type == DMA_TYPE_BCDMA) {
5142 			ring_init_data.num_rings = ud->bchan_cnt +
5143 						   ud->tchan_cnt +
5144 						   ud->rchan_cnt;
5145 		} else {
5146 			ring_init_data.num_rings = ud->rflow_cnt +
5147 						   ud->tflow_cnt;
5148 		}
5149 
5150 		ud->ringacc = k3_ringacc_dmarings_init(pdev, &ring_init_data);
5151 	}
5152 
5153 	if (IS_ERR(ud->ringacc))
5154 		return PTR_ERR(ud->ringacc);
5155 
5156 	dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
5157 					    DOMAIN_BUS_TI_SCI_INTA_MSI);
5158 	if (!dev->msi_domain) {
5159 		dev_err(dev, "Failed to get MSI domain\n");
5160 		return -EPROBE_DEFER;
5161 	}
5162 
5163 	dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
5164 	/* cyclic operation is not supported via PKTDMA */
5165 	if (ud->match_data->type != DMA_TYPE_PKTDMA) {
5166 		dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);
5167 		ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
5168 	}
5169 
5170 	ud->ddev.device_config = udma_slave_config;
5171 	ud->ddev.device_prep_slave_sg = udma_prep_slave_sg;
5172 	ud->ddev.device_issue_pending = udma_issue_pending;
5173 	ud->ddev.device_tx_status = udma_tx_status;
5174 	ud->ddev.device_pause = udma_pause;
5175 	ud->ddev.device_resume = udma_resume;
5176 	ud->ddev.device_terminate_all = udma_terminate_all;
5177 	ud->ddev.device_synchronize = udma_synchronize;
5178 #ifdef CONFIG_DEBUG_FS
5179 	ud->ddev.dbg_summary_show = udma_dbg_summary_show;
5180 #endif
5181 
5182 	switch (ud->match_data->type) {
5183 	case DMA_TYPE_UDMA:
5184 		ud->ddev.device_alloc_chan_resources =
5185 					udma_alloc_chan_resources;
5186 		break;
5187 	case DMA_TYPE_BCDMA:
5188 		ud->ddev.device_alloc_chan_resources =
5189 					bcdma_alloc_chan_resources;
5190 		ud->ddev.device_router_config = bcdma_router_config;
5191 		break;
5192 	case DMA_TYPE_PKTDMA:
5193 		ud->ddev.device_alloc_chan_resources =
5194 					pktdma_alloc_chan_resources;
5195 		break;
5196 	default:
5197 		return -EINVAL;
5198 	}
5199 	ud->ddev.device_free_chan_resources = udma_free_chan_resources;
5200 
5201 	ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
5202 	ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
5203 	ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
5204 	ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
5205 	ud->ddev.copy_align = DMAENGINE_ALIGN_8_BYTES;
5206 	ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
5207 				       DESC_METADATA_ENGINE;
5208 	if (ud->match_data->enable_memcpy_support &&
5209 	    !(ud->match_data->type == DMA_TYPE_BCDMA && ud->bchan_cnt == 0)) {
5210 		dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask);
5211 		ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy;
5212 		ud->ddev.directions |= BIT(DMA_MEM_TO_MEM);
5213 	}
5214 
5215 	ud->ddev.dev = dev;
5216 	ud->dev = dev;
5217 	ud->psil_base = ud->match_data->psil_base;
5218 
5219 	INIT_LIST_HEAD(&ud->ddev.channels);
5220 	INIT_LIST_HEAD(&ud->desc_to_purge);
5221 
5222 	ch_count = setup_resources(ud);
5223 	if (ch_count <= 0)
5224 		return ch_count;
5225 
5226 	spin_lock_init(&ud->lock);
5227 	INIT_WORK(&ud->purge_work, udma_purge_desc_work);
5228 
5229 	ud->desc_align = 64;
5230 	if (ud->desc_align < dma_get_cache_alignment())
5231 		ud->desc_align = dma_get_cache_alignment();
5232 
5233 	ret = udma_setup_rx_flush(ud);
5234 	if (ret)
5235 		return ret;
5236 
5237 	for (i = 0; i < ud->bchan_cnt; i++) {
5238 		struct udma_bchan *bchan = &ud->bchans[i];
5239 
5240 		bchan->id = i;
5241 		bchan->reg_rt = ud->mmrs[MMR_BCHANRT] + i * 0x1000;
5242 	}
5243 
5244 	for (i = 0; i < ud->tchan_cnt; i++) {
5245 		struct udma_tchan *tchan = &ud->tchans[i];
5246 
5247 		tchan->id = i;
5248 		tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000;
5249 	}
5250 
5251 	for (i = 0; i < ud->rchan_cnt; i++) {
5252 		struct udma_rchan *rchan = &ud->rchans[i];
5253 
5254 		rchan->id = i;
5255 		rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000;
5256 	}
5257 
5258 	for (i = 0; i < ud->rflow_cnt; i++) {
5259 		struct udma_rflow *rflow = &ud->rflows[i];
5260 
5261 		rflow->id = i;
5262 	}
5263 
5264 	for (i = 0; i < ch_count; i++) {
5265 		struct udma_chan *uc = &ud->channels[i];
5266 
5267 		uc->ud = ud;
5268 		uc->vc.desc_free = udma_desc_free;
5269 		uc->id = i;
5270 		uc->bchan = NULL;
5271 		uc->tchan = NULL;
5272 		uc->rchan = NULL;
5273 		uc->config.remote_thread_id = -1;
5274 		uc->config.mapped_channel_id = -1;
5275 		uc->config.default_flow_id = -1;
5276 		uc->config.dir = DMA_MEM_TO_MEM;
5277 		uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
5278 					  dev_name(dev), i);
5279 
5280 		vchan_init(&uc->vc, &ud->ddev);
5281 		/* Use custom vchan completion handling */
5282 		tasklet_setup(&uc->vc.task, udma_vchan_complete);
5283 		init_completion(&uc->teardown_completed);
5284 		INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion);
5285 	}
5286 
5287 	ret = dma_async_device_register(&ud->ddev);
5288 	if (ret) {
5289 		dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
5290 		return ret;
5291 	}
5292 
5293 	platform_set_drvdata(pdev, ud);
5294 
5295 	ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud);
5296 	if (ret) {
5297 		dev_err(dev, "failed to register of_dma controller\n");
5298 		dma_async_device_unregister(&ud->ddev);
5299 	}
5300 
5301 	return ret;
5302 }
5303 
5304 static struct platform_driver udma_driver = {
5305 	.driver = {
5306 		.name	= "ti-udma",
5307 		.of_match_table = udma_of_match,
5308 		.suppress_bind_attrs = true,
5309 	},
5310 	.probe		= udma_probe,
5311 };
5312 builtin_platform_driver(udma_driver);
5313 
5314 static struct platform_driver bcdma_driver = {
5315 	.driver = {
5316 		.name	= "ti-bcdma",
5317 		.of_match_table = bcdma_of_match,
5318 		.suppress_bind_attrs = true,
5319 	},
5320 	.probe		= udma_probe,
5321 };
5322 builtin_platform_driver(bcdma_driver);
5323 
5324 static struct platform_driver pktdma_driver = {
5325 	.driver = {
5326 		.name	= "ti-pktdma",
5327 		.of_match_table = pktdma_of_match,
5328 		.suppress_bind_attrs = true,
5329 	},
5330 	.probe		= udma_probe,
5331 };
5332 builtin_platform_driver(pktdma_driver);
5333 
5334 /* Private interfaces to UDMA */
5335 #include "k3-udma-private.c"
5336