1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
4 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
5 */
6
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/delay.h>
10 #include <linux/dmaengine.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/dmapool.h>
13 #include <linux/err.h>
14 #include <linux/init.h>
15 #include <linux/interrupt.h>
16 #include <linux/list.h>
17 #include <linux/platform_device.h>
18 #include <linux/slab.h>
19 #include <linux/spinlock.h>
20 #include <linux/sys_soc.h>
21 #include <linux/of.h>
22 #include <linux/of_dma.h>
23 #include <linux/of_irq.h>
24 #include <linux/workqueue.h>
25 #include <linux/completion.h>
26 #include <linux/soc/ti/k3-ringacc.h>
27 #include <linux/soc/ti/ti_sci_protocol.h>
28 #include <linux/soc/ti/ti_sci_inta_msi.h>
29 #include <linux/dma/k3-event-router.h>
30 #include <linux/dma/ti-cppi5.h>
31
32 #include "../virt-dma.h"
33 #include "k3-udma.h"
34 #include "k3-psil-priv.h"
35
36 struct udma_static_tr {
37 u8 elsize; /* RPSTR0 */
38 u16 elcnt; /* RPSTR0 */
39 u16 bstcnt; /* RPSTR1 */
40 };
41
42 #define K3_UDMA_MAX_RFLOWS 1024
43 #define K3_UDMA_DEFAULT_RING_SIZE 16
44
45 /* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */
46 #define UDMA_RFLOW_SRCTAG_NONE 0
47 #define UDMA_RFLOW_SRCTAG_CFG_TAG 1
48 #define UDMA_RFLOW_SRCTAG_FLOW_ID 2
49 #define UDMA_RFLOW_SRCTAG_SRC_TAG 4
50
51 #define UDMA_RFLOW_DSTTAG_NONE 0
52 #define UDMA_RFLOW_DSTTAG_CFG_TAG 1
53 #define UDMA_RFLOW_DSTTAG_FLOW_ID 2
54 #define UDMA_RFLOW_DSTTAG_DST_TAG_LO 4
55 #define UDMA_RFLOW_DSTTAG_DST_TAG_HI 5
56
57 struct udma_chan;
58
59 enum k3_dma_type {
60 DMA_TYPE_UDMA = 0,
61 DMA_TYPE_BCDMA,
62 DMA_TYPE_PKTDMA,
63 };
64
65 enum udma_mmr {
66 MMR_GCFG = 0,
67 MMR_BCHANRT,
68 MMR_RCHANRT,
69 MMR_TCHANRT,
70 MMR_LAST,
71 };
72
73 static const char * const mmr_names[] = {
74 [MMR_GCFG] = "gcfg",
75 [MMR_BCHANRT] = "bchanrt",
76 [MMR_RCHANRT] = "rchanrt",
77 [MMR_TCHANRT] = "tchanrt",
78 };
79
80 struct udma_tchan {
81 void __iomem *reg_rt;
82
83 int id;
84 struct k3_ring *t_ring; /* Transmit ring */
85 struct k3_ring *tc_ring; /* Transmit Completion ring */
86 int tflow_id; /* applicable only for PKTDMA */
87
88 };
89
90 #define udma_bchan udma_tchan
91
92 struct udma_rflow {
93 int id;
94 struct k3_ring *fd_ring; /* Free Descriptor ring */
95 struct k3_ring *r_ring; /* Receive ring */
96 };
97
98 struct udma_rchan {
99 void __iomem *reg_rt;
100
101 int id;
102 };
103
104 struct udma_oes_offsets {
105 /* K3 UDMA Output Event Offset */
106 u32 udma_rchan;
107
108 /* BCDMA Output Event Offsets */
109 u32 bcdma_bchan_data;
110 u32 bcdma_bchan_ring;
111 u32 bcdma_tchan_data;
112 u32 bcdma_tchan_ring;
113 u32 bcdma_rchan_data;
114 u32 bcdma_rchan_ring;
115
116 /* PKTDMA Output Event Offsets */
117 u32 pktdma_tchan_flow;
118 u32 pktdma_rchan_flow;
119 };
120
121 #define UDMA_FLAG_PDMA_ACC32 BIT(0)
122 #define UDMA_FLAG_PDMA_BURST BIT(1)
123 #define UDMA_FLAG_TDTYPE BIT(2)
124 #define UDMA_FLAG_BURST_SIZE BIT(3)
125 #define UDMA_FLAGS_J7_CLASS (UDMA_FLAG_PDMA_ACC32 | \
126 UDMA_FLAG_PDMA_BURST | \
127 UDMA_FLAG_TDTYPE | \
128 UDMA_FLAG_BURST_SIZE)
129
130 struct udma_match_data {
131 enum k3_dma_type type;
132 u32 psil_base;
133 bool enable_memcpy_support;
134 u32 flags;
135 u32 statictr_z_mask;
136 u8 burst_size[3];
137 struct udma_soc_data *soc_data;
138 };
139
140 struct udma_soc_data {
141 struct udma_oes_offsets oes;
142 u32 bcdma_trigger_event_offset;
143 };
144
145 struct udma_hwdesc {
146 size_t cppi5_desc_size;
147 void *cppi5_desc_vaddr;
148 dma_addr_t cppi5_desc_paddr;
149
150 /* TR descriptor internal pointers */
151 void *tr_req_base;
152 struct cppi5_tr_resp_t *tr_resp_base;
153 };
154
155 struct udma_rx_flush {
156 struct udma_hwdesc hwdescs[2];
157
158 size_t buffer_size;
159 void *buffer_vaddr;
160 dma_addr_t buffer_paddr;
161 };
162
163 struct udma_tpl {
164 u8 levels;
165 u32 start_idx[3];
166 };
167
168 struct udma_dev {
169 struct dma_device ddev;
170 struct device *dev;
171 void __iomem *mmrs[MMR_LAST];
172 const struct udma_match_data *match_data;
173 const struct udma_soc_data *soc_data;
174
175 struct udma_tpl bchan_tpl;
176 struct udma_tpl tchan_tpl;
177 struct udma_tpl rchan_tpl;
178
179 size_t desc_align; /* alignment to use for descriptors */
180
181 struct udma_tisci_rm tisci_rm;
182
183 struct k3_ringacc *ringacc;
184
185 struct work_struct purge_work;
186 struct list_head desc_to_purge;
187 spinlock_t lock;
188
189 struct udma_rx_flush rx_flush;
190
191 int bchan_cnt;
192 int tchan_cnt;
193 int echan_cnt;
194 int rchan_cnt;
195 int rflow_cnt;
196 int tflow_cnt;
197 unsigned long *bchan_map;
198 unsigned long *tchan_map;
199 unsigned long *rchan_map;
200 unsigned long *rflow_gp_map;
201 unsigned long *rflow_gp_map_allocated;
202 unsigned long *rflow_in_use;
203 unsigned long *tflow_map;
204
205 struct udma_bchan *bchans;
206 struct udma_tchan *tchans;
207 struct udma_rchan *rchans;
208 struct udma_rflow *rflows;
209
210 struct udma_chan *channels;
211 u32 psil_base;
212 u32 atype;
213 u32 asel;
214 };
215
216 struct udma_desc {
217 struct virt_dma_desc vd;
218
219 bool terminated;
220
221 enum dma_transfer_direction dir;
222
223 struct udma_static_tr static_tr;
224 u32 residue;
225
226 unsigned int sglen;
227 unsigned int desc_idx; /* Only used for cyclic in packet mode */
228 unsigned int tr_idx;
229
230 u32 metadata_size;
231 void *metadata; /* pointer to provided metadata buffer (EPIP, PSdata) */
232
233 unsigned int hwdesc_count;
234 struct udma_hwdesc hwdesc[];
235 };
236
237 enum udma_chan_state {
238 UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */
239 UDMA_CHAN_IS_ACTIVE, /* Normal operation */
240 UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */
241 };
242
243 struct udma_tx_drain {
244 struct delayed_work work;
245 ktime_t tstamp;
246 u32 residue;
247 };
248
249 struct udma_chan_config {
250 bool pkt_mode; /* TR or packet */
251 bool needs_epib; /* EPIB is needed for the communication or not */
252 u32 psd_size; /* size of Protocol Specific Data */
253 u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
254 u32 hdesc_size; /* Size of a packet descriptor in packet mode */
255 bool notdpkt; /* Suppress sending TDC packet */
256 int remote_thread_id;
257 u32 atype;
258 u32 asel;
259 u32 src_thread;
260 u32 dst_thread;
261 enum psil_endpoint_type ep_type;
262 bool enable_acc32;
263 bool enable_burst;
264 enum udma_tp_level channel_tpl; /* Channel Throughput Level */
265
266 u32 tr_trigger_type;
267 unsigned long tx_flags;
268
269 /* PKDMA mapped channel */
270 int mapped_channel_id;
271 /* PKTDMA default tflow or rflow for mapped channel */
272 int default_flow_id;
273
274 enum dma_transfer_direction dir;
275 };
276
277 struct udma_chan {
278 struct virt_dma_chan vc;
279 struct dma_slave_config cfg;
280 struct udma_dev *ud;
281 struct device *dma_dev;
282 struct udma_desc *desc;
283 struct udma_desc *terminated_desc;
284 struct udma_static_tr static_tr;
285 char *name;
286
287 struct udma_bchan *bchan;
288 struct udma_tchan *tchan;
289 struct udma_rchan *rchan;
290 struct udma_rflow *rflow;
291
292 bool psil_paired;
293
294 int irq_num_ring;
295 int irq_num_udma;
296
297 bool cyclic;
298 bool paused;
299
300 enum udma_chan_state state;
301 struct completion teardown_completed;
302
303 struct udma_tx_drain tx_drain;
304
305 /* Channel configuration parameters */
306 struct udma_chan_config config;
307 /* Channel configuration parameters (backup) */
308 struct udma_chan_config backup_config;
309
310 /* dmapool for packet mode descriptors */
311 bool use_dma_pool;
312 struct dma_pool *hdesc_pool;
313
314 u32 id;
315 };
316
to_udma_dev(struct dma_device * d)317 static inline struct udma_dev *to_udma_dev(struct dma_device *d)
318 {
319 return container_of(d, struct udma_dev, ddev);
320 }
321
to_udma_chan(struct dma_chan * c)322 static inline struct udma_chan *to_udma_chan(struct dma_chan *c)
323 {
324 return container_of(c, struct udma_chan, vc.chan);
325 }
326
to_udma_desc(struct dma_async_tx_descriptor * t)327 static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t)
328 {
329 return container_of(t, struct udma_desc, vd.tx);
330 }
331
332 /* Generic register access functions */
udma_read(void __iomem * base,int reg)333 static inline u32 udma_read(void __iomem *base, int reg)
334 {
335 return readl(base + reg);
336 }
337
udma_write(void __iomem * base,int reg,u32 val)338 static inline void udma_write(void __iomem *base, int reg, u32 val)
339 {
340 writel(val, base + reg);
341 }
342
udma_update_bits(void __iomem * base,int reg,u32 mask,u32 val)343 static inline void udma_update_bits(void __iomem *base, int reg,
344 u32 mask, u32 val)
345 {
346 u32 tmp, orig;
347
348 orig = readl(base + reg);
349 tmp = orig & ~mask;
350 tmp |= (val & mask);
351
352 if (tmp != orig)
353 writel(tmp, base + reg);
354 }
355
356 /* TCHANRT */
udma_tchanrt_read(struct udma_chan * uc,int reg)357 static inline u32 udma_tchanrt_read(struct udma_chan *uc, int reg)
358 {
359 if (!uc->tchan)
360 return 0;
361 return udma_read(uc->tchan->reg_rt, reg);
362 }
363
udma_tchanrt_write(struct udma_chan * uc,int reg,u32 val)364 static inline void udma_tchanrt_write(struct udma_chan *uc, int reg, u32 val)
365 {
366 if (!uc->tchan)
367 return;
368 udma_write(uc->tchan->reg_rt, reg, val);
369 }
370
udma_tchanrt_update_bits(struct udma_chan * uc,int reg,u32 mask,u32 val)371 static inline void udma_tchanrt_update_bits(struct udma_chan *uc, int reg,
372 u32 mask, u32 val)
373 {
374 if (!uc->tchan)
375 return;
376 udma_update_bits(uc->tchan->reg_rt, reg, mask, val);
377 }
378
379 /* RCHANRT */
udma_rchanrt_read(struct udma_chan * uc,int reg)380 static inline u32 udma_rchanrt_read(struct udma_chan *uc, int reg)
381 {
382 if (!uc->rchan)
383 return 0;
384 return udma_read(uc->rchan->reg_rt, reg);
385 }
386
udma_rchanrt_write(struct udma_chan * uc,int reg,u32 val)387 static inline void udma_rchanrt_write(struct udma_chan *uc, int reg, u32 val)
388 {
389 if (!uc->rchan)
390 return;
391 udma_write(uc->rchan->reg_rt, reg, val);
392 }
393
udma_rchanrt_update_bits(struct udma_chan * uc,int reg,u32 mask,u32 val)394 static inline void udma_rchanrt_update_bits(struct udma_chan *uc, int reg,
395 u32 mask, u32 val)
396 {
397 if (!uc->rchan)
398 return;
399 udma_update_bits(uc->rchan->reg_rt, reg, mask, val);
400 }
401
navss_psil_pair(struct udma_dev * ud,u32 src_thread,u32 dst_thread)402 static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
403 {
404 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
405
406 dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
407 return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
408 tisci_rm->tisci_navss_dev_id,
409 src_thread, dst_thread);
410 }
411
navss_psil_unpair(struct udma_dev * ud,u32 src_thread,u32 dst_thread)412 static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
413 u32 dst_thread)
414 {
415 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
416
417 dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
418 return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
419 tisci_rm->tisci_navss_dev_id,
420 src_thread, dst_thread);
421 }
422
k3_configure_chan_coherency(struct dma_chan * chan,u32 asel)423 static void k3_configure_chan_coherency(struct dma_chan *chan, u32 asel)
424 {
425 struct device *chan_dev = &chan->dev->device;
426
427 if (asel == 0) {
428 /* No special handling for the channel */
429 chan->dev->chan_dma_dev = false;
430
431 chan_dev->dma_coherent = false;
432 chan_dev->dma_parms = NULL;
433 } else if (asel == 14 || asel == 15) {
434 chan->dev->chan_dma_dev = true;
435
436 chan_dev->dma_coherent = true;
437 dma_coerce_mask_and_coherent(chan_dev, DMA_BIT_MASK(48));
438 chan_dev->dma_parms = chan_dev->parent->dma_parms;
439 } else {
440 dev_warn(chan->device->dev, "Invalid ASEL value: %u\n", asel);
441
442 chan_dev->dma_coherent = false;
443 chan_dev->dma_parms = NULL;
444 }
445 }
446
udma_get_chan_tpl_index(struct udma_tpl * tpl_map,int chan_id)447 static u8 udma_get_chan_tpl_index(struct udma_tpl *tpl_map, int chan_id)
448 {
449 int i;
450
451 for (i = 0; i < tpl_map->levels; i++) {
452 if (chan_id >= tpl_map->start_idx[i])
453 return i;
454 }
455
456 return 0;
457 }
458
udma_reset_uchan(struct udma_chan * uc)459 static void udma_reset_uchan(struct udma_chan *uc)
460 {
461 memset(&uc->config, 0, sizeof(uc->config));
462 uc->config.remote_thread_id = -1;
463 uc->config.mapped_channel_id = -1;
464 uc->config.default_flow_id = -1;
465 uc->state = UDMA_CHAN_IS_IDLE;
466 }
467
udma_dump_chan_stdata(struct udma_chan * uc)468 static void udma_dump_chan_stdata(struct udma_chan *uc)
469 {
470 struct device *dev = uc->ud->dev;
471 u32 offset;
472 int i;
473
474 if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) {
475 dev_dbg(dev, "TCHAN State data:\n");
476 for (i = 0; i < 32; i++) {
477 offset = UDMA_CHAN_RT_STDATA_REG + i * 4;
478 dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i,
479 udma_tchanrt_read(uc, offset));
480 }
481 }
482
483 if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) {
484 dev_dbg(dev, "RCHAN State data:\n");
485 for (i = 0; i < 32; i++) {
486 offset = UDMA_CHAN_RT_STDATA_REG + i * 4;
487 dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i,
488 udma_rchanrt_read(uc, offset));
489 }
490 }
491 }
492
udma_curr_cppi5_desc_paddr(struct udma_desc * d,int idx)493 static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d,
494 int idx)
495 {
496 return d->hwdesc[idx].cppi5_desc_paddr;
497 }
498
udma_curr_cppi5_desc_vaddr(struct udma_desc * d,int idx)499 static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx)
500 {
501 return d->hwdesc[idx].cppi5_desc_vaddr;
502 }
503
udma_udma_desc_from_paddr(struct udma_chan * uc,dma_addr_t paddr)504 static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc,
505 dma_addr_t paddr)
506 {
507 struct udma_desc *d = uc->terminated_desc;
508
509 if (d) {
510 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
511 d->desc_idx);
512
513 if (desc_paddr != paddr)
514 d = NULL;
515 }
516
517 if (!d) {
518 d = uc->desc;
519 if (d) {
520 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
521 d->desc_idx);
522
523 if (desc_paddr != paddr)
524 d = NULL;
525 }
526 }
527
528 return d;
529 }
530
udma_free_hwdesc(struct udma_chan * uc,struct udma_desc * d)531 static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d)
532 {
533 if (uc->use_dma_pool) {
534 int i;
535
536 for (i = 0; i < d->hwdesc_count; i++) {
537 if (!d->hwdesc[i].cppi5_desc_vaddr)
538 continue;
539
540 dma_pool_free(uc->hdesc_pool,
541 d->hwdesc[i].cppi5_desc_vaddr,
542 d->hwdesc[i].cppi5_desc_paddr);
543
544 d->hwdesc[i].cppi5_desc_vaddr = NULL;
545 }
546 } else if (d->hwdesc[0].cppi5_desc_vaddr) {
547 dma_free_coherent(uc->dma_dev, d->hwdesc[0].cppi5_desc_size,
548 d->hwdesc[0].cppi5_desc_vaddr,
549 d->hwdesc[0].cppi5_desc_paddr);
550
551 d->hwdesc[0].cppi5_desc_vaddr = NULL;
552 }
553 }
554
udma_purge_desc_work(struct work_struct * work)555 static void udma_purge_desc_work(struct work_struct *work)
556 {
557 struct udma_dev *ud = container_of(work, typeof(*ud), purge_work);
558 struct virt_dma_desc *vd, *_vd;
559 unsigned long flags;
560 LIST_HEAD(head);
561
562 spin_lock_irqsave(&ud->lock, flags);
563 list_splice_tail_init(&ud->desc_to_purge, &head);
564 spin_unlock_irqrestore(&ud->lock, flags);
565
566 list_for_each_entry_safe(vd, _vd, &head, node) {
567 struct udma_chan *uc = to_udma_chan(vd->tx.chan);
568 struct udma_desc *d = to_udma_desc(&vd->tx);
569
570 udma_free_hwdesc(uc, d);
571 list_del(&vd->node);
572 kfree(d);
573 }
574
575 /* If more to purge, schedule the work again */
576 if (!list_empty(&ud->desc_to_purge))
577 schedule_work(&ud->purge_work);
578 }
579
udma_desc_free(struct virt_dma_desc * vd)580 static void udma_desc_free(struct virt_dma_desc *vd)
581 {
582 struct udma_dev *ud = to_udma_dev(vd->tx.chan->device);
583 struct udma_chan *uc = to_udma_chan(vd->tx.chan);
584 struct udma_desc *d = to_udma_desc(&vd->tx);
585 unsigned long flags;
586
587 if (uc->terminated_desc == d)
588 uc->terminated_desc = NULL;
589
590 if (uc->use_dma_pool) {
591 udma_free_hwdesc(uc, d);
592 kfree(d);
593 return;
594 }
595
596 spin_lock_irqsave(&ud->lock, flags);
597 list_add_tail(&vd->node, &ud->desc_to_purge);
598 spin_unlock_irqrestore(&ud->lock, flags);
599
600 schedule_work(&ud->purge_work);
601 }
602
udma_is_chan_running(struct udma_chan * uc)603 static bool udma_is_chan_running(struct udma_chan *uc)
604 {
605 u32 trt_ctl = 0;
606 u32 rrt_ctl = 0;
607
608 if (uc->tchan)
609 trt_ctl = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
610 if (uc->rchan)
611 rrt_ctl = udma_rchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
612
613 if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
614 return true;
615
616 return false;
617 }
618
udma_is_chan_paused(struct udma_chan * uc)619 static bool udma_is_chan_paused(struct udma_chan *uc)
620 {
621 u32 val, pause_mask;
622
623 switch (uc->config.dir) {
624 case DMA_DEV_TO_MEM:
625 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG);
626 pause_mask = UDMA_PEER_RT_EN_PAUSE;
627 break;
628 case DMA_MEM_TO_DEV:
629 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG);
630 pause_mask = UDMA_PEER_RT_EN_PAUSE;
631 break;
632 case DMA_MEM_TO_MEM:
633 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
634 pause_mask = UDMA_CHAN_RT_CTL_PAUSE;
635 break;
636 default:
637 return false;
638 }
639
640 if (val & pause_mask)
641 return true;
642
643 return false;
644 }
645
udma_get_rx_flush_hwdesc_paddr(struct udma_chan * uc)646 static inline dma_addr_t udma_get_rx_flush_hwdesc_paddr(struct udma_chan *uc)
647 {
648 return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr;
649 }
650
udma_push_to_ring(struct udma_chan * uc,int idx)651 static int udma_push_to_ring(struct udma_chan *uc, int idx)
652 {
653 struct udma_desc *d = uc->desc;
654 struct k3_ring *ring = NULL;
655 dma_addr_t paddr;
656
657 switch (uc->config.dir) {
658 case DMA_DEV_TO_MEM:
659 ring = uc->rflow->fd_ring;
660 break;
661 case DMA_MEM_TO_DEV:
662 case DMA_MEM_TO_MEM:
663 ring = uc->tchan->t_ring;
664 break;
665 default:
666 return -EINVAL;
667 }
668
669 /* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */
670 if (idx == -1) {
671 paddr = udma_get_rx_flush_hwdesc_paddr(uc);
672 } else {
673 paddr = udma_curr_cppi5_desc_paddr(d, idx);
674
675 wmb(); /* Ensure that writes are not moved over this point */
676 }
677
678 return k3_ringacc_ring_push(ring, &paddr);
679 }
680
udma_desc_is_rx_flush(struct udma_chan * uc,dma_addr_t addr)681 static bool udma_desc_is_rx_flush(struct udma_chan *uc, dma_addr_t addr)
682 {
683 if (uc->config.dir != DMA_DEV_TO_MEM)
684 return false;
685
686 if (addr == udma_get_rx_flush_hwdesc_paddr(uc))
687 return true;
688
689 return false;
690 }
691
udma_pop_from_ring(struct udma_chan * uc,dma_addr_t * addr)692 static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
693 {
694 struct k3_ring *ring = NULL;
695 int ret;
696
697 switch (uc->config.dir) {
698 case DMA_DEV_TO_MEM:
699 ring = uc->rflow->r_ring;
700 break;
701 case DMA_MEM_TO_DEV:
702 case DMA_MEM_TO_MEM:
703 ring = uc->tchan->tc_ring;
704 break;
705 default:
706 return -ENOENT;
707 }
708
709 ret = k3_ringacc_ring_pop(ring, addr);
710 if (ret)
711 return ret;
712
713 rmb(); /* Ensure that reads are not moved before this point */
714
715 /* Teardown completion */
716 if (cppi5_desc_is_tdcm(*addr))
717 return 0;
718
719 /* Check for flush descriptor */
720 if (udma_desc_is_rx_flush(uc, *addr))
721 return -ENOENT;
722
723 return 0;
724 }
725
udma_reset_rings(struct udma_chan * uc)726 static void udma_reset_rings(struct udma_chan *uc)
727 {
728 struct k3_ring *ring1 = NULL;
729 struct k3_ring *ring2 = NULL;
730
731 switch (uc->config.dir) {
732 case DMA_DEV_TO_MEM:
733 if (uc->rchan) {
734 ring1 = uc->rflow->fd_ring;
735 ring2 = uc->rflow->r_ring;
736 }
737 break;
738 case DMA_MEM_TO_DEV:
739 case DMA_MEM_TO_MEM:
740 if (uc->tchan) {
741 ring1 = uc->tchan->t_ring;
742 ring2 = uc->tchan->tc_ring;
743 }
744 break;
745 default:
746 break;
747 }
748
749 if (ring1)
750 k3_ringacc_ring_reset_dma(ring1,
751 k3_ringacc_ring_get_occ(ring1));
752 if (ring2)
753 k3_ringacc_ring_reset(ring2);
754
755 /* make sure we are not leaking memory by stalled descriptor */
756 if (uc->terminated_desc) {
757 udma_desc_free(&uc->terminated_desc->vd);
758 uc->terminated_desc = NULL;
759 }
760 }
761
udma_decrement_byte_counters(struct udma_chan * uc,u32 val)762 static void udma_decrement_byte_counters(struct udma_chan *uc, u32 val)
763 {
764 if (uc->desc->dir == DMA_DEV_TO_MEM) {
765 udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
766 udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
767 if (uc->config.ep_type != PSIL_EP_NATIVE)
768 udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
769 } else {
770 udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
771 udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
772 if (!uc->bchan && uc->config.ep_type != PSIL_EP_NATIVE)
773 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
774 }
775 }
776
udma_reset_counters(struct udma_chan * uc)777 static void udma_reset_counters(struct udma_chan *uc)
778 {
779 u32 val;
780
781 if (uc->tchan) {
782 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
783 udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
784
785 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
786 udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
787
788 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG);
789 udma_tchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val);
790
791 if (!uc->bchan) {
792 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
793 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
794 }
795 }
796
797 if (uc->rchan) {
798 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
799 udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
800
801 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
802 udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
803
804 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG);
805 udma_rchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val);
806
807 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
808 udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
809 }
810 }
811
udma_reset_chan(struct udma_chan * uc,bool hard)812 static int udma_reset_chan(struct udma_chan *uc, bool hard)
813 {
814 switch (uc->config.dir) {
815 case DMA_DEV_TO_MEM:
816 udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
817 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
818 break;
819 case DMA_MEM_TO_DEV:
820 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
821 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
822 break;
823 case DMA_MEM_TO_MEM:
824 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
825 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
826 break;
827 default:
828 return -EINVAL;
829 }
830
831 /* Reset all counters */
832 udma_reset_counters(uc);
833
834 /* Hard reset: re-initialize the channel to reset */
835 if (hard) {
836 struct udma_chan_config ucc_backup;
837 int ret;
838
839 memcpy(&ucc_backup, &uc->config, sizeof(uc->config));
840 uc->ud->ddev.device_free_chan_resources(&uc->vc.chan);
841
842 /* restore the channel configuration */
843 memcpy(&uc->config, &ucc_backup, sizeof(uc->config));
844 ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan);
845 if (ret)
846 return ret;
847
848 /*
849 * Setting forced teardown after forced reset helps recovering
850 * the rchan.
851 */
852 if (uc->config.dir == DMA_DEV_TO_MEM)
853 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
854 UDMA_CHAN_RT_CTL_EN |
855 UDMA_CHAN_RT_CTL_TDOWN |
856 UDMA_CHAN_RT_CTL_FTDOWN);
857 }
858 uc->state = UDMA_CHAN_IS_IDLE;
859
860 return 0;
861 }
862
udma_start_desc(struct udma_chan * uc)863 static void udma_start_desc(struct udma_chan *uc)
864 {
865 struct udma_chan_config *ucc = &uc->config;
866
867 if (uc->ud->match_data->type == DMA_TYPE_UDMA && ucc->pkt_mode &&
868 (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) {
869 int i;
870
871 /*
872 * UDMA only: Push all descriptors to ring for packet mode
873 * cyclic or RX
874 * PKTDMA supports pre-linked descriptor and cyclic is not
875 * supported
876 */
877 for (i = 0; i < uc->desc->sglen; i++)
878 udma_push_to_ring(uc, i);
879 } else {
880 udma_push_to_ring(uc, 0);
881 }
882 }
883
udma_chan_needs_reconfiguration(struct udma_chan * uc)884 static bool udma_chan_needs_reconfiguration(struct udma_chan *uc)
885 {
886 /* Only PDMAs have staticTR */
887 if (uc->config.ep_type == PSIL_EP_NATIVE)
888 return false;
889
890 /* Check if the staticTR configuration has changed for TX */
891 if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr)))
892 return true;
893
894 return false;
895 }
896
udma_start(struct udma_chan * uc)897 static int udma_start(struct udma_chan *uc)
898 {
899 struct virt_dma_desc *vd = vchan_next_desc(&uc->vc);
900
901 if (!vd) {
902 uc->desc = NULL;
903 return -ENOENT;
904 }
905
906 list_del(&vd->node);
907
908 uc->desc = to_udma_desc(&vd->tx);
909
910 /* Channel is already running and does not need reconfiguration */
911 if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) {
912 udma_start_desc(uc);
913 goto out;
914 }
915
916 /* Make sure that we clear the teardown bit, if it is set */
917 udma_reset_chan(uc, false);
918
919 /* Push descriptors before we start the channel */
920 udma_start_desc(uc);
921
922 switch (uc->desc->dir) {
923 case DMA_DEV_TO_MEM:
924 /* Config remote TR */
925 if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
926 u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
927 PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
928 const struct udma_match_data *match_data =
929 uc->ud->match_data;
930
931 if (uc->config.enable_acc32)
932 val |= PDMA_STATIC_TR_XY_ACC32;
933 if (uc->config.enable_burst)
934 val |= PDMA_STATIC_TR_XY_BURST;
935
936 udma_rchanrt_write(uc,
937 UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG,
938 val);
939
940 udma_rchanrt_write(uc,
941 UDMA_CHAN_RT_PEER_STATIC_TR_Z_REG,
942 PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt,
943 match_data->statictr_z_mask));
944
945 /* save the current staticTR configuration */
946 memcpy(&uc->static_tr, &uc->desc->static_tr,
947 sizeof(uc->static_tr));
948 }
949
950 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
951 UDMA_CHAN_RT_CTL_EN);
952
953 /* Enable remote */
954 udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
955 UDMA_PEER_RT_EN_ENABLE);
956
957 break;
958 case DMA_MEM_TO_DEV:
959 /* Config remote TR */
960 if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
961 u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
962 PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
963
964 if (uc->config.enable_acc32)
965 val |= PDMA_STATIC_TR_XY_ACC32;
966 if (uc->config.enable_burst)
967 val |= PDMA_STATIC_TR_XY_BURST;
968
969 udma_tchanrt_write(uc,
970 UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG,
971 val);
972
973 /* save the current staticTR configuration */
974 memcpy(&uc->static_tr, &uc->desc->static_tr,
975 sizeof(uc->static_tr));
976 }
977
978 /* Enable remote */
979 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
980 UDMA_PEER_RT_EN_ENABLE);
981
982 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
983 UDMA_CHAN_RT_CTL_EN);
984
985 break;
986 case DMA_MEM_TO_MEM:
987 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
988 UDMA_CHAN_RT_CTL_EN);
989 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
990 UDMA_CHAN_RT_CTL_EN);
991
992 break;
993 default:
994 return -EINVAL;
995 }
996
997 uc->state = UDMA_CHAN_IS_ACTIVE;
998 out:
999
1000 return 0;
1001 }
1002
udma_stop(struct udma_chan * uc)1003 static int udma_stop(struct udma_chan *uc)
1004 {
1005 enum udma_chan_state old_state = uc->state;
1006
1007 uc->state = UDMA_CHAN_IS_TERMINATING;
1008 reinit_completion(&uc->teardown_completed);
1009
1010 switch (uc->config.dir) {
1011 case DMA_DEV_TO_MEM:
1012 if (!uc->cyclic && !uc->desc)
1013 udma_push_to_ring(uc, -1);
1014
1015 udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
1016 UDMA_PEER_RT_EN_ENABLE |
1017 UDMA_PEER_RT_EN_TEARDOWN);
1018 break;
1019 case DMA_MEM_TO_DEV:
1020 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
1021 UDMA_PEER_RT_EN_ENABLE |
1022 UDMA_PEER_RT_EN_FLUSH);
1023 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
1024 UDMA_CHAN_RT_CTL_EN |
1025 UDMA_CHAN_RT_CTL_TDOWN);
1026 break;
1027 case DMA_MEM_TO_MEM:
1028 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
1029 UDMA_CHAN_RT_CTL_EN |
1030 UDMA_CHAN_RT_CTL_TDOWN);
1031 break;
1032 default:
1033 uc->state = old_state;
1034 complete_all(&uc->teardown_completed);
1035 return -EINVAL;
1036 }
1037
1038 return 0;
1039 }
1040
udma_cyclic_packet_elapsed(struct udma_chan * uc)1041 static void udma_cyclic_packet_elapsed(struct udma_chan *uc)
1042 {
1043 struct udma_desc *d = uc->desc;
1044 struct cppi5_host_desc_t *h_desc;
1045
1046 h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr;
1047 cppi5_hdesc_reset_to_original(h_desc);
1048 udma_push_to_ring(uc, d->desc_idx);
1049 d->desc_idx = (d->desc_idx + 1) % d->sglen;
1050 }
1051
udma_fetch_epib(struct udma_chan * uc,struct udma_desc * d)1052 static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d)
1053 {
1054 struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr;
1055
1056 memcpy(d->metadata, h_desc->epib, d->metadata_size);
1057 }
1058
udma_is_desc_really_done(struct udma_chan * uc,struct udma_desc * d)1059 static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
1060 {
1061 u32 peer_bcnt, bcnt;
1062
1063 /*
1064 * Only TX towards PDMA is affected.
1065 * If DMA_PREP_INTERRUPT is not set by consumer then skip the transfer
1066 * completion calculation, consumer must ensure that there is no stale
1067 * data in DMA fabric in this case.
1068 */
1069 if (uc->config.ep_type == PSIL_EP_NATIVE ||
1070 uc->config.dir != DMA_MEM_TO_DEV || !(uc->config.tx_flags & DMA_PREP_INTERRUPT))
1071 return true;
1072
1073 peer_bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
1074 bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
1075
1076 /* Transfer is incomplete, store current residue and time stamp */
1077 if (peer_bcnt < bcnt) {
1078 uc->tx_drain.residue = bcnt - peer_bcnt;
1079 uc->tx_drain.tstamp = ktime_get();
1080 return false;
1081 }
1082
1083 return true;
1084 }
1085
udma_check_tx_completion(struct work_struct * work)1086 static void udma_check_tx_completion(struct work_struct *work)
1087 {
1088 struct udma_chan *uc = container_of(work, typeof(*uc),
1089 tx_drain.work.work);
1090 bool desc_done = true;
1091 u32 residue_diff;
1092 ktime_t time_diff;
1093 unsigned long delay;
1094
1095 while (1) {
1096 if (uc->desc) {
1097 /* Get previous residue and time stamp */
1098 residue_diff = uc->tx_drain.residue;
1099 time_diff = uc->tx_drain.tstamp;
1100 /*
1101 * Get current residue and time stamp or see if
1102 * transfer is complete
1103 */
1104 desc_done = udma_is_desc_really_done(uc, uc->desc);
1105 }
1106
1107 if (!desc_done) {
1108 /*
1109 * Find the time delta and residue delta w.r.t
1110 * previous poll
1111 */
1112 time_diff = ktime_sub(uc->tx_drain.tstamp,
1113 time_diff) + 1;
1114 residue_diff -= uc->tx_drain.residue;
1115 if (residue_diff) {
1116 /*
1117 * Try to guess when we should check
1118 * next time by calculating rate at
1119 * which data is being drained at the
1120 * peer device
1121 */
1122 delay = (time_diff / residue_diff) *
1123 uc->tx_drain.residue;
1124 } else {
1125 /* No progress, check again in 1 second */
1126 schedule_delayed_work(&uc->tx_drain.work, HZ);
1127 break;
1128 }
1129
1130 usleep_range(ktime_to_us(delay),
1131 ktime_to_us(delay) + 10);
1132 continue;
1133 }
1134
1135 if (uc->desc) {
1136 struct udma_desc *d = uc->desc;
1137
1138 udma_decrement_byte_counters(uc, d->residue);
1139 udma_start(uc);
1140 vchan_cookie_complete(&d->vd);
1141 break;
1142 }
1143
1144 break;
1145 }
1146 }
1147
udma_ring_irq_handler(int irq,void * data)1148 static irqreturn_t udma_ring_irq_handler(int irq, void *data)
1149 {
1150 struct udma_chan *uc = data;
1151 struct udma_desc *d;
1152 dma_addr_t paddr = 0;
1153
1154 if (udma_pop_from_ring(uc, &paddr) || !paddr)
1155 return IRQ_HANDLED;
1156
1157 spin_lock(&uc->vc.lock);
1158
1159 /* Teardown completion message */
1160 if (cppi5_desc_is_tdcm(paddr)) {
1161 complete_all(&uc->teardown_completed);
1162
1163 if (uc->terminated_desc) {
1164 udma_desc_free(&uc->terminated_desc->vd);
1165 uc->terminated_desc = NULL;
1166 }
1167
1168 if (!uc->desc)
1169 udma_start(uc);
1170
1171 goto out;
1172 }
1173
1174 d = udma_udma_desc_from_paddr(uc, paddr);
1175
1176 if (d) {
1177 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
1178 d->desc_idx);
1179 if (desc_paddr != paddr) {
1180 dev_err(uc->ud->dev, "not matching descriptors!\n");
1181 goto out;
1182 }
1183
1184 if (d == uc->desc) {
1185 /* active descriptor */
1186 if (uc->cyclic) {
1187 udma_cyclic_packet_elapsed(uc);
1188 vchan_cyclic_callback(&d->vd);
1189 } else {
1190 if (udma_is_desc_really_done(uc, d)) {
1191 udma_decrement_byte_counters(uc, d->residue);
1192 udma_start(uc);
1193 vchan_cookie_complete(&d->vd);
1194 } else {
1195 schedule_delayed_work(&uc->tx_drain.work,
1196 0);
1197 }
1198 }
1199 } else {
1200 /*
1201 * terminated descriptor, mark the descriptor as
1202 * completed to update the channel's cookie marker
1203 */
1204 dma_cookie_complete(&d->vd.tx);
1205 }
1206 }
1207 out:
1208 spin_unlock(&uc->vc.lock);
1209
1210 return IRQ_HANDLED;
1211 }
1212
udma_udma_irq_handler(int irq,void * data)1213 static irqreturn_t udma_udma_irq_handler(int irq, void *data)
1214 {
1215 struct udma_chan *uc = data;
1216 struct udma_desc *d;
1217
1218 spin_lock(&uc->vc.lock);
1219 d = uc->desc;
1220 if (d) {
1221 d->tr_idx = (d->tr_idx + 1) % d->sglen;
1222
1223 if (uc->cyclic) {
1224 vchan_cyclic_callback(&d->vd);
1225 } else {
1226 /* TODO: figure out the real amount of data */
1227 udma_decrement_byte_counters(uc, d->residue);
1228 udma_start(uc);
1229 vchan_cookie_complete(&d->vd);
1230 }
1231 }
1232
1233 spin_unlock(&uc->vc.lock);
1234
1235 return IRQ_HANDLED;
1236 }
1237
1238 /**
1239 * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
1240 * @ud: UDMA device
1241 * @from: Start the search from this flow id number
1242 * @cnt: Number of consecutive flow ids to allocate
1243 *
1244 * Allocate range of RX flow ids for future use, those flows can be requested
1245 * only using explicit flow id number. if @from is set to -1 it will try to find
1246 * first free range. if @from is positive value it will force allocation only
1247 * of the specified range of flows.
1248 *
1249 * Returns -ENOMEM if can't find free range.
1250 * -EEXIST if requested range is busy.
1251 * -EINVAL if wrong input values passed.
1252 * Returns flow id on success.
1253 */
__udma_alloc_gp_rflow_range(struct udma_dev * ud,int from,int cnt)1254 static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
1255 {
1256 int start, tmp_from;
1257 DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);
1258
1259 tmp_from = from;
1260 if (tmp_from < 0)
1261 tmp_from = ud->rchan_cnt;
1262 /* default flows can't be allocated and accessible only by id */
1263 if (tmp_from < ud->rchan_cnt)
1264 return -EINVAL;
1265
1266 if (tmp_from + cnt > ud->rflow_cnt)
1267 return -EINVAL;
1268
1269 bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
1270 ud->rflow_cnt);
1271
1272 start = bitmap_find_next_zero_area(tmp,
1273 ud->rflow_cnt,
1274 tmp_from, cnt, 0);
1275 if (start >= ud->rflow_cnt)
1276 return -ENOMEM;
1277
1278 if (from >= 0 && start != from)
1279 return -EEXIST;
1280
1281 bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
1282 return start;
1283 }
1284
__udma_free_gp_rflow_range(struct udma_dev * ud,int from,int cnt)1285 static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
1286 {
1287 if (from < ud->rchan_cnt)
1288 return -EINVAL;
1289 if (from + cnt > ud->rflow_cnt)
1290 return -EINVAL;
1291
1292 bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
1293 return 0;
1294 }
1295
__udma_get_rflow(struct udma_dev * ud,int id)1296 static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
1297 {
1298 /*
1299 * Attempt to request rflow by ID can be made for any rflow
1300 * if not in use with assumption that caller knows what's doing.
1301 * TI-SCI FW will perform additional permission check ant way, it's
1302 * safe
1303 */
1304
1305 if (id < 0 || id >= ud->rflow_cnt)
1306 return ERR_PTR(-ENOENT);
1307
1308 if (test_bit(id, ud->rflow_in_use))
1309 return ERR_PTR(-ENOENT);
1310
1311 if (ud->rflow_gp_map) {
1312 /* GP rflow has to be allocated first */
1313 if (!test_bit(id, ud->rflow_gp_map) &&
1314 !test_bit(id, ud->rflow_gp_map_allocated))
1315 return ERR_PTR(-EINVAL);
1316 }
1317
1318 dev_dbg(ud->dev, "get rflow%d\n", id);
1319 set_bit(id, ud->rflow_in_use);
1320 return &ud->rflows[id];
1321 }
1322
__udma_put_rflow(struct udma_dev * ud,struct udma_rflow * rflow)1323 static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow)
1324 {
1325 if (!test_bit(rflow->id, ud->rflow_in_use)) {
1326 dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id);
1327 return;
1328 }
1329
1330 dev_dbg(ud->dev, "put rflow%d\n", rflow->id);
1331 clear_bit(rflow->id, ud->rflow_in_use);
1332 }
1333
1334 #define UDMA_RESERVE_RESOURCE(res) \
1335 static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \
1336 enum udma_tp_level tpl, \
1337 int id) \
1338 { \
1339 if (id >= 0) { \
1340 if (test_bit(id, ud->res##_map)) { \
1341 dev_err(ud->dev, "res##%d is in use\n", id); \
1342 return ERR_PTR(-ENOENT); \
1343 } \
1344 } else { \
1345 int start; \
1346 \
1347 if (tpl >= ud->res##_tpl.levels) \
1348 tpl = ud->res##_tpl.levels - 1; \
1349 \
1350 start = ud->res##_tpl.start_idx[tpl]; \
1351 \
1352 id = find_next_zero_bit(ud->res##_map, ud->res##_cnt, \
1353 start); \
1354 if (id == ud->res##_cnt) { \
1355 return ERR_PTR(-ENOENT); \
1356 } \
1357 } \
1358 \
1359 set_bit(id, ud->res##_map); \
1360 return &ud->res##s[id]; \
1361 }
1362
1363 UDMA_RESERVE_RESOURCE(bchan);
1364 UDMA_RESERVE_RESOURCE(tchan);
1365 UDMA_RESERVE_RESOURCE(rchan);
1366
bcdma_get_bchan(struct udma_chan * uc)1367 static int bcdma_get_bchan(struct udma_chan *uc)
1368 {
1369 struct udma_dev *ud = uc->ud;
1370 enum udma_tp_level tpl;
1371 int ret;
1372
1373 if (uc->bchan) {
1374 dev_dbg(ud->dev, "chan%d: already have bchan%d allocated\n",
1375 uc->id, uc->bchan->id);
1376 return 0;
1377 }
1378
1379 /*
1380 * Use normal channels for peripherals, and highest TPL channel for
1381 * mem2mem
1382 */
1383 if (uc->config.tr_trigger_type)
1384 tpl = 0;
1385 else
1386 tpl = ud->bchan_tpl.levels - 1;
1387
1388 uc->bchan = __udma_reserve_bchan(ud, tpl, -1);
1389 if (IS_ERR(uc->bchan)) {
1390 ret = PTR_ERR(uc->bchan);
1391 uc->bchan = NULL;
1392 return ret;
1393 }
1394
1395 uc->tchan = uc->bchan;
1396
1397 return 0;
1398 }
1399
udma_get_tchan(struct udma_chan * uc)1400 static int udma_get_tchan(struct udma_chan *uc)
1401 {
1402 struct udma_dev *ud = uc->ud;
1403 int ret;
1404
1405 if (uc->tchan) {
1406 dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
1407 uc->id, uc->tchan->id);
1408 return 0;
1409 }
1410
1411 /*
1412 * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels.
1413 * For PKTDMA mapped channels it is configured to a channel which must
1414 * be used to service the peripheral.
1415 */
1416 uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl,
1417 uc->config.mapped_channel_id);
1418 if (IS_ERR(uc->tchan)) {
1419 ret = PTR_ERR(uc->tchan);
1420 uc->tchan = NULL;
1421 return ret;
1422 }
1423
1424 if (ud->tflow_cnt) {
1425 int tflow_id;
1426
1427 /* Only PKTDMA have support for tx flows */
1428 if (uc->config.default_flow_id >= 0)
1429 tflow_id = uc->config.default_flow_id;
1430 else
1431 tflow_id = uc->tchan->id;
1432
1433 if (test_bit(tflow_id, ud->tflow_map)) {
1434 dev_err(ud->dev, "tflow%d is in use\n", tflow_id);
1435 clear_bit(uc->tchan->id, ud->tchan_map);
1436 uc->tchan = NULL;
1437 return -ENOENT;
1438 }
1439
1440 uc->tchan->tflow_id = tflow_id;
1441 set_bit(tflow_id, ud->tflow_map);
1442 } else {
1443 uc->tchan->tflow_id = -1;
1444 }
1445
1446 return 0;
1447 }
1448
udma_get_rchan(struct udma_chan * uc)1449 static int udma_get_rchan(struct udma_chan *uc)
1450 {
1451 struct udma_dev *ud = uc->ud;
1452 int ret;
1453
1454 if (uc->rchan) {
1455 dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
1456 uc->id, uc->rchan->id);
1457 return 0;
1458 }
1459
1460 /*
1461 * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels.
1462 * For PKTDMA mapped channels it is configured to a channel which must
1463 * be used to service the peripheral.
1464 */
1465 uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl,
1466 uc->config.mapped_channel_id);
1467 if (IS_ERR(uc->rchan)) {
1468 ret = PTR_ERR(uc->rchan);
1469 uc->rchan = NULL;
1470 return ret;
1471 }
1472
1473 return 0;
1474 }
1475
udma_get_chan_pair(struct udma_chan * uc)1476 static int udma_get_chan_pair(struct udma_chan *uc)
1477 {
1478 struct udma_dev *ud = uc->ud;
1479 int chan_id, end;
1480
1481 if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
1482 dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
1483 uc->id, uc->tchan->id);
1484 return 0;
1485 }
1486
1487 if (uc->tchan) {
1488 dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
1489 uc->id, uc->tchan->id);
1490 return -EBUSY;
1491 } else if (uc->rchan) {
1492 dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
1493 uc->id, uc->rchan->id);
1494 return -EBUSY;
1495 }
1496
1497 /* Can be optimized, but let's have it like this for now */
1498 end = min(ud->tchan_cnt, ud->rchan_cnt);
1499 /*
1500 * Try to use the highest TPL channel pair for MEM_TO_MEM channels
1501 * Note: in UDMAP the channel TPL is symmetric between tchan and rchan
1502 */
1503 chan_id = ud->tchan_tpl.start_idx[ud->tchan_tpl.levels - 1];
1504 for (; chan_id < end; chan_id++) {
1505 if (!test_bit(chan_id, ud->tchan_map) &&
1506 !test_bit(chan_id, ud->rchan_map))
1507 break;
1508 }
1509
1510 if (chan_id == end)
1511 return -ENOENT;
1512
1513 set_bit(chan_id, ud->tchan_map);
1514 set_bit(chan_id, ud->rchan_map);
1515 uc->tchan = &ud->tchans[chan_id];
1516 uc->rchan = &ud->rchans[chan_id];
1517
1518 /* UDMA does not use tx flows */
1519 uc->tchan->tflow_id = -1;
1520
1521 return 0;
1522 }
1523
udma_get_rflow(struct udma_chan * uc,int flow_id)1524 static int udma_get_rflow(struct udma_chan *uc, int flow_id)
1525 {
1526 struct udma_dev *ud = uc->ud;
1527 int ret;
1528
1529 if (!uc->rchan) {
1530 dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id);
1531 return -EINVAL;
1532 }
1533
1534 if (uc->rflow) {
1535 dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
1536 uc->id, uc->rflow->id);
1537 return 0;
1538 }
1539
1540 uc->rflow = __udma_get_rflow(ud, flow_id);
1541 if (IS_ERR(uc->rflow)) {
1542 ret = PTR_ERR(uc->rflow);
1543 uc->rflow = NULL;
1544 return ret;
1545 }
1546
1547 return 0;
1548 }
1549
bcdma_put_bchan(struct udma_chan * uc)1550 static void bcdma_put_bchan(struct udma_chan *uc)
1551 {
1552 struct udma_dev *ud = uc->ud;
1553
1554 if (uc->bchan) {
1555 dev_dbg(ud->dev, "chan%d: put bchan%d\n", uc->id,
1556 uc->bchan->id);
1557 clear_bit(uc->bchan->id, ud->bchan_map);
1558 uc->bchan = NULL;
1559 uc->tchan = NULL;
1560 }
1561 }
1562
udma_put_rchan(struct udma_chan * uc)1563 static void udma_put_rchan(struct udma_chan *uc)
1564 {
1565 struct udma_dev *ud = uc->ud;
1566
1567 if (uc->rchan) {
1568 dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
1569 uc->rchan->id);
1570 clear_bit(uc->rchan->id, ud->rchan_map);
1571 uc->rchan = NULL;
1572 }
1573 }
1574
udma_put_tchan(struct udma_chan * uc)1575 static void udma_put_tchan(struct udma_chan *uc)
1576 {
1577 struct udma_dev *ud = uc->ud;
1578
1579 if (uc->tchan) {
1580 dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
1581 uc->tchan->id);
1582 clear_bit(uc->tchan->id, ud->tchan_map);
1583
1584 if (uc->tchan->tflow_id >= 0)
1585 clear_bit(uc->tchan->tflow_id, ud->tflow_map);
1586
1587 uc->tchan = NULL;
1588 }
1589 }
1590
udma_put_rflow(struct udma_chan * uc)1591 static void udma_put_rflow(struct udma_chan *uc)
1592 {
1593 struct udma_dev *ud = uc->ud;
1594
1595 if (uc->rflow) {
1596 dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
1597 uc->rflow->id);
1598 __udma_put_rflow(ud, uc->rflow);
1599 uc->rflow = NULL;
1600 }
1601 }
1602
bcdma_free_bchan_resources(struct udma_chan * uc)1603 static void bcdma_free_bchan_resources(struct udma_chan *uc)
1604 {
1605 if (!uc->bchan)
1606 return;
1607
1608 k3_ringacc_ring_free(uc->bchan->tc_ring);
1609 k3_ringacc_ring_free(uc->bchan->t_ring);
1610 uc->bchan->tc_ring = NULL;
1611 uc->bchan->t_ring = NULL;
1612 k3_configure_chan_coherency(&uc->vc.chan, 0);
1613
1614 bcdma_put_bchan(uc);
1615 }
1616
bcdma_alloc_bchan_resources(struct udma_chan * uc)1617 static int bcdma_alloc_bchan_resources(struct udma_chan *uc)
1618 {
1619 struct k3_ring_cfg ring_cfg;
1620 struct udma_dev *ud = uc->ud;
1621 int ret;
1622
1623 ret = bcdma_get_bchan(uc);
1624 if (ret)
1625 return ret;
1626
1627 ret = k3_ringacc_request_rings_pair(ud->ringacc, uc->bchan->id, -1,
1628 &uc->bchan->t_ring,
1629 &uc->bchan->tc_ring);
1630 if (ret) {
1631 ret = -EBUSY;
1632 goto err_ring;
1633 }
1634
1635 memset(&ring_cfg, 0, sizeof(ring_cfg));
1636 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1637 ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1638 ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
1639
1640 k3_configure_chan_coherency(&uc->vc.chan, ud->asel);
1641 ring_cfg.asel = ud->asel;
1642 ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
1643
1644 ret = k3_ringacc_ring_cfg(uc->bchan->t_ring, &ring_cfg);
1645 if (ret)
1646 goto err_ringcfg;
1647
1648 return 0;
1649
1650 err_ringcfg:
1651 k3_ringacc_ring_free(uc->bchan->tc_ring);
1652 uc->bchan->tc_ring = NULL;
1653 k3_ringacc_ring_free(uc->bchan->t_ring);
1654 uc->bchan->t_ring = NULL;
1655 k3_configure_chan_coherency(&uc->vc.chan, 0);
1656 err_ring:
1657 bcdma_put_bchan(uc);
1658
1659 return ret;
1660 }
1661
udma_free_tx_resources(struct udma_chan * uc)1662 static void udma_free_tx_resources(struct udma_chan *uc)
1663 {
1664 if (!uc->tchan)
1665 return;
1666
1667 k3_ringacc_ring_free(uc->tchan->t_ring);
1668 k3_ringacc_ring_free(uc->tchan->tc_ring);
1669 uc->tchan->t_ring = NULL;
1670 uc->tchan->tc_ring = NULL;
1671
1672 udma_put_tchan(uc);
1673 }
1674
udma_alloc_tx_resources(struct udma_chan * uc)1675 static int udma_alloc_tx_resources(struct udma_chan *uc)
1676 {
1677 struct k3_ring_cfg ring_cfg;
1678 struct udma_dev *ud = uc->ud;
1679 struct udma_tchan *tchan;
1680 int ring_idx, ret;
1681
1682 ret = udma_get_tchan(uc);
1683 if (ret)
1684 return ret;
1685
1686 tchan = uc->tchan;
1687 if (tchan->tflow_id >= 0)
1688 ring_idx = tchan->tflow_id;
1689 else
1690 ring_idx = ud->bchan_cnt + tchan->id;
1691
1692 ret = k3_ringacc_request_rings_pair(ud->ringacc, ring_idx, -1,
1693 &tchan->t_ring,
1694 &tchan->tc_ring);
1695 if (ret) {
1696 ret = -EBUSY;
1697 goto err_ring;
1698 }
1699
1700 memset(&ring_cfg, 0, sizeof(ring_cfg));
1701 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1702 ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1703 if (ud->match_data->type == DMA_TYPE_UDMA) {
1704 ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
1705 } else {
1706 ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
1707
1708 k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel);
1709 ring_cfg.asel = uc->config.asel;
1710 ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
1711 }
1712
1713 ret = k3_ringacc_ring_cfg(tchan->t_ring, &ring_cfg);
1714 ret |= k3_ringacc_ring_cfg(tchan->tc_ring, &ring_cfg);
1715
1716 if (ret)
1717 goto err_ringcfg;
1718
1719 return 0;
1720
1721 err_ringcfg:
1722 k3_ringacc_ring_free(uc->tchan->tc_ring);
1723 uc->tchan->tc_ring = NULL;
1724 k3_ringacc_ring_free(uc->tchan->t_ring);
1725 uc->tchan->t_ring = NULL;
1726 err_ring:
1727 udma_put_tchan(uc);
1728
1729 return ret;
1730 }
1731
udma_free_rx_resources(struct udma_chan * uc)1732 static void udma_free_rx_resources(struct udma_chan *uc)
1733 {
1734 if (!uc->rchan)
1735 return;
1736
1737 if (uc->rflow) {
1738 struct udma_rflow *rflow = uc->rflow;
1739
1740 k3_ringacc_ring_free(rflow->fd_ring);
1741 k3_ringacc_ring_free(rflow->r_ring);
1742 rflow->fd_ring = NULL;
1743 rflow->r_ring = NULL;
1744
1745 udma_put_rflow(uc);
1746 }
1747
1748 udma_put_rchan(uc);
1749 }
1750
udma_alloc_rx_resources(struct udma_chan * uc)1751 static int udma_alloc_rx_resources(struct udma_chan *uc)
1752 {
1753 struct udma_dev *ud = uc->ud;
1754 struct k3_ring_cfg ring_cfg;
1755 struct udma_rflow *rflow;
1756 int fd_ring_id;
1757 int ret;
1758
1759 ret = udma_get_rchan(uc);
1760 if (ret)
1761 return ret;
1762
1763 /* For MEM_TO_MEM we don't need rflow or rings */
1764 if (uc->config.dir == DMA_MEM_TO_MEM)
1765 return 0;
1766
1767 if (uc->config.default_flow_id >= 0)
1768 ret = udma_get_rflow(uc, uc->config.default_flow_id);
1769 else
1770 ret = udma_get_rflow(uc, uc->rchan->id);
1771
1772 if (ret) {
1773 ret = -EBUSY;
1774 goto err_rflow;
1775 }
1776
1777 rflow = uc->rflow;
1778 if (ud->tflow_cnt)
1779 fd_ring_id = ud->tflow_cnt + rflow->id;
1780 else
1781 fd_ring_id = ud->bchan_cnt + ud->tchan_cnt + ud->echan_cnt +
1782 uc->rchan->id;
1783
1784 ret = k3_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
1785 &rflow->fd_ring, &rflow->r_ring);
1786 if (ret) {
1787 ret = -EBUSY;
1788 goto err_ring;
1789 }
1790
1791 memset(&ring_cfg, 0, sizeof(ring_cfg));
1792
1793 ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1794 if (ud->match_data->type == DMA_TYPE_UDMA) {
1795 if (uc->config.pkt_mode)
1796 ring_cfg.size = SG_MAX_SEGMENTS;
1797 else
1798 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1799
1800 ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
1801 } else {
1802 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1803 ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
1804
1805 k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel);
1806 ring_cfg.asel = uc->config.asel;
1807 ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
1808 }
1809
1810 ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
1811
1812 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1813 ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
1814
1815 if (ret)
1816 goto err_ringcfg;
1817
1818 return 0;
1819
1820 err_ringcfg:
1821 k3_ringacc_ring_free(rflow->r_ring);
1822 rflow->r_ring = NULL;
1823 k3_ringacc_ring_free(rflow->fd_ring);
1824 rflow->fd_ring = NULL;
1825 err_ring:
1826 udma_put_rflow(uc);
1827 err_rflow:
1828 udma_put_rchan(uc);
1829
1830 return ret;
1831 }
1832
1833 #define TISCI_BCDMA_BCHAN_VALID_PARAMS ( \
1834 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1835 TI_SCI_MSG_VALUE_RM_UDMAP_CH_EXTENDED_CH_TYPE_VALID)
1836
1837 #define TISCI_BCDMA_TCHAN_VALID_PARAMS ( \
1838 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1839 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID)
1840
1841 #define TISCI_BCDMA_RCHAN_VALID_PARAMS ( \
1842 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID)
1843
1844 #define TISCI_UDMA_TCHAN_VALID_PARAMS ( \
1845 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1846 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | \
1847 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | \
1848 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
1849 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | \
1850 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
1851 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
1852 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1853
1854 #define TISCI_UDMA_RCHAN_VALID_PARAMS ( \
1855 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1856 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
1857 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
1858 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
1859 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID | \
1860 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID | \
1861 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | \
1862 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID | \
1863 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1864
udma_tisci_m2m_channel_config(struct udma_chan * uc)1865 static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
1866 {
1867 struct udma_dev *ud = uc->ud;
1868 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1869 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1870 struct udma_tchan *tchan = uc->tchan;
1871 struct udma_rchan *rchan = uc->rchan;
1872 u8 burst_size = 0;
1873 int ret;
1874 u8 tpl;
1875
1876 /* Non synchronized - mem to mem type of transfer */
1877 int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
1878 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1879 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
1880
1881 if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) {
1882 tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, tchan->id);
1883
1884 burst_size = ud->match_data->burst_size[tpl];
1885 }
1886
1887 req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
1888 req_tx.nav_id = tisci_rm->tisci_dev_id;
1889 req_tx.index = tchan->id;
1890 req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1891 req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1892 req_tx.txcq_qnum = tc_ring;
1893 req_tx.tx_atype = ud->atype;
1894 if (burst_size) {
1895 req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
1896 req_tx.tx_burst_size = burst_size;
1897 }
1898
1899 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1900 if (ret) {
1901 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1902 return ret;
1903 }
1904
1905 req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS;
1906 req_rx.nav_id = tisci_rm->tisci_dev_id;
1907 req_rx.index = rchan->id;
1908 req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1909 req_rx.rxcq_qnum = tc_ring;
1910 req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1911 req_rx.rx_atype = ud->atype;
1912 if (burst_size) {
1913 req_rx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
1914 req_rx.rx_burst_size = burst_size;
1915 }
1916
1917 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
1918 if (ret)
1919 dev_err(ud->dev, "rchan%d alloc failed %d\n", rchan->id, ret);
1920
1921 return ret;
1922 }
1923
bcdma_tisci_m2m_channel_config(struct udma_chan * uc)1924 static int bcdma_tisci_m2m_channel_config(struct udma_chan *uc)
1925 {
1926 struct udma_dev *ud = uc->ud;
1927 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1928 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1929 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1930 struct udma_bchan *bchan = uc->bchan;
1931 u8 burst_size = 0;
1932 int ret;
1933 u8 tpl;
1934
1935 if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) {
1936 tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, bchan->id);
1937
1938 burst_size = ud->match_data->burst_size[tpl];
1939 }
1940
1941 req_tx.valid_params = TISCI_BCDMA_BCHAN_VALID_PARAMS;
1942 req_tx.nav_id = tisci_rm->tisci_dev_id;
1943 req_tx.extended_ch_type = TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN;
1944 req_tx.index = bchan->id;
1945 if (burst_size) {
1946 req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
1947 req_tx.tx_burst_size = burst_size;
1948 }
1949
1950 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1951 if (ret)
1952 dev_err(ud->dev, "bchan%d cfg failed %d\n", bchan->id, ret);
1953
1954 return ret;
1955 }
1956
udma_tisci_tx_channel_config(struct udma_chan * uc)1957 static int udma_tisci_tx_channel_config(struct udma_chan *uc)
1958 {
1959 struct udma_dev *ud = uc->ud;
1960 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1961 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1962 struct udma_tchan *tchan = uc->tchan;
1963 int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
1964 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1965 u32 mode, fetch_size;
1966 int ret;
1967
1968 if (uc->config.pkt_mode) {
1969 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
1970 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
1971 uc->config.psd_size, 0);
1972 } else {
1973 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
1974 fetch_size = sizeof(struct cppi5_desc_hdr_t);
1975 }
1976
1977 req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
1978 req_tx.nav_id = tisci_rm->tisci_dev_id;
1979 req_tx.index = tchan->id;
1980 req_tx.tx_chan_type = mode;
1981 req_tx.tx_supr_tdpkt = uc->config.notdpkt;
1982 req_tx.tx_fetch_size = fetch_size >> 2;
1983 req_tx.txcq_qnum = tc_ring;
1984 req_tx.tx_atype = uc->config.atype;
1985 if (uc->config.ep_type == PSIL_EP_PDMA_XY &&
1986 ud->match_data->flags & UDMA_FLAG_TDTYPE) {
1987 /* wait for peer to complete the teardown for PDMAs */
1988 req_tx.valid_params |=
1989 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
1990 req_tx.tx_tdtype = 1;
1991 }
1992
1993 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1994 if (ret)
1995 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1996
1997 return ret;
1998 }
1999
2000 static int bcdma_tisci_tx_channel_config(struct udma_chan *uc)
2001 {
2002 struct udma_dev *ud = uc->ud;
2003 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2004 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2005 struct udma_tchan *tchan = uc->tchan;
2006 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
2007 int ret;
2008
2009 req_tx.valid_params = TISCI_BCDMA_TCHAN_VALID_PARAMS;
2010 req_tx.nav_id = tisci_rm->tisci_dev_id;
2011 req_tx.index = tchan->id;
2012 req_tx.tx_supr_tdpkt = uc->config.notdpkt;
2013 if (ud->match_data->flags & UDMA_FLAG_TDTYPE) {
2014 /* wait for peer to complete the teardown for PDMAs */
2015 req_tx.valid_params |=
2016 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
2017 req_tx.tx_tdtype = 1;
2018 }
2019
2020 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
2021 if (ret)
2022 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
2023
2024 return ret;
2025 }
2026
2027 #define pktdma_tisci_tx_channel_config bcdma_tisci_tx_channel_config
2028
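/*
 * Configure a UDMA RX channel and its default flow via TISCI. The flow
 * index matches the rchan id; received packets are pushed to the receive
 * ring and all four free descriptor queues point at the same FD ring.
 */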
2029 static int udma_tisci_rx_channel_config(struct udma_chan *uc)
2030 {
2031 struct udma_dev *ud = uc->ud;
2032 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2033 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2034 struct udma_rchan *rchan = uc->rchan;
2035 int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring);
2036 int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2037 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
2038 struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
2039 u32 mode, fetch_size;
2040 int ret;
2041
2042 if (uc->config.pkt_mode) {
2043 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
2044 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
2045 uc->config.psd_size, 0);
2046 } else {
2047 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
2048 fetch_size = sizeof(struct cppi5_desc_hdr_t);
2049 }
2050
2051 req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS;
2052 req_rx.nav_id = tisci_rm->tisci_dev_id;
2053 req_rx.index = rchan->id;
2054 req_rx.rx_fetch_size = fetch_size >> 2;
2055 req_rx.rxcq_qnum = rx_ring;
2056 req_rx.rx_chan_type = mode;
2057 req_rx.rx_atype = uc->config.atype;
2058
2059 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
2060 if (ret) {
2061 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
2062 return ret;
2063 }
2064
2065 flow_req.valid_params =
2066 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
2067 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
2068 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
2069 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
2070 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
2071 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
2072 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
2073 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
2074 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
2075 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
2076 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
2077 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
2078 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
2079
2080 flow_req.nav_id = tisci_rm->tisci_dev_id;
2081 flow_req.flow_index = rchan->id;
2082
2083 if (uc->config.needs_epib)
2084 flow_req.rx_einfo_present = 1;
2085 else
2086 flow_req.rx_einfo_present = 0;
2087 if (uc->config.psd_size)
2088 flow_req.rx_psinfo_present = 1;
2089 else
2090 flow_req.rx_psinfo_present = 0;
2091 flow_req.rx_error_handling = 1;
2092 flow_req.rx_dest_qnum = rx_ring;
2093 flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE;
2094 flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG;
2095 flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI;
2096 flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO;
2097 flow_req.rx_fdq0_sz0_qnum = fd_ring;
2098 flow_req.rx_fdq1_qnum = fd_ring;
2099 flow_req.rx_fdq2_qnum = fd_ring;
2100 flow_req.rx_fdq3_qnum = fd_ring;
2101
2102 ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
2103
2104 if (ret)
2105 dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret);
2106
2107 	return ret;
2108 }
2109
2110 static int bcdma_tisci_rx_channel_config(struct udma_chan *uc)
2111 {
2112 struct udma_dev *ud = uc->ud;
2113 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2114 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2115 struct udma_rchan *rchan = uc->rchan;
2116 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
2117 int ret;
2118
2119 req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
2120 req_rx.nav_id = tisci_rm->tisci_dev_id;
2121 req_rx.index = rchan->id;
2122
2123 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
2124 if (ret)
2125 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
2126
2127 return ret;
2128 }
2129
2130 static int pktdma_tisci_rx_channel_config(struct udma_chan *uc)
2131 {
2132 struct udma_dev *ud = uc->ud;
2133 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2134 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2135 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
2136 struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
2137 int ret;
2138
2139 req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
2140 req_rx.nav_id = tisci_rm->tisci_dev_id;
2141 req_rx.index = uc->rchan->id;
2142
2143 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
2144 if (ret) {
2145 dev_err(ud->dev, "rchan%d cfg failed %d\n", uc->rchan->id, ret);
2146 return ret;
2147 }
2148
2149 flow_req.valid_params =
2150 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
2151 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
2152 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID;
2153
2154 flow_req.nav_id = tisci_rm->tisci_dev_id;
2155 flow_req.flow_index = uc->rflow->id;
2156
2157 if (uc->config.needs_epib)
2158 flow_req.rx_einfo_present = 1;
2159 else
2160 flow_req.rx_einfo_present = 0;
2161 if (uc->config.psd_size)
2162 flow_req.rx_psinfo_present = 1;
2163 else
2164 flow_req.rx_psinfo_present = 0;
2165 flow_req.rx_error_handling = 1;
2166
2167 ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
2168
2169 if (ret)
2170 dev_err(ud->dev, "flow%d config failed: %d\n", uc->rflow->id,
2171 ret);
2172
2173 return ret;
2174 }
2175
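/*
 * Allocate everything a UDMA channel needs for the requested direction:
 * tchan/rchan (a paired set for MEM_TO_MEM), rings, TISCI channel
 * configuration, PSI-L pairing and the ring/TR event interrupts. Any
 * failure unwinds the partially acquired resources.
 */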
2176 static int udma_alloc_chan_resources(struct dma_chan *chan)
2177 {
2178 struct udma_chan *uc = to_udma_chan(chan);
2179 struct udma_dev *ud = to_udma_dev(chan->device);
2180 const struct udma_soc_data *soc_data = ud->soc_data;
2181 struct k3_ring *irq_ring;
2182 u32 irq_udma_idx;
2183 int ret;
2184
2185 uc->dma_dev = ud->dev;
2186
2187 if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) {
2188 uc->use_dma_pool = true;
2189 		/* in case of MEM_TO_MEM we have a maximum of two TRs */
2190 if (uc->config.dir == DMA_MEM_TO_MEM) {
2191 uc->config.hdesc_size = cppi5_trdesc_calc_size(
2192 sizeof(struct cppi5_tr_type15_t), 2);
2193 uc->config.pkt_mode = false;
2194 }
2195 }
2196
2197 if (uc->use_dma_pool) {
2198 uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
2199 uc->config.hdesc_size,
2200 ud->desc_align,
2201 0);
2202 if (!uc->hdesc_pool) {
2203 dev_err(ud->ddev.dev,
2204 "Descriptor pool allocation failed\n");
2205 uc->use_dma_pool = false;
2206 ret = -ENOMEM;
2207 goto err_cleanup;
2208 }
2209 }
2210
2211 /*
2212 * Make sure that the completion is in a known state:
2213 * No teardown, the channel is idle
2214 */
2215 reinit_completion(&uc->teardown_completed);
2216 complete_all(&uc->teardown_completed);
2217 uc->state = UDMA_CHAN_IS_IDLE;
2218
2219 switch (uc->config.dir) {
2220 case DMA_MEM_TO_MEM:
2221 /* Non synchronized - mem to mem type of transfer */
2222 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
2223 uc->id);
2224
2225 ret = udma_get_chan_pair(uc);
2226 if (ret)
2227 goto err_cleanup;
2228
2229 ret = udma_alloc_tx_resources(uc);
2230 if (ret) {
2231 udma_put_rchan(uc);
2232 goto err_cleanup;
2233 }
2234
2235 ret = udma_alloc_rx_resources(uc);
2236 if (ret) {
2237 udma_free_tx_resources(uc);
2238 goto err_cleanup;
2239 }
2240
2241 uc->config.src_thread = ud->psil_base + uc->tchan->id;
2242 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2243 K3_PSIL_DST_THREAD_ID_OFFSET;
2244
2245 irq_ring = uc->tchan->tc_ring;
2246 irq_udma_idx = uc->tchan->id;
2247
2248 ret = udma_tisci_m2m_channel_config(uc);
2249 break;
2250 case DMA_MEM_TO_DEV:
2251 		/* Slave transfer synchronized - mem to dev (TX) transfer */
2252 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
2253 uc->id);
2254
2255 ret = udma_alloc_tx_resources(uc);
2256 if (ret)
2257 goto err_cleanup;
2258
2259 uc->config.src_thread = ud->psil_base + uc->tchan->id;
2260 uc->config.dst_thread = uc->config.remote_thread_id;
2261 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
2262
2263 irq_ring = uc->tchan->tc_ring;
2264 irq_udma_idx = uc->tchan->id;
2265
2266 ret = udma_tisci_tx_channel_config(uc);
2267 break;
2268 case DMA_DEV_TO_MEM:
2269 		/* Slave transfer synchronized - dev to mem (RX) transfer */
2270 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
2271 uc->id);
2272
2273 ret = udma_alloc_rx_resources(uc);
2274 if (ret)
2275 goto err_cleanup;
2276
2277 uc->config.src_thread = uc->config.remote_thread_id;
2278 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2279 K3_PSIL_DST_THREAD_ID_OFFSET;
2280
2281 irq_ring = uc->rflow->r_ring;
2282 irq_udma_idx = soc_data->oes.udma_rchan + uc->rchan->id;
2283
2284 ret = udma_tisci_rx_channel_config(uc);
2285 break;
2286 default:
2287 /* Can not happen */
2288 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2289 __func__, uc->id, uc->config.dir);
2290 ret = -EINVAL;
2291 goto err_cleanup;
2292
2293 }
2294
2295 /* check if the channel configuration was successful */
2296 if (ret)
2297 goto err_res_free;
2298
2299 if (udma_is_chan_running(uc)) {
2300 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
2301 udma_reset_chan(uc, false);
2302 if (udma_is_chan_running(uc)) {
2303 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
2304 ret = -EBUSY;
2305 goto err_res_free;
2306 }
2307 }
2308
2309 /* PSI-L pairing */
2310 ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
2311 if (ret) {
2312 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2313 uc->config.src_thread, uc->config.dst_thread);
2314 goto err_res_free;
2315 }
2316
2317 uc->psil_paired = true;
2318
2319 uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring);
2320 if (uc->irq_num_ring <= 0) {
2321 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
2322 k3_ringacc_get_ring_id(irq_ring));
2323 ret = -EINVAL;
2324 goto err_psi_free;
2325 }
2326
2327 ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
2328 IRQF_TRIGGER_HIGH, uc->name, uc);
2329 if (ret) {
2330 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
2331 goto err_irq_free;
2332 }
2333
2334 /* Event from UDMA (TR events) only needed for slave TR mode channels */
2335 if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) {
2336 uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx);
2337 if (uc->irq_num_udma <= 0) {
2338 dev_err(ud->dev, "Failed to get udma irq (index: %u)\n",
2339 irq_udma_idx);
2340 free_irq(uc->irq_num_ring, uc);
2341 ret = -EINVAL;
2342 goto err_irq_free;
2343 }
2344
2345 ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
2346 uc->name, uc);
2347 if (ret) {
2348 dev_err(ud->dev, "chan%d: UDMA irq request failed\n",
2349 uc->id);
2350 free_irq(uc->irq_num_ring, uc);
2351 goto err_irq_free;
2352 }
2353 } else {
2354 uc->irq_num_udma = 0;
2355 }
2356
2357 udma_reset_rings(uc);
2358
2359 return 0;
2360
2361 err_irq_free:
2362 uc->irq_num_ring = 0;
2363 uc->irq_num_udma = 0;
2364 err_psi_free:
2365 navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
2366 uc->psil_paired = false;
2367 err_res_free:
2368 udma_free_tx_resources(uc);
2369 udma_free_rx_resources(uc);
2370 err_cleanup:
2371 udma_reset_uchan(uc);
2372
2373 if (uc->use_dma_pool) {
2374 dma_pool_destroy(uc->hdesc_pool);
2375 uc->use_dma_pool = false;
2376 }
2377
2378 return ret;
2379 }
2380
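/*
 * BCDMA channel setup. Only TR mode is supported: MEM_TO_MEM uses a bchan
 * (block copy, no PSI-L pairing), slave directions use a tchan or rchan
 * paired with the remote PSI-L thread. Interrupts come from the MSI (INTA)
 * domain rather than from the ring accelerator.
 */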
2381 static int bcdma_alloc_chan_resources(struct dma_chan *chan)
2382 {
2383 struct udma_chan *uc = to_udma_chan(chan);
2384 struct udma_dev *ud = to_udma_dev(chan->device);
2385 const struct udma_oes_offsets *oes = &ud->soc_data->oes;
2386 u32 irq_udma_idx, irq_ring_idx;
2387 int ret;
2388
2389 /* Only TR mode is supported */
2390 uc->config.pkt_mode = false;
2391
2392 /*
2393 * Make sure that the completion is in a known state:
2394 * No teardown, the channel is idle
2395 */
2396 reinit_completion(&uc->teardown_completed);
2397 complete_all(&uc->teardown_completed);
2398 uc->state = UDMA_CHAN_IS_IDLE;
2399
2400 switch (uc->config.dir) {
2401 case DMA_MEM_TO_MEM:
2402 /* Non synchronized - mem to mem type of transfer */
2403 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
2404 uc->id);
2405
2406 ret = bcdma_alloc_bchan_resources(uc);
2407 if (ret)
2408 return ret;
2409
2410 irq_ring_idx = uc->bchan->id + oes->bcdma_bchan_ring;
2411 irq_udma_idx = uc->bchan->id + oes->bcdma_bchan_data;
2412
2413 ret = bcdma_tisci_m2m_channel_config(uc);
2414 break;
2415 case DMA_MEM_TO_DEV:
2416 		/* Slave transfer synchronized - mem to dev (TX) transfer */
2417 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
2418 uc->id);
2419
2420 ret = udma_alloc_tx_resources(uc);
2421 if (ret) {
2422 uc->config.remote_thread_id = -1;
2423 return ret;
2424 }
2425
2426 uc->config.src_thread = ud->psil_base + uc->tchan->id;
2427 uc->config.dst_thread = uc->config.remote_thread_id;
2428 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
2429
2430 irq_ring_idx = uc->tchan->id + oes->bcdma_tchan_ring;
2431 irq_udma_idx = uc->tchan->id + oes->bcdma_tchan_data;
2432
2433 ret = bcdma_tisci_tx_channel_config(uc);
2434 break;
2435 case DMA_DEV_TO_MEM:
2436 		/* Slave transfer synchronized - dev to mem (RX) transfer */
2437 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
2438 uc->id);
2439
2440 ret = udma_alloc_rx_resources(uc);
2441 if (ret) {
2442 uc->config.remote_thread_id = -1;
2443 return ret;
2444 }
2445
2446 uc->config.src_thread = uc->config.remote_thread_id;
2447 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2448 K3_PSIL_DST_THREAD_ID_OFFSET;
2449
2450 irq_ring_idx = uc->rchan->id + oes->bcdma_rchan_ring;
2451 irq_udma_idx = uc->rchan->id + oes->bcdma_rchan_data;
2452
2453 ret = bcdma_tisci_rx_channel_config(uc);
2454 break;
2455 default:
2456 /* Can not happen */
2457 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2458 __func__, uc->id, uc->config.dir);
2459 return -EINVAL;
2460 }
2461
2462 /* check if the channel configuration was successful */
2463 if (ret)
2464 goto err_res_free;
2465
2466 if (udma_is_chan_running(uc)) {
2467 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
2468 udma_reset_chan(uc, false);
2469 if (udma_is_chan_running(uc)) {
2470 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
2471 ret = -EBUSY;
2472 goto err_res_free;
2473 }
2474 }
2475
2476 uc->dma_dev = dmaengine_get_dma_device(chan);
2477 if (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type) {
2478 uc->config.hdesc_size = cppi5_trdesc_calc_size(
2479 sizeof(struct cppi5_tr_type15_t), 2);
2480
2481 uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
2482 uc->config.hdesc_size,
2483 ud->desc_align,
2484 0);
2485 if (!uc->hdesc_pool) {
2486 dev_err(ud->ddev.dev,
2487 "Descriptor pool allocation failed\n");
2488 uc->use_dma_pool = false;
2489 ret = -ENOMEM;
2490 goto err_res_free;
2491 }
2492
2493 uc->use_dma_pool = true;
2494 } else if (uc->config.dir != DMA_MEM_TO_MEM) {
2495 /* PSI-L pairing */
2496 ret = navss_psil_pair(ud, uc->config.src_thread,
2497 uc->config.dst_thread);
2498 if (ret) {
2499 dev_err(ud->dev,
2500 "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2501 uc->config.src_thread, uc->config.dst_thread);
2502 goto err_res_free;
2503 }
2504
2505 uc->psil_paired = true;
2506 }
2507
2508 uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx);
2509 if (uc->irq_num_ring <= 0) {
2510 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
2511 irq_ring_idx);
2512 ret = -EINVAL;
2513 goto err_psi_free;
2514 }
2515
2516 ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
2517 IRQF_TRIGGER_HIGH, uc->name, uc);
2518 if (ret) {
2519 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
2520 goto err_irq_free;
2521 }
2522
2523 /* Event from BCDMA (TR events) only needed for slave channels */
2524 if (is_slave_direction(uc->config.dir)) {
2525 uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx);
2526 if (uc->irq_num_udma <= 0) {
2527 dev_err(ud->dev, "Failed to get bcdma irq (index: %u)\n",
2528 irq_udma_idx);
2529 free_irq(uc->irq_num_ring, uc);
2530 ret = -EINVAL;
2531 goto err_irq_free;
2532 }
2533
2534 ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
2535 uc->name, uc);
2536 if (ret) {
2537 dev_err(ud->dev, "chan%d: BCDMA irq request failed\n",
2538 uc->id);
2539 free_irq(uc->irq_num_ring, uc);
2540 goto err_irq_free;
2541 }
2542 } else {
2543 uc->irq_num_udma = 0;
2544 }
2545
2546 udma_reset_rings(uc);
2547
2548 INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
2549 udma_check_tx_completion);
2550 return 0;
2551
2552 err_irq_free:
2553 uc->irq_num_ring = 0;
2554 uc->irq_num_udma = 0;
2555 err_psi_free:
2556 if (uc->psil_paired)
2557 navss_psil_unpair(ud, uc->config.src_thread,
2558 uc->config.dst_thread);
2559 uc->psil_paired = false;
2560 err_res_free:
2561 bcdma_free_bchan_resources(uc);
2562 udma_free_tx_resources(uc);
2563 udma_free_rx_resources(uc);
2564
2565 udma_reset_uchan(uc);
2566
2567 if (uc->use_dma_pool) {
2568 dma_pool_destroy(uc->hdesc_pool);
2569 uc->use_dma_pool = false;
2570 }
2571
2572 return ret;
2573 }
2574
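/*
 * Route the selected global event to the channel's bchan trigger. Each
 * bchan has two triggers; tr_trigger_type (1 or 2) selects which one.
 */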
2575 static int bcdma_router_config(struct dma_chan *chan)
2576 {
2577 struct k3_event_route_data *router_data = chan->route_data;
2578 struct udma_chan *uc = to_udma_chan(chan);
2579 u32 trigger_event;
2580
2581 if (!uc->bchan)
2582 return -EINVAL;
2583
2584 if (uc->config.tr_trigger_type != 1 && uc->config.tr_trigger_type != 2)
2585 return -EINVAL;
2586
2587 trigger_event = uc->ud->soc_data->bcdma_trigger_event_offset;
2588 trigger_event += (uc->bchan->id * 2) + uc->config.tr_trigger_type - 1;
2589
2590 return router_data->set_event(router_data->priv, trigger_event);
2591 }
2592
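/*
 * PKTDMA channel setup. Only packet mode slave transfers are supported,
 * so a host descriptor pool is always created and only the ring (flow)
 * interrupt is requested; there is no separate TR event interrupt.
 */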
2593 static int pktdma_alloc_chan_resources(struct dma_chan *chan)
2594 {
2595 struct udma_chan *uc = to_udma_chan(chan);
2596 struct udma_dev *ud = to_udma_dev(chan->device);
2597 const struct udma_oes_offsets *oes = &ud->soc_data->oes;
2598 u32 irq_ring_idx;
2599 int ret;
2600
2601 /*
2602 * Make sure that the completion is in a known state:
2603 * No teardown, the channel is idle
2604 */
2605 reinit_completion(&uc->teardown_completed);
2606 complete_all(&uc->teardown_completed);
2607 uc->state = UDMA_CHAN_IS_IDLE;
2608
2609 switch (uc->config.dir) {
2610 case DMA_MEM_TO_DEV:
2611 		/* Slave transfer synchronized - mem to dev (TX) transfer */
2612 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
2613 uc->id);
2614
2615 ret = udma_alloc_tx_resources(uc);
2616 if (ret) {
2617 uc->config.remote_thread_id = -1;
2618 return ret;
2619 }
2620
2621 uc->config.src_thread = ud->psil_base + uc->tchan->id;
2622 uc->config.dst_thread = uc->config.remote_thread_id;
2623 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
2624
2625 irq_ring_idx = uc->tchan->tflow_id + oes->pktdma_tchan_flow;
2626
2627 ret = pktdma_tisci_tx_channel_config(uc);
2628 break;
2629 case DMA_DEV_TO_MEM:
2630 		/* Slave transfer synchronized - dev to mem (RX) transfer */
2631 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
2632 uc->id);
2633
2634 ret = udma_alloc_rx_resources(uc);
2635 if (ret) {
2636 uc->config.remote_thread_id = -1;
2637 return ret;
2638 }
2639
2640 uc->config.src_thread = uc->config.remote_thread_id;
2641 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2642 K3_PSIL_DST_THREAD_ID_OFFSET;
2643
2644 irq_ring_idx = uc->rflow->id + oes->pktdma_rchan_flow;
2645
2646 ret = pktdma_tisci_rx_channel_config(uc);
2647 break;
2648 default:
2649 /* Can not happen */
2650 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2651 __func__, uc->id, uc->config.dir);
2652 return -EINVAL;
2653 }
2654
2655 /* check if the channel configuration was successful */
2656 if (ret)
2657 goto err_res_free;
2658
2659 if (udma_is_chan_running(uc)) {
2660 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
2661 udma_reset_chan(uc, false);
2662 if (udma_is_chan_running(uc)) {
2663 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
2664 ret = -EBUSY;
2665 goto err_res_free;
2666 }
2667 }
2668
2669 uc->dma_dev = dmaengine_get_dma_device(chan);
2670 uc->hdesc_pool = dma_pool_create(uc->name, uc->dma_dev,
2671 uc->config.hdesc_size, ud->desc_align,
2672 0);
2673 if (!uc->hdesc_pool) {
2674 dev_err(ud->ddev.dev,
2675 "Descriptor pool allocation failed\n");
2676 uc->use_dma_pool = false;
2677 ret = -ENOMEM;
2678 goto err_res_free;
2679 }
2680
2681 uc->use_dma_pool = true;
2682
2683 /* PSI-L pairing */
2684 ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
2685 if (ret) {
2686 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2687 uc->config.src_thread, uc->config.dst_thread);
2688 goto err_res_free;
2689 }
2690
2691 uc->psil_paired = true;
2692
2693 uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx);
2694 if (uc->irq_num_ring <= 0) {
2695 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
2696 irq_ring_idx);
2697 ret = -EINVAL;
2698 goto err_psi_free;
2699 }
2700
2701 ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
2702 IRQF_TRIGGER_HIGH, uc->name, uc);
2703 if (ret) {
2704 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
2705 goto err_irq_free;
2706 }
2707
2708 uc->irq_num_udma = 0;
2709
2710 udma_reset_rings(uc);
2711
2712 INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
2713 udma_check_tx_completion);
2714
2715 if (uc->tchan)
2716 dev_dbg(ud->dev,
2717 "chan%d: tchan%d, tflow%d, Remote thread: 0x%04x\n",
2718 uc->id, uc->tchan->id, uc->tchan->tflow_id,
2719 uc->config.remote_thread_id);
2720 else if (uc->rchan)
2721 dev_dbg(ud->dev,
2722 "chan%d: rchan%d, rflow%d, Remote thread: 0x%04x\n",
2723 uc->id, uc->rchan->id, uc->rflow->id,
2724 uc->config.remote_thread_id);
2725 return 0;
2726
2727 err_irq_free:
2728 uc->irq_num_ring = 0;
2729 err_psi_free:
2730 navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
2731 uc->psil_paired = false;
2732 err_res_free:
2733 udma_free_tx_resources(uc);
2734 udma_free_rx_resources(uc);
2735
2736 udma_reset_uchan(uc);
2737
2738 dma_pool_destroy(uc->hdesc_pool);
2739 uc->use_dma_pool = false;
2740
2741 return ret;
2742 }
2743
2744 static int udma_slave_config(struct dma_chan *chan,
2745 struct dma_slave_config *cfg)
2746 {
2747 struct udma_chan *uc = to_udma_chan(chan);
2748
2749 memcpy(&uc->cfg, cfg, sizeof(uc->cfg));
2750
2751 return 0;
2752 }
2753
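/*
 * Allocate a single CPPI5 TR descriptor holding tr_count TRs of tr_size
 * bytes. Memory comes from the channel's descriptor pool when one is in
 * use, otherwise from coherent memory. The descriptor returns to the
 * completion ring matching the transfer direction and is set to reload
 * indefinitely for cyclic transfers.
 */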
2754 static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc,
2755 size_t tr_size, int tr_count,
2756 enum dma_transfer_direction dir)
2757 {
2758 struct udma_hwdesc *hwdesc;
2759 struct cppi5_desc_hdr_t *tr_desc;
2760 struct udma_desc *d;
2761 u32 reload_count = 0;
2762 u32 ring_id;
2763
2764 switch (tr_size) {
2765 case 16:
2766 case 32:
2767 case 64:
2768 case 128:
2769 break;
2770 default:
2771 dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size);
2772 return NULL;
2773 }
2774
2775 /* We have only one descriptor containing multiple TRs */
2776 d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT);
2777 if (!d)
2778 return NULL;
2779
2780 d->sglen = tr_count;
2781
2782 d->hwdesc_count = 1;
2783 hwdesc = &d->hwdesc[0];
2784
2785 /* Allocate memory for DMA ring descriptor */
2786 if (uc->use_dma_pool) {
2787 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
2788 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
2789 GFP_NOWAIT,
2790 &hwdesc->cppi5_desc_paddr);
2791 } else {
2792 hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size,
2793 tr_count);
2794 hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
2795 uc->ud->desc_align);
2796 hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev,
2797 hwdesc->cppi5_desc_size,
2798 &hwdesc->cppi5_desc_paddr,
2799 GFP_NOWAIT);
2800 }
2801
2802 if (!hwdesc->cppi5_desc_vaddr) {
2803 kfree(d);
2804 return NULL;
2805 }
2806
2807 /* Start of the TR req records */
2808 hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
2809 /* Start address of the TR response array */
2810 hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count;
2811
2812 tr_desc = hwdesc->cppi5_desc_vaddr;
2813
2814 if (uc->cyclic)
2815 reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE;
2816
2817 if (dir == DMA_DEV_TO_MEM)
2818 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2819 else
2820 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
2821
2822 cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count);
2823 cppi5_desc_set_pktids(tr_desc, uc->id,
2824 CPPI5_INFO1_DESC_FLOWID_DEFAULT);
2825 cppi5_desc_set_retpolicy(tr_desc, 0, ring_id);
2826
2827 return d;
2828 }
2829
2830 /**
2831 * udma_get_tr_counters - calculate TR counters for a given length
2832  * @len: Length of the transfer
2833 * @align_to: Preferred alignment
2834 * @tr0_cnt0: First TR icnt0
2835 * @tr0_cnt1: First TR icnt1
2836 * @tr1_cnt0: Second (if used) TR icnt0
2837 *
2838 * For len < SZ_64K only one TR is enough, tr1_cnt0 is not updated
2839 * For len >= SZ_64K two TRs are used in a simple way:
2840 * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1)
2841 * Second TR: the remaining length (tr1_cnt0)
2842 *
2843 * Returns the number of TRs the length needs (1 or 2)
2844  * -EINVAL if the length cannot be supported
2845 */
2846 static int udma_get_tr_counters(size_t len, unsigned long align_to,
2847 u16 *tr0_cnt0, u16 *tr0_cnt1, u16 *tr1_cnt0)
2848 {
2849 if (len < SZ_64K) {
2850 *tr0_cnt0 = len;
2851 *tr0_cnt1 = 1;
2852
2853 return 1;
2854 }
2855
2856 if (align_to > 3)
2857 align_to = 3;
2858
2859 realign:
2860 *tr0_cnt0 = SZ_64K - BIT(align_to);
2861 if (len / *tr0_cnt0 >= SZ_64K) {
2862 if (align_to) {
2863 align_to--;
2864 goto realign;
2865 }
2866 return -EINVAL;
2867 }
2868
2869 *tr0_cnt1 = len / *tr0_cnt0;
2870 *tr1_cnt0 = len % *tr0_cnt0;
2871
2872 return 2;
2873 }
2874
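/*
 * Build a TR mode slave SG descriptor: one type 1 TR per SG entry, or two
 * when the entry is 64K or longer (the split follows udma_get_tr_counters).
 * Every TR suppresses its completion event; the last one is additionally
 * marked as end of packet (EOP).
 */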
2875 static struct udma_desc *
2876 udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
2877 unsigned int sglen, enum dma_transfer_direction dir,
2878 unsigned long tx_flags, void *context)
2879 {
2880 struct scatterlist *sgent;
2881 struct udma_desc *d;
2882 struct cppi5_tr_type1_t *tr_req = NULL;
2883 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
2884 unsigned int i;
2885 size_t tr_size;
2886 int num_tr = 0;
2887 int tr_idx = 0;
2888 u64 asel;
2889
2890 /* estimate the number of TRs we will need */
2891 for_each_sg(sgl, sgent, sglen, i) {
2892 if (sg_dma_len(sgent) < SZ_64K)
2893 num_tr++;
2894 else
2895 num_tr += 2;
2896 }
2897
2898 /* Now allocate and setup the descriptor. */
2899 tr_size = sizeof(struct cppi5_tr_type1_t);
2900 d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
2901 if (!d)
2902 return NULL;
2903
2904 d->sglen = sglen;
2905
2906 if (uc->ud->match_data->type == DMA_TYPE_UDMA)
2907 asel = 0;
2908 else
2909 asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
2910
2911 tr_req = d->hwdesc[0].tr_req_base;
2912 for_each_sg(sgl, sgent, sglen, i) {
2913 dma_addr_t sg_addr = sg_dma_address(sgent);
2914
2915 num_tr = udma_get_tr_counters(sg_dma_len(sgent), __ffs(sg_addr),
2916 &tr0_cnt0, &tr0_cnt1, &tr1_cnt0);
2917 if (num_tr < 0) {
2918 dev_err(uc->ud->dev, "size %u is not supported\n",
2919 sg_dma_len(sgent));
2920 udma_free_hwdesc(uc, d);
2921 kfree(d);
2922 return NULL;
2923 }
2924
2925 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
2926 false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2927 cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
2928
2929 sg_addr |= asel;
2930 tr_req[tr_idx].addr = sg_addr;
2931 tr_req[tr_idx].icnt0 = tr0_cnt0;
2932 tr_req[tr_idx].icnt1 = tr0_cnt1;
2933 tr_req[tr_idx].dim1 = tr0_cnt0;
2934 tr_idx++;
2935
2936 if (num_tr == 2) {
2937 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
2938 false, false,
2939 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2940 cppi5_tr_csf_set(&tr_req[tr_idx].flags,
2941 CPPI5_TR_CSF_SUPR_EVT);
2942
2943 tr_req[tr_idx].addr = sg_addr + tr0_cnt1 * tr0_cnt0;
2944 tr_req[tr_idx].icnt0 = tr1_cnt0;
2945 tr_req[tr_idx].icnt1 = 1;
2946 tr_req[tr_idx].dim1 = tr1_cnt0;
2947 tr_idx++;
2948 }
2949
2950 d->residue += sg_dma_len(sgent);
2951 }
2952
2953 cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags,
2954 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
2955
2956 return d;
2957 }
2958
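/*
 * Build a triggered TR mode SG descriptor. Each trigger of the configured
 * tr_trigger_type moves one burst of trigger_size = tr_cnt0 * tr_cnt1
 * bytes (decrementing ICNT2), so every SG entry must be a multiple of
 * trigger_size.
 */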
2959 static struct udma_desc *
2960 udma_prep_slave_sg_triggered_tr(struct udma_chan *uc, struct scatterlist *sgl,
2961 unsigned int sglen,
2962 enum dma_transfer_direction dir,
2963 unsigned long tx_flags, void *context)
2964 {
2965 struct scatterlist *sgent;
2966 struct cppi5_tr_type15_t *tr_req = NULL;
2967 enum dma_slave_buswidth dev_width;
2968 u32 csf = CPPI5_TR_CSF_SUPR_EVT;
2969 u16 tr_cnt0, tr_cnt1;
2970 dma_addr_t dev_addr;
2971 struct udma_desc *d;
2972 unsigned int i;
2973 size_t tr_size, sg_len;
2974 int num_tr = 0;
2975 int tr_idx = 0;
2976 u32 burst, trigger_size, port_window;
2977 u64 asel;
2978
2979 if (dir == DMA_DEV_TO_MEM) {
2980 dev_addr = uc->cfg.src_addr;
2981 dev_width = uc->cfg.src_addr_width;
2982 burst = uc->cfg.src_maxburst;
2983 port_window = uc->cfg.src_port_window_size;
2984 } else if (dir == DMA_MEM_TO_DEV) {
2985 dev_addr = uc->cfg.dst_addr;
2986 dev_width = uc->cfg.dst_addr_width;
2987 burst = uc->cfg.dst_maxburst;
2988 port_window = uc->cfg.dst_port_window_size;
2989 } else {
2990 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
2991 return NULL;
2992 }
2993
2994 if (!burst)
2995 burst = 1;
2996
2997 if (port_window) {
2998 if (port_window != burst) {
2999 dev_err(uc->ud->dev,
3000 "The burst must be equal to port_window\n");
3001 return NULL;
3002 }
3003
3004 tr_cnt0 = dev_width * port_window;
3005 tr_cnt1 = 1;
3006 } else {
3007 tr_cnt0 = dev_width;
3008 tr_cnt1 = burst;
3009 }
3010 trigger_size = tr_cnt0 * tr_cnt1;
3011
3012 /* estimate the number of TRs we will need */
3013 for_each_sg(sgl, sgent, sglen, i) {
3014 sg_len = sg_dma_len(sgent);
3015
3016 if (sg_len % trigger_size) {
3017 dev_err(uc->ud->dev,
3018 "Not aligned SG entry (%zu for %u)\n", sg_len,
3019 trigger_size);
3020 return NULL;
3021 }
3022
3023 if (sg_len / trigger_size < SZ_64K)
3024 num_tr++;
3025 else
3026 num_tr += 2;
3027 }
3028
3029 /* Now allocate and setup the descriptor. */
3030 tr_size = sizeof(struct cppi5_tr_type15_t);
3031 d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
3032 if (!d)
3033 return NULL;
3034
3035 d->sglen = sglen;
3036
3037 if (uc->ud->match_data->type == DMA_TYPE_UDMA) {
3038 asel = 0;
3039 csf |= CPPI5_TR_CSF_EOL_ICNT0;
3040 } else {
3041 asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
3042 dev_addr |= asel;
3043 }
3044
3045 tr_req = d->hwdesc[0].tr_req_base;
3046 for_each_sg(sgl, sgent, sglen, i) {
3047 u16 tr0_cnt2, tr0_cnt3, tr1_cnt2;
3048 dma_addr_t sg_addr = sg_dma_address(sgent);
3049
3050 sg_len = sg_dma_len(sgent);
3051 num_tr = udma_get_tr_counters(sg_len / trigger_size, 0,
3052 &tr0_cnt2, &tr0_cnt3, &tr1_cnt2);
3053 if (num_tr < 0) {
3054 dev_err(uc->ud->dev, "size %zu is not supported\n",
3055 sg_len);
3056 udma_free_hwdesc(uc, d);
3057 kfree(d);
3058 return NULL;
3059 }
3060
3061 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15, false,
3062 true, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3063 cppi5_tr_csf_set(&tr_req[tr_idx].flags, csf);
3064 cppi5_tr_set_trigger(&tr_req[tr_idx].flags,
3065 uc->config.tr_trigger_type,
3066 CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC, 0, 0);
3067
3068 sg_addr |= asel;
3069 if (dir == DMA_DEV_TO_MEM) {
3070 tr_req[tr_idx].addr = dev_addr;
3071 tr_req[tr_idx].icnt0 = tr_cnt0;
3072 tr_req[tr_idx].icnt1 = tr_cnt1;
3073 tr_req[tr_idx].icnt2 = tr0_cnt2;
3074 tr_req[tr_idx].icnt3 = tr0_cnt3;
3075 tr_req[tr_idx].dim1 = (-1) * tr_cnt0;
3076
3077 tr_req[tr_idx].daddr = sg_addr;
3078 tr_req[tr_idx].dicnt0 = tr_cnt0;
3079 tr_req[tr_idx].dicnt1 = tr_cnt1;
3080 tr_req[tr_idx].dicnt2 = tr0_cnt2;
3081 tr_req[tr_idx].dicnt3 = tr0_cnt3;
3082 tr_req[tr_idx].ddim1 = tr_cnt0;
3083 tr_req[tr_idx].ddim2 = trigger_size;
3084 tr_req[tr_idx].ddim3 = trigger_size * tr0_cnt2;
3085 } else {
3086 tr_req[tr_idx].addr = sg_addr;
3087 tr_req[tr_idx].icnt0 = tr_cnt0;
3088 tr_req[tr_idx].icnt1 = tr_cnt1;
3089 tr_req[tr_idx].icnt2 = tr0_cnt2;
3090 tr_req[tr_idx].icnt3 = tr0_cnt3;
3091 tr_req[tr_idx].dim1 = tr_cnt0;
3092 tr_req[tr_idx].dim2 = trigger_size;
3093 tr_req[tr_idx].dim3 = trigger_size * tr0_cnt2;
3094
3095 tr_req[tr_idx].daddr = dev_addr;
3096 tr_req[tr_idx].dicnt0 = tr_cnt0;
3097 tr_req[tr_idx].dicnt1 = tr_cnt1;
3098 tr_req[tr_idx].dicnt2 = tr0_cnt2;
3099 tr_req[tr_idx].dicnt3 = tr0_cnt3;
3100 tr_req[tr_idx].ddim1 = (-1) * tr_cnt0;
3101 }
3102
3103 tr_idx++;
3104
3105 if (num_tr == 2) {
3106 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15,
3107 false, true,
3108 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3109 cppi5_tr_csf_set(&tr_req[tr_idx].flags, csf);
3110 cppi5_tr_set_trigger(&tr_req[tr_idx].flags,
3111 uc->config.tr_trigger_type,
3112 CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC,
3113 0, 0);
3114
3115 sg_addr += trigger_size * tr0_cnt2 * tr0_cnt3;
3116 if (dir == DMA_DEV_TO_MEM) {
3117 tr_req[tr_idx].addr = dev_addr;
3118 tr_req[tr_idx].icnt0 = tr_cnt0;
3119 tr_req[tr_idx].icnt1 = tr_cnt1;
3120 tr_req[tr_idx].icnt2 = tr1_cnt2;
3121 tr_req[tr_idx].icnt3 = 1;
3122 tr_req[tr_idx].dim1 = (-1) * tr_cnt0;
3123
3124 tr_req[tr_idx].daddr = sg_addr;
3125 tr_req[tr_idx].dicnt0 = tr_cnt0;
3126 tr_req[tr_idx].dicnt1 = tr_cnt1;
3127 tr_req[tr_idx].dicnt2 = tr1_cnt2;
3128 tr_req[tr_idx].dicnt3 = 1;
3129 tr_req[tr_idx].ddim1 = tr_cnt0;
3130 tr_req[tr_idx].ddim2 = trigger_size;
3131 } else {
3132 tr_req[tr_idx].addr = sg_addr;
3133 tr_req[tr_idx].icnt0 = tr_cnt0;
3134 tr_req[tr_idx].icnt1 = tr_cnt1;
3135 tr_req[tr_idx].icnt2 = tr1_cnt2;
3136 tr_req[tr_idx].icnt3 = 1;
3137 tr_req[tr_idx].dim1 = tr_cnt0;
3138 tr_req[tr_idx].dim2 = trigger_size;
3139
3140 tr_req[tr_idx].daddr = dev_addr;
3141 tr_req[tr_idx].dicnt0 = tr_cnt0;
3142 tr_req[tr_idx].dicnt1 = tr_cnt1;
3143 tr_req[tr_idx].dicnt2 = tr1_cnt2;
3144 tr_req[tr_idx].dicnt3 = 1;
3145 tr_req[tr_idx].ddim1 = (-1) * tr_cnt0;
3146 }
3147 tr_idx++;
3148 }
3149
3150 d->residue += sg_len;
3151 }
3152
3153 cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags, csf | CPPI5_TR_CSF_EOP);
3154
3155 return d;
3156 }
3157
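/*
 * Set up the static TR parameters used by PDMA (XY) peers: element size
 * from the bus width, element count from the burst, and the burst count
 * (Z) that tells the PDMA when to close a packet. For DEV_TO_MEM the Z
 * value must fit in the platform's statictr_z_mask.
 */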
3158 static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
3159 enum dma_slave_buswidth dev_width,
3160 u16 elcnt)
3161 {
3162 if (uc->config.ep_type != PSIL_EP_PDMA_XY)
3163 return 0;
3164
3165 /* Bus width translates to the element size (ES) */
3166 switch (dev_width) {
3167 case DMA_SLAVE_BUSWIDTH_1_BYTE:
3168 d->static_tr.elsize = 0;
3169 break;
3170 case DMA_SLAVE_BUSWIDTH_2_BYTES:
3171 d->static_tr.elsize = 1;
3172 break;
3173 case DMA_SLAVE_BUSWIDTH_3_BYTES:
3174 d->static_tr.elsize = 2;
3175 break;
3176 case DMA_SLAVE_BUSWIDTH_4_BYTES:
3177 d->static_tr.elsize = 3;
3178 break;
3179 case DMA_SLAVE_BUSWIDTH_8_BYTES:
3180 d->static_tr.elsize = 4;
3181 break;
3182 default: /* not reached */
3183 return -EINVAL;
3184 }
3185
3186 d->static_tr.elcnt = elcnt;
3187
3188 if (uc->config.pkt_mode || !uc->cyclic) {
3189 /*
3190 * PDMA must close the packet when the channel is in packet mode.
3191 * For TR mode when the channel is not cyclic we also need PDMA
3192 * to close the packet otherwise the transfer will stall because
3193 		 * PDMA holds on to the data it has received from the peripheral.
3194 */
3195 unsigned int div = dev_width * elcnt;
3196
3197 if (uc->cyclic)
3198 d->static_tr.bstcnt = d->residue / d->sglen / div;
3199 else
3200 d->static_tr.bstcnt = d->residue / div;
3201 } else if (uc->ud->match_data->type == DMA_TYPE_BCDMA &&
3202 uc->config.dir == DMA_DEV_TO_MEM &&
3203 uc->cyclic) {
3204 /*
3205 * For cyclic mode with BCDMA we have to set EOP in each TR to
3206 * prevent short packet errors seen on channel teardown. So the
3207 * PDMA must close the packet after every TR transfer by setting
3208 * burst count equal to the number of bytes transferred.
3209 */
3210 struct cppi5_tr_type1_t *tr_req = d->hwdesc[0].tr_req_base;
3211
3212 d->static_tr.bstcnt =
3213 (tr_req->icnt0 * tr_req->icnt1) / dev_width;
3214 } else {
3215 d->static_tr.bstcnt = 0;
3216 }
3217
3218 if (uc->config.dir == DMA_DEV_TO_MEM &&
3219 d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
3220 return -EINVAL;
3221
3222 return 0;
3223 }
3224
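/*
 * Build a packet mode SG descriptor: one CPPI5 host descriptor per SG
 * entry, allocated from the channel's descriptor pool. The first
 * descriptor carries the packet info and return ring; the rest are linked
 * to it as host buffer descriptors. The total packet length must stay
 * below 4MB.
 */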
3225 static struct udma_desc *
3226 udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
3227 unsigned int sglen, enum dma_transfer_direction dir,
3228 unsigned long tx_flags, void *context)
3229 {
3230 struct scatterlist *sgent;
3231 struct cppi5_host_desc_t *h_desc = NULL;
3232 struct udma_desc *d;
3233 u32 ring_id;
3234 unsigned int i;
3235 u64 asel;
3236
3237 d = kzalloc(struct_size(d, hwdesc, sglen), GFP_NOWAIT);
3238 if (!d)
3239 return NULL;
3240
3241 d->sglen = sglen;
3242 d->hwdesc_count = sglen;
3243
3244 if (dir == DMA_DEV_TO_MEM)
3245 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
3246 else
3247 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
3248
3249 if (uc->ud->match_data->type == DMA_TYPE_UDMA)
3250 asel = 0;
3251 else
3252 asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
3253
3254 for_each_sg(sgl, sgent, sglen, i) {
3255 struct udma_hwdesc *hwdesc = &d->hwdesc[i];
3256 dma_addr_t sg_addr = sg_dma_address(sgent);
3257 struct cppi5_host_desc_t *desc;
3258 size_t sg_len = sg_dma_len(sgent);
3259
3260 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
3261 GFP_NOWAIT,
3262 &hwdesc->cppi5_desc_paddr);
3263 if (!hwdesc->cppi5_desc_vaddr) {
3264 dev_err(uc->ud->dev,
3265 "descriptor%d allocation failed\n", i);
3266
3267 udma_free_hwdesc(uc, d);
3268 kfree(d);
3269 return NULL;
3270 }
3271
3272 d->residue += sg_len;
3273 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
3274 desc = hwdesc->cppi5_desc_vaddr;
3275
3276 if (i == 0) {
3277 cppi5_hdesc_init(desc, 0, 0);
3278 			/* Flow and Packet ID */
3279 cppi5_desc_set_pktids(&desc->hdr, uc->id,
3280 CPPI5_INFO1_DESC_FLOWID_DEFAULT);
3281 cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id);
3282 } else {
3283 cppi5_hdesc_reset_hbdesc(desc);
3284 cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff);
3285 }
3286
3287 /* attach the sg buffer to the descriptor */
3288 sg_addr |= asel;
3289 cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len);
3290
3291 /* Attach link as host buffer descriptor */
3292 if (h_desc)
3293 cppi5_hdesc_link_hbdesc(h_desc,
3294 hwdesc->cppi5_desc_paddr | asel);
3295
3296 if (uc->ud->match_data->type == DMA_TYPE_PKTDMA ||
3297 dir == DMA_MEM_TO_DEV)
3298 h_desc = desc;
3299 }
3300
3301 if (d->residue >= SZ_4M) {
3302 dev_err(uc->ud->dev,
3303 "%s: Transfer size %u is over the supported 4M range\n",
3304 __func__, d->residue);
3305 udma_free_hwdesc(uc, d);
3306 kfree(d);
3307 return NULL;
3308 }
3309
3310 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3311 cppi5_hdesc_set_pktlen(h_desc, d->residue);
3312
3313 return d;
3314 }
3315
3316 static int udma_attach_metadata(struct dma_async_tx_descriptor *desc,
3317 void *data, size_t len)
3318 {
3319 struct udma_desc *d = to_udma_desc(desc);
3320 struct udma_chan *uc = to_udma_chan(desc->chan);
3321 struct cppi5_host_desc_t *h_desc;
3322 u32 psd_size = len;
3323 u32 flags = 0;
3324
3325 if (!uc->config.pkt_mode || !uc->config.metadata_size)
3326 return -ENOTSUPP;
3327
3328 if (!data || len > uc->config.metadata_size)
3329 return -EINVAL;
3330
3331 if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE)
3332 return -EINVAL;
3333
3334 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3335 if (d->dir == DMA_MEM_TO_DEV)
3336 memcpy(h_desc->epib, data, len);
3337
3338 if (uc->config.needs_epib)
3339 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
3340
3341 d->metadata = data;
3342 d->metadata_size = len;
3343 if (uc->config.needs_epib)
3344 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
3345
3346 cppi5_hdesc_update_flags(h_desc, flags);
3347 cppi5_hdesc_update_psdata_size(h_desc, psd_size);
3348
3349 return 0;
3350 }
3351
3352 static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
3353 size_t *payload_len, size_t *max_len)
3354 {
3355 struct udma_desc *d = to_udma_desc(desc);
3356 struct udma_chan *uc = to_udma_chan(desc->chan);
3357 struct cppi5_host_desc_t *h_desc;
3358
3359 if (!uc->config.pkt_mode || !uc->config.metadata_size)
3360 return ERR_PTR(-ENOTSUPP);
3361
3362 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3363
3364 *max_len = uc->config.metadata_size;
3365
3366 *payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ?
3367 CPPI5_INFO0_HDESC_EPIB_SIZE : 0;
3368 *payload_len += cppi5_hdesc_get_psdata_size(h_desc);
3369
3370 return h_desc->epib;
3371 }
3372
3373 static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc,
3374 size_t payload_len)
3375 {
3376 struct udma_desc *d = to_udma_desc(desc);
3377 struct udma_chan *uc = to_udma_chan(desc->chan);
3378 struct cppi5_host_desc_t *h_desc;
3379 u32 psd_size = payload_len;
3380 u32 flags = 0;
3381
3382 if (!uc->config.pkt_mode || !uc->config.metadata_size)
3383 return -ENOTSUPP;
3384
3385 if (payload_len > uc->config.metadata_size)
3386 return -EINVAL;
3387
3388 if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE)
3389 return -EINVAL;
3390
3391 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3392
3393 if (uc->config.needs_epib) {
3394 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
3395 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
3396 }
3397
3398 cppi5_hdesc_update_flags(h_desc, flags);
3399 cppi5_hdesc_update_psdata_size(h_desc, psd_size);
3400
3401 return 0;
3402 }
3403
3404 static struct dma_descriptor_metadata_ops metadata_ops = {
3405 .attach = udma_attach_metadata,
3406 .get_ptr = udma_get_metadata_ptr,
3407 .set_len = udma_set_metadata_len,
3408 };
3409
3410 static struct dma_async_tx_descriptor *
3411 udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
3412 unsigned int sglen, enum dma_transfer_direction dir,
3413 unsigned long tx_flags, void *context)
3414 {
3415 struct udma_chan *uc = to_udma_chan(chan);
3416 enum dma_slave_buswidth dev_width;
3417 struct udma_desc *d;
3418 u32 burst;
3419
3420 if (dir != uc->config.dir &&
3421 (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type)) {
3422 dev_err(chan->device->dev,
3423 "%s: chan%d is for %s, not supporting %s\n",
3424 __func__, uc->id,
3425 dmaengine_get_direction_text(uc->config.dir),
3426 dmaengine_get_direction_text(dir));
3427 return NULL;
3428 }
3429
3430 if (dir == DMA_DEV_TO_MEM) {
3431 dev_width = uc->cfg.src_addr_width;
3432 burst = uc->cfg.src_maxburst;
3433 } else if (dir == DMA_MEM_TO_DEV) {
3434 dev_width = uc->cfg.dst_addr_width;
3435 burst = uc->cfg.dst_maxburst;
3436 } else {
3437 dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
3438 return NULL;
3439 }
3440
3441 if (!burst)
3442 burst = 1;
3443
3444 uc->config.tx_flags = tx_flags;
3445
3446 if (uc->config.pkt_mode)
3447 d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags,
3448 context);
3449 else if (is_slave_direction(uc->config.dir))
3450 d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags,
3451 context);
3452 else
3453 d = udma_prep_slave_sg_triggered_tr(uc, sgl, sglen, dir,
3454 tx_flags, context);
3455
3456 if (!d)
3457 return NULL;
3458
3459 d->dir = dir;
3460 d->desc_idx = 0;
3461 d->tr_idx = 0;
3462
3463 /* static TR for remote PDMA */
3464 if (udma_configure_statictr(uc, d, dev_width, burst)) {
3465 dev_err(uc->ud->dev,
3466 "%s: StaticTR Z is limited to maximum %u (%u)\n",
3467 __func__, uc->ud->match_data->statictr_z_mask,
3468 d->static_tr.bstcnt);
3469
3470 udma_free_hwdesc(uc, d);
3471 kfree(d);
3472 return NULL;
3473 }
3474
3475 if (uc->config.metadata_size)
3476 d->vd.tx.metadata_ops = &metadata_ops;
3477
3478 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
3479 }
3480
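/*
 * Build a cyclic TR mode descriptor: one type 1 TR per period (two when
 * the period is 64K or longer), with the whole TR set reloading
 * indefinitely. Per-period events are suppressed unless DMA_PREP_INTERRUPT
 * was requested.
 */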
3481 static struct udma_desc *
3482 udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
3483 size_t buf_len, size_t period_len,
3484 enum dma_transfer_direction dir, unsigned long flags)
3485 {
3486 struct udma_desc *d;
3487 size_t tr_size, period_addr;
3488 struct cppi5_tr_type1_t *tr_req;
3489 unsigned int periods = buf_len / period_len;
3490 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
3491 unsigned int i;
3492 int num_tr;
3493 u32 period_csf = 0;
3494
3495 num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0,
3496 &tr0_cnt1, &tr1_cnt0);
3497 if (num_tr < 0) {
3498 dev_err(uc->ud->dev, "size %zu is not supported\n",
3499 period_len);
3500 return NULL;
3501 }
3502
3503 /* Now allocate and setup the descriptor. */
3504 tr_size = sizeof(struct cppi5_tr_type1_t);
3505 d = udma_alloc_tr_desc(uc, tr_size, periods * num_tr, dir);
3506 if (!d)
3507 return NULL;
3508
3509 tr_req = d->hwdesc[0].tr_req_base;
3510 if (uc->ud->match_data->type == DMA_TYPE_UDMA)
3511 period_addr = buf_addr;
3512 else
3513 period_addr = buf_addr |
3514 ((u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT);
3515
3516 /*
3517 * For BCDMA <-> PDMA transfers, the EOP flag needs to be set on the
3518 * last TR of a descriptor, to mark the packet as complete.
3519 * This is required for getting the teardown completion message in case
3520 * of TX, and to avoid short-packet error in case of RX.
3521 *
3522 * As we are in cyclic mode, we do not know which period might be the
3523 * last one, so set the flag for each period.
3524 */
3525 if (uc->config.ep_type == PSIL_EP_PDMA_XY &&
3526 uc->ud->match_data->type == DMA_TYPE_BCDMA) {
3527 period_csf = CPPI5_TR_CSF_EOP;
3528 }
3529
3530 for (i = 0; i < periods; i++) {
3531 int tr_idx = i * num_tr;
3532
3533 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
3534 false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3535
3536 tr_req[tr_idx].addr = period_addr;
3537 tr_req[tr_idx].icnt0 = tr0_cnt0;
3538 tr_req[tr_idx].icnt1 = tr0_cnt1;
3539 tr_req[tr_idx].dim1 = tr0_cnt0;
3540
3541 if (num_tr == 2) {
3542 cppi5_tr_csf_set(&tr_req[tr_idx].flags,
3543 CPPI5_TR_CSF_SUPR_EVT);
3544 tr_idx++;
3545
3546 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
3547 false, false,
3548 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3549
3550 tr_req[tr_idx].addr = period_addr + tr0_cnt1 * tr0_cnt0;
3551 tr_req[tr_idx].icnt0 = tr1_cnt0;
3552 tr_req[tr_idx].icnt1 = 1;
3553 tr_req[tr_idx].dim1 = tr1_cnt0;
3554 }
3555
3556 if (!(flags & DMA_PREP_INTERRUPT))
3557 period_csf |= CPPI5_TR_CSF_SUPR_EVT;
3558
3559 if (period_csf)
3560 cppi5_tr_csf_set(&tr_req[tr_idx].flags, period_csf);
3561
3562 period_addr += period_len;
3563 }
3564
3565 return d;
3566 }
3567
3568 static struct udma_desc *
3569 udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr,
3570 size_t buf_len, size_t period_len,
3571 enum dma_transfer_direction dir, unsigned long flags)
3572 {
3573 struct udma_desc *d;
3574 u32 ring_id;
3575 int i;
3576 int periods = buf_len / period_len;
3577
3578 if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1))
3579 return NULL;
3580
3581 if (period_len >= SZ_4M)
3582 return NULL;
3583
3584 d = kzalloc(struct_size(d, hwdesc, periods), GFP_NOWAIT);
3585 if (!d)
3586 return NULL;
3587
3588 d->hwdesc_count = periods;
3589
3590 /* TODO: re-check this... */
3591 if (dir == DMA_DEV_TO_MEM)
3592 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
3593 else
3594 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
3595
3596 if (uc->ud->match_data->type != DMA_TYPE_UDMA)
3597 buf_addr |= (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
3598
3599 for (i = 0; i < periods; i++) {
3600 struct udma_hwdesc *hwdesc = &d->hwdesc[i];
3601 dma_addr_t period_addr = buf_addr + (period_len * i);
3602 struct cppi5_host_desc_t *h_desc;
3603
3604 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
3605 GFP_NOWAIT,
3606 &hwdesc->cppi5_desc_paddr);
3607 if (!hwdesc->cppi5_desc_vaddr) {
3608 dev_err(uc->ud->dev,
3609 "descriptor%d allocation failed\n", i);
3610
3611 udma_free_hwdesc(uc, d);
3612 kfree(d);
3613 return NULL;
3614 }
3615
3616 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
3617 h_desc = hwdesc->cppi5_desc_vaddr;
3618
3619 cppi5_hdesc_init(h_desc, 0, 0);
3620 cppi5_hdesc_set_pktlen(h_desc, period_len);
3621
3622 		/* Flow and Packet ID */
3623 cppi5_desc_set_pktids(&h_desc->hdr, uc->id,
3624 CPPI5_INFO1_DESC_FLOWID_DEFAULT);
3625 cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id);
3626
3627 /* attach each period to a new descriptor */
3628 cppi5_hdesc_attach_buf(h_desc,
3629 period_addr, period_len,
3630 period_addr, period_len);
3631 }
3632
3633 return d;
3634 }
3635
3636 static struct dma_async_tx_descriptor *
3637 udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
3638 size_t period_len, enum dma_transfer_direction dir,
3639 unsigned long flags)
3640 {
3641 struct udma_chan *uc = to_udma_chan(chan);
3642 enum dma_slave_buswidth dev_width;
3643 struct udma_desc *d;
3644 u32 burst;
3645
3646 if (dir != uc->config.dir) {
3647 dev_err(chan->device->dev,
3648 "%s: chan%d is for %s, not supporting %s\n",
3649 __func__, uc->id,
3650 dmaengine_get_direction_text(uc->config.dir),
3651 dmaengine_get_direction_text(dir));
3652 return NULL;
3653 }
3654
3655 uc->cyclic = true;
3656
3657 if (dir == DMA_DEV_TO_MEM) {
3658 dev_width = uc->cfg.src_addr_width;
3659 burst = uc->cfg.src_maxburst;
3660 } else if (dir == DMA_MEM_TO_DEV) {
3661 dev_width = uc->cfg.dst_addr_width;
3662 burst = uc->cfg.dst_maxburst;
3663 } else {
3664 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
3665 return NULL;
3666 }
3667
3668 if (!burst)
3669 burst = 1;
3670
3671 if (uc->config.pkt_mode)
3672 d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len,
3673 dir, flags);
3674 else
3675 d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len,
3676 dir, flags);
3677
3678 if (!d)
3679 return NULL;
3680
3681 d->sglen = buf_len / period_len;
3682
3683 d->dir = dir;
3684 d->residue = buf_len;
3685
3686 /* static TR for remote PDMA */
3687 if (udma_configure_statictr(uc, d, dev_width, burst)) {
3688 dev_err(uc->ud->dev,
3689 "%s: StaticTR Z is limited to maximum %u (%u)\n",
3690 __func__, uc->ud->match_data->statictr_z_mask,
3691 d->static_tr.bstcnt);
3692
3693 udma_free_hwdesc(uc, d);
3694 kfree(d);
3695 return NULL;
3696 }
3697
3698 if (uc->config.metadata_size)
3699 d->vd.tx.metadata_ops = &metadata_ops;
3700
3701 return vchan_tx_prep(&uc->vc, &d->vd, flags);
3702 }
3703
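/*
 * Memcpy uses one or two type 15 TRs as computed by udma_get_tr_counters():
 * for lengths of 64K and above the first TR moves the aligned bulk and the
 * second the remainder. The last TR is marked as end of packet (EOP).
 */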
3704 static struct dma_async_tx_descriptor *
3705 udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
3706 size_t len, unsigned long tx_flags)
3707 {
3708 struct udma_chan *uc = to_udma_chan(chan);
3709 struct udma_desc *d;
3710 struct cppi5_tr_type15_t *tr_req;
3711 int num_tr;
3712 size_t tr_size = sizeof(struct cppi5_tr_type15_t);
3713 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
3714 u32 csf = CPPI5_TR_CSF_SUPR_EVT;
3715
3716 if (uc->config.dir != DMA_MEM_TO_MEM) {
3717 dev_err(chan->device->dev,
3718 "%s: chan%d is for %s, not supporting %s\n",
3719 __func__, uc->id,
3720 dmaengine_get_direction_text(uc->config.dir),
3721 dmaengine_get_direction_text(DMA_MEM_TO_MEM));
3722 return NULL;
3723 }
3724
3725 num_tr = udma_get_tr_counters(len, __ffs(src | dest), &tr0_cnt0,
3726 &tr0_cnt1, &tr1_cnt0);
3727 if (num_tr < 0) {
3728 dev_err(uc->ud->dev, "size %zu is not supported\n",
3729 len);
3730 return NULL;
3731 }
3732
3733 d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM);
3734 if (!d)
3735 return NULL;
3736
3737 d->dir = DMA_MEM_TO_MEM;
3738 d->desc_idx = 0;
3739 d->tr_idx = 0;
3740 d->residue = len;
3741
3742 if (uc->ud->match_data->type != DMA_TYPE_UDMA) {
3743 src |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
3744 dest |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
3745 } else {
3746 csf |= CPPI5_TR_CSF_EOL_ICNT0;
3747 }
3748
3749 tr_req = d->hwdesc[0].tr_req_base;
3750
3751 cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
3752 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3753 cppi5_tr_csf_set(&tr_req[0].flags, csf);
3754
3755 tr_req[0].addr = src;
3756 tr_req[0].icnt0 = tr0_cnt0;
3757 tr_req[0].icnt1 = tr0_cnt1;
3758 tr_req[0].icnt2 = 1;
3759 tr_req[0].icnt3 = 1;
3760 tr_req[0].dim1 = tr0_cnt0;
3761
3762 tr_req[0].daddr = dest;
3763 tr_req[0].dicnt0 = tr0_cnt0;
3764 tr_req[0].dicnt1 = tr0_cnt1;
3765 tr_req[0].dicnt2 = 1;
3766 tr_req[0].dicnt3 = 1;
3767 tr_req[0].ddim1 = tr0_cnt0;
3768
3769 if (num_tr == 2) {
3770 cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
3771 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3772 cppi5_tr_csf_set(&tr_req[1].flags, csf);
3773
3774 tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
3775 tr_req[1].icnt0 = tr1_cnt0;
3776 tr_req[1].icnt1 = 1;
3777 tr_req[1].icnt2 = 1;
3778 tr_req[1].icnt3 = 1;
3779
3780 tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
3781 tr_req[1].dicnt0 = tr1_cnt0;
3782 tr_req[1].dicnt1 = 1;
3783 tr_req[1].dicnt2 = 1;
3784 tr_req[1].dicnt3 = 1;
3785 }
3786
3787 cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, csf | CPPI5_TR_CSF_EOP);
3788
3789 if (uc->config.metadata_size)
3790 d->vd.tx.metadata_ops = &metadata_ops;
3791
3792 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
3793 }
3794
3795 static void udma_issue_pending(struct dma_chan *chan)
3796 {
3797 struct udma_chan *uc = to_udma_chan(chan);
3798 unsigned long flags;
3799
3800 spin_lock_irqsave(&uc->vc.lock, flags);
3801
3802 /* If we have something pending and no active descriptor, then */
3803 if (vchan_issue_pending(&uc->vc) && !uc->desc) {
3804 /*
3805 * start a descriptor if the channel is NOT [marked as
3806 * terminating _and_ it is still running (teardown has not
3807 * completed yet)].
3808 */
3809 if (!(uc->state == UDMA_CHAN_IS_TERMINATING &&
3810 udma_is_chan_running(uc)))
3811 udma_start(uc);
3812 }
3813
3814 spin_unlock_irqrestore(&uc->vc.lock, flags);
3815 }
3816
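/*
 * Report transfer status: the residue is derived from the channel's
 * runtime byte count registers taken modulo the total descriptor length,
 * and for non-native (PDMA) endpoints the in-flight byte count is the
 * difference between the local and peer byte counters.
 */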
3817 static enum dma_status udma_tx_status(struct dma_chan *chan,
3818 dma_cookie_t cookie,
3819 struct dma_tx_state *txstate)
3820 {
3821 struct udma_chan *uc = to_udma_chan(chan);
3822 enum dma_status ret;
3823 unsigned long flags;
3824
3825 spin_lock_irqsave(&uc->vc.lock, flags);
3826
3827 ret = dma_cookie_status(chan, cookie, txstate);
3828
3829 if (!udma_is_chan_running(uc))
3830 ret = DMA_COMPLETE;
3831
3832 if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc))
3833 ret = DMA_PAUSED;
3834
3835 if (ret == DMA_COMPLETE || !txstate)
3836 goto out;
3837
3838 if (uc->desc && uc->desc->vd.tx.cookie == cookie) {
3839 u32 peer_bcnt = 0;
3840 u32 bcnt = 0;
3841 u32 residue = uc->desc->residue;
3842 u32 delay = 0;
3843
3844 if (uc->desc->dir == DMA_MEM_TO_DEV) {
3845 bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
3846
3847 if (uc->config.ep_type != PSIL_EP_NATIVE) {
3848 peer_bcnt = udma_tchanrt_read(uc,
3849 UDMA_CHAN_RT_PEER_BCNT_REG);
3850
3851 if (bcnt > peer_bcnt)
3852 delay = bcnt - peer_bcnt;
3853 }
3854 } else if (uc->desc->dir == DMA_DEV_TO_MEM) {
3855 bcnt = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
3856
3857 if (uc->config.ep_type != PSIL_EP_NATIVE) {
3858 peer_bcnt = udma_rchanrt_read(uc,
3859 UDMA_CHAN_RT_PEER_BCNT_REG);
3860
3861 if (peer_bcnt > bcnt)
3862 delay = peer_bcnt - bcnt;
3863 }
3864 } else {
3865 bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
3866 }
3867
3868 if (bcnt && !(bcnt % uc->desc->residue))
3869 residue = 0;
3870 else
3871 residue -= bcnt % uc->desc->residue;
3872
3873 if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) {
3874 ret = DMA_COMPLETE;
3875 delay = 0;
3876 }
3877
3878 dma_set_residue(txstate, residue);
3879 dma_set_in_flight_bytes(txstate, delay);
3880
3881 } else {
3882 ret = DMA_COMPLETE;
3883 }
3884
3885 out:
3886 spin_unlock_irqrestore(&uc->vc.lock, flags);
3887 return ret;
3888 }
3889
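/*
 * Pause the transfer: for device directions the PAUSE bit is set on the
 * PSI-L peer, for MEM_TO_MEM on the channel's own realtime control register.
 */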
3890 static int udma_pause(struct dma_chan *chan)
3891 {
3892 struct udma_chan *uc = to_udma_chan(chan);
3893
3894 /* pause the channel */
3895 switch (uc->config.dir) {
3896 case DMA_DEV_TO_MEM:
3897 udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3898 UDMA_PEER_RT_EN_PAUSE,
3899 UDMA_PEER_RT_EN_PAUSE);
3900 break;
3901 case DMA_MEM_TO_DEV:
3902 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3903 UDMA_PEER_RT_EN_PAUSE,
3904 UDMA_PEER_RT_EN_PAUSE);
3905 break;
3906 case DMA_MEM_TO_MEM:
3907 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG,
3908 UDMA_CHAN_RT_CTL_PAUSE,
3909 UDMA_CHAN_RT_CTL_PAUSE);
3910 break;
3911 default:
3912 return -EINVAL;
3913 }
3914
3915 return 0;
3916 }
3917
3918 static int udma_resume(struct dma_chan *chan)
3919 {
3920 struct udma_chan *uc = to_udma_chan(chan);
3921
3922 /* resume the channel */
3923 switch (uc->config.dir) {
3924 case DMA_DEV_TO_MEM:
3925 udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3926 UDMA_PEER_RT_EN_PAUSE, 0);
3927
3928 break;
3929 case DMA_MEM_TO_DEV:
3930 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3931 UDMA_PEER_RT_EN_PAUSE, 0);
3932 break;
3933 case DMA_MEM_TO_MEM:
3934 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG,
3935 UDMA_CHAN_RT_CTL_PAUSE, 0);
3936 break;
3937 default:
3938 return -EINVAL;
3939 }
3940
3941 return 0;
3942 }
3943
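/*
 * Stop the channel if it is running, mark the in-flight descriptor as
 * terminated and hand all queued descriptors back to the core for freeing.
 */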
3944 static int udma_terminate_all(struct dma_chan *chan)
3945 {
3946 struct udma_chan *uc = to_udma_chan(chan);
3947 unsigned long flags;
3948 LIST_HEAD(head);
3949
3950 spin_lock_irqsave(&uc->vc.lock, flags);
3951
3952 if (udma_is_chan_running(uc))
3953 udma_stop(uc);
3954
3955 if (uc->desc) {
3956 uc->terminated_desc = uc->desc;
3957 uc->desc = NULL;
3958 uc->terminated_desc->terminated = true;
3959 cancel_delayed_work(&uc->tx_drain.work);
3960 }
3961
3962 uc->paused = false;
3963
3964 vchan_get_all_descriptors(&uc->vc, &head);
3965 spin_unlock_irqrestore(&uc->vc.lock, flags);
3966 vchan_dma_desc_free_list(&uc->vc, &head);
3967
3968 return 0;
3969 }
3970
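/*
 * Wait (up to one second) for a pending teardown to complete, then make
 * sure the channel is stopped and its rings are reset before returning
 * control to the client.
 */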
3971 static void udma_synchronize(struct dma_chan *chan)
3972 {
3973 struct udma_chan *uc = to_udma_chan(chan);
3974 unsigned long timeout = msecs_to_jiffies(1000);
3975
3976 vchan_synchronize(&uc->vc);
3977
3978 if (uc->state == UDMA_CHAN_IS_TERMINATING) {
3979 timeout = wait_for_completion_timeout(&uc->teardown_completed,
3980 timeout);
3981 if (!timeout) {
3982 dev_warn(uc->ud->dev, "chan%d teardown timeout!\n",
3983 uc->id);
3984 udma_dump_chan_stdata(uc);
3985 udma_reset_chan(uc, true);
3986 }
3987 }
3988
3989 udma_reset_chan(uc, false);
3990 if (udma_is_chan_running(uc))
3991 dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id);
3992
3993 cancel_delayed_work_sync(&uc->tx_drain.work);
3994 udma_reset_rings(uc);
3995 }
3996
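/*
 * Prepare the dmaengine_result before a descriptor callback is invoked:
 * residue is derived from the host descriptor packet length, errors are
 * taken from the TR response status.
 */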
3997 static void udma_desc_pre_callback(struct virt_dma_chan *vc,
3998 struct virt_dma_desc *vd,
3999 struct dmaengine_result *result)
4000 {
4001 struct udma_chan *uc = to_udma_chan(&vc->chan);
4002 struct udma_desc *d;
4003 u8 status;
4004
4005 if (!vd)
4006 return;
4007
4008 d = to_udma_desc(&vd->tx);
4009
4010 if (d->metadata_size)
4011 udma_fetch_epib(uc, d);
4012
4013 if (result) {
4014 void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);
4015
4016 if (cppi5_desc_get_type(desc_vaddr) ==
4017 CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
4018 /* Provide residue information for the client */
4019 result->residue = d->residue -
4020 cppi5_hdesc_get_pktlen(desc_vaddr);
4021 if (result->residue)
4022 result->result = DMA_TRANS_ABORTED;
4023 else
4024 result->result = DMA_TRANS_NOERROR;
4025 } else {
4026 result->residue = 0;
4027 /* Propagate TR Response errors to the client */
4028 status = d->hwdesc[0].tr_resp_base->status;
4029 if (status)
4030 result->result = DMA_TRANS_ABORTED;
4031 else
4032 result->result = DMA_TRANS_NOERROR;
4033 }
4034 }
4035 }
4036
4037 /*
4038 * This tasklet handles the completion of a DMA descriptor by
4039 * calling its callback and freeing it.
4040 */
4041 static void udma_vchan_complete(struct tasklet_struct *t)
4042 {
4043 struct virt_dma_chan *vc = from_tasklet(vc, t, task);
4044 struct virt_dma_desc *vd, *_vd;
4045 struct dmaengine_desc_callback cb;
4046 LIST_HEAD(head);
4047
4048 spin_lock_irq(&vc->lock);
4049 list_splice_tail_init(&vc->desc_completed, &head);
4050 vd = vc->cyclic;
4051 if (vd) {
4052 vc->cyclic = NULL;
4053 dmaengine_desc_get_callback(&vd->tx, &cb);
4054 } else {
4055 memset(&cb, 0, sizeof(cb));
4056 }
4057 spin_unlock_irq(&vc->lock);
4058
4059 udma_desc_pre_callback(vc, vd, NULL);
4060 dmaengine_desc_callback_invoke(&cb, NULL);
4061
4062 list_for_each_entry_safe(vd, _vd, &head, node) {
4063 struct dmaengine_result result;
4064
4065 dmaengine_desc_get_callback(&vd->tx, &cb);
4066
4067 list_del(&vd->node);
4068
4069 udma_desc_pre_callback(vc, vd, &result);
4070 dmaengine_desc_callback_invoke(&cb, &result);
4071
4072 vchan_vdesc_fini(vd);
4073 }
4074 }
4075
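/*
 * Undo everything done at channel allocation time: terminate outstanding
 * transfers, free the ring/channel interrupts, release the PSI-L pairing,
 * the channel resources and the descriptor pool.
 */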
4076 static void udma_free_chan_resources(struct dma_chan *chan)
4077 {
4078 struct udma_chan *uc = to_udma_chan(chan);
4079 struct udma_dev *ud = to_udma_dev(chan->device);
4080
4081 udma_terminate_all(chan);
4082 if (uc->terminated_desc) {
4083 udma_reset_chan(uc, false);
4084 udma_reset_rings(uc);
4085 }
4086
4087 cancel_delayed_work_sync(&uc->tx_drain.work);
4088
4089 if (uc->irq_num_ring > 0) {
4090 free_irq(uc->irq_num_ring, uc);
4091
4092 uc->irq_num_ring = 0;
4093 }
4094 if (uc->irq_num_udma > 0) {
4095 free_irq(uc->irq_num_udma, uc);
4096
4097 uc->irq_num_udma = 0;
4098 }
4099
4100 /* Release PSI-L pairing */
4101 if (uc->psil_paired) {
4102 navss_psil_unpair(ud, uc->config.src_thread,
4103 uc->config.dst_thread);
4104 uc->psil_paired = false;
4105 }
4106
4107 vchan_free_chan_resources(&uc->vc);
4108 tasklet_kill(&uc->vc.task);
4109
4110 bcdma_free_bchan_resources(uc);
4111 udma_free_tx_resources(uc);
4112 udma_free_rx_resources(uc);
4113 udma_reset_uchan(uc);
4114
4115 if (uc->use_dma_pool) {
4116 dma_pool_destroy(uc->hdesc_pool);
4117 uc->use_dma_pool = false;
4118 }
4119 }
4120
4121 static struct platform_driver udma_driver;
4122 static struct platform_driver bcdma_driver;
4123 static struct platform_driver pktdma_driver;
4124
4125 struct udma_filter_param {
4126 int remote_thread_id;
4127 u32 atype;
4128 u32 asel;
4129 u32 tr_trigger_type;
4130 };
4131
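/*
 * Filter function used by udma_of_xlate(): validate atype/asel, look up the
 * PSI-L endpoint configuration of the remote thread and fill in the channel
 * configuration (direction, packet mode, metadata sizes) accordingly.
 */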
4132 static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
4133 {
4134 struct udma_chan_config *ucc;
4135 struct psil_endpoint_config *ep_config;
4136 struct udma_filter_param *filter_param;
4137 struct udma_chan *uc;
4138 struct udma_dev *ud;
4139
4140 if (chan->device->dev->driver != &udma_driver.driver &&
4141 chan->device->dev->driver != &bcdma_driver.driver &&
4142 chan->device->dev->driver != &pktdma_driver.driver)
4143 return false;
4144
4145 uc = to_udma_chan(chan);
4146 ucc = &uc->config;
4147 ud = uc->ud;
4148 filter_param = param;
4149
4150 if (filter_param->atype > 2) {
4151 dev_err(ud->dev, "Invalid channel atype: %u\n",
4152 filter_param->atype);
4153 return false;
4154 }
4155
4156 if (filter_param->asel > 15) {
4157 dev_err(ud->dev, "Invalid channel asel: %u\n",
4158 filter_param->asel);
4159 return false;
4160 }
4161
4162 ucc->remote_thread_id = filter_param->remote_thread_id;
4163 ucc->atype = filter_param->atype;
4164 ucc->asel = filter_param->asel;
4165 ucc->tr_trigger_type = filter_param->tr_trigger_type;
4166
4167 if (ucc->tr_trigger_type) {
4168 ucc->dir = DMA_MEM_TO_MEM;
4169 goto triggered_bchan;
4170 } else if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET) {
4171 ucc->dir = DMA_MEM_TO_DEV;
4172 } else {
4173 ucc->dir = DMA_DEV_TO_MEM;
4174 }
4175
4176 ep_config = psil_get_ep_config(ucc->remote_thread_id);
4177 if (IS_ERR(ep_config)) {
4178 dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
4179 ucc->remote_thread_id);
4180 ucc->dir = DMA_MEM_TO_MEM;
4181 ucc->remote_thread_id = -1;
4182 ucc->atype = 0;
4183 ucc->asel = 0;
4184 return false;
4185 }
4186
4187 if (ud->match_data->type == DMA_TYPE_BCDMA &&
4188 ep_config->pkt_mode) {
4189 dev_err(ud->dev,
4190 "Only TR mode is supported (psi-l thread 0x%04x)\n",
4191 ucc->remote_thread_id);
4192 ucc->dir = DMA_MEM_TO_MEM;
4193 ucc->remote_thread_id = -1;
4194 ucc->atype = 0;
4195 ucc->asel = 0;
4196 return false;
4197 }
4198
4199 ucc->pkt_mode = ep_config->pkt_mode;
4200 ucc->channel_tpl = ep_config->channel_tpl;
4201 ucc->notdpkt = ep_config->notdpkt;
4202 ucc->ep_type = ep_config->ep_type;
4203
4204 if (ud->match_data->type == DMA_TYPE_PKTDMA &&
4205 ep_config->mapped_channel_id >= 0) {
4206 ucc->mapped_channel_id = ep_config->mapped_channel_id;
4207 ucc->default_flow_id = ep_config->default_flow_id;
4208 } else {
4209 ucc->mapped_channel_id = -1;
4210 ucc->default_flow_id = -1;
4211 }
4212
4213 if (ucc->ep_type != PSIL_EP_NATIVE) {
4214 const struct udma_match_data *match_data = ud->match_data;
4215
4216 if (match_data->flags & UDMA_FLAG_PDMA_ACC32)
4217 ucc->enable_acc32 = ep_config->pdma_acc32;
4218 if (match_data->flags & UDMA_FLAG_PDMA_BURST)
4219 ucc->enable_burst = ep_config->pdma_burst;
4220 }
4221
4222 ucc->needs_epib = ep_config->needs_epib;
4223 ucc->psd_size = ep_config->psd_size;
4224 ucc->metadata_size =
4225 (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) +
4226 ucc->psd_size;
4227
4228 if (ucc->pkt_mode)
4229 ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
4230 ucc->metadata_size, ud->desc_align);
4231
4232 dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id,
4233 ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir));
4234
4235 return true;
4236
4237 triggered_bchan:
4238 dev_dbg(ud->dev, "chan%d: triggered channel (type: %u)\n", uc->id,
4239 ucc->tr_trigger_type);
4240
4241 return true;
4242
4243 }
4244
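/*
 * Translate the dma-cells from the device tree (trigger type, remote thread
 * id and atype or asel depending on the DMA type) into a channel request.
 */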
4245 static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
4246 struct of_dma *ofdma)
4247 {
4248 struct udma_dev *ud = ofdma->of_dma_data;
4249 dma_cap_mask_t mask = ud->ddev.cap_mask;
4250 struct udma_filter_param filter_param;
4251 struct dma_chan *chan;
4252
4253 if (ud->match_data->type == DMA_TYPE_BCDMA) {
4254 if (dma_spec->args_count != 3)
4255 return NULL;
4256
4257 filter_param.tr_trigger_type = dma_spec->args[0];
4258 filter_param.remote_thread_id = dma_spec->args[1];
4259 filter_param.asel = dma_spec->args[2];
4260 filter_param.atype = 0;
4261 } else {
4262 if (dma_spec->args_count != 1 && dma_spec->args_count != 2)
4263 return NULL;
4264
4265 filter_param.remote_thread_id = dma_spec->args[0];
4266 filter_param.tr_trigger_type = 0;
4267 if (dma_spec->args_count == 2) {
4268 if (ud->match_data->type == DMA_TYPE_UDMA) {
4269 filter_param.atype = dma_spec->args[1];
4270 filter_param.asel = 0;
4271 } else {
4272 filter_param.atype = 0;
4273 filter_param.asel = dma_spec->args[1];
4274 }
4275 } else {
4276 filter_param.atype = 0;
4277 filter_param.asel = 0;
4278 }
4279 }
4280
4281 chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
4282 ofdma->of_node);
4283 if (!chan) {
4284 dev_err(ud->dev, "failed to get channel in %s\n", __func__);
4285 return ERR_PTR(-EINVAL);
4286 }
4287
4288 return chan;
4289 }
4290
4291 static struct udma_match_data am654_main_data = {
4292 .type = DMA_TYPE_UDMA,
4293 .psil_base = 0x1000,
4294 .enable_memcpy_support = true,
4295 .statictr_z_mask = GENMASK(11, 0),
4296 .burst_size = {
4297 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4298 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */
4299 0, /* No UH Channels */
4300 },
4301 };
4302
4303 static struct udma_match_data am654_mcu_data = {
4304 .type = DMA_TYPE_UDMA,
4305 .psil_base = 0x6000,
4306 .enable_memcpy_support = false,
4307 .statictr_z_mask = GENMASK(11, 0),
4308 .burst_size = {
4309 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4310 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */
4311 0, /* No UH Channels */
4312 },
4313 };
4314
4315 static struct udma_match_data j721e_main_data = {
4316 .type = DMA_TYPE_UDMA,
4317 .psil_base = 0x1000,
4318 .enable_memcpy_support = true,
4319 .flags = UDMA_FLAGS_J7_CLASS,
4320 .statictr_z_mask = GENMASK(23, 0),
4321 .burst_size = {
4322 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4323 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* H Channels */
4324 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* UH Channels */
4325 },
4326 };
4327
4328 static struct udma_match_data j721e_mcu_data = {
4329 .type = DMA_TYPE_UDMA,
4330 .psil_base = 0x6000,
4331 .enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */
4332 .flags = UDMA_FLAGS_J7_CLASS,
4333 .statictr_z_mask = GENMASK(23, 0),
4334 .burst_size = {
4335 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4336 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES, /* H Channels */
4337 0, /* No UH Channels */
4338 },
4339 };
4340
4341 static struct udma_soc_data am62a_dmss_csi_soc_data = {
4342 .oes = {
4343 .bcdma_rchan_data = 0xe00,
4344 .bcdma_rchan_ring = 0x1000,
4345 },
4346 };
4347
4348 static struct udma_soc_data j721s2_bcdma_csi_soc_data = {
4349 .oes = {
4350 .bcdma_tchan_data = 0x800,
4351 .bcdma_tchan_ring = 0xa00,
4352 .bcdma_rchan_data = 0xe00,
4353 .bcdma_rchan_ring = 0x1000,
4354 },
4355 };
4356
4357 static struct udma_match_data am62a_bcdma_csirx_data = {
4358 .type = DMA_TYPE_BCDMA,
4359 .psil_base = 0x3100,
4360 .enable_memcpy_support = false,
4361 .burst_size = {
4362 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4363 0, /* No H Channels */
4364 0, /* No UH Channels */
4365 },
4366 .soc_data = &am62a_dmss_csi_soc_data,
4367 };
4368
4369 static struct udma_match_data am64_bcdma_data = {
4370 .type = DMA_TYPE_BCDMA,
4371 .psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */
4372 .enable_memcpy_support = true, /* Supported via bchan */
4373 .flags = UDMA_FLAGS_J7_CLASS,
4374 .statictr_z_mask = GENMASK(23, 0),
4375 .burst_size = {
4376 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4377 0, /* No H Channels */
4378 0, /* No UH Channels */
4379 },
4380 };
4381
4382 static struct udma_match_data am64_pktdma_data = {
4383 .type = DMA_TYPE_PKTDMA,
4384 .psil_base = 0x1000,
4385 .enable_memcpy_support = false, /* PKTDMA does not support MEM_TO_MEM */
4386 .flags = UDMA_FLAGS_J7_CLASS,
4387 .statictr_z_mask = GENMASK(23, 0),
4388 .burst_size = {
4389 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4390 0, /* No H Channels */
4391 0, /* No UH Channels */
4392 },
4393 };
4394
4395 static struct udma_match_data j721s2_bcdma_csi_data = {
4396 .type = DMA_TYPE_BCDMA,
4397 .psil_base = 0x2000,
4398 .enable_memcpy_support = false,
4399 .burst_size = {
4400 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4401 0, /* No H Channels */
4402 0, /* No UH Channels */
4403 },
4404 .soc_data = &j721s2_bcdma_csi_soc_data,
4405 };
4406
4407 static const struct of_device_id udma_of_match[] = {
4408 {
4409 .compatible = "ti,am654-navss-main-udmap",
4410 .data = &am654_main_data,
4411 },
4412 {
4413 .compatible = "ti,am654-navss-mcu-udmap",
4414 .data = &am654_mcu_data,
4415 }, {
4416 .compatible = "ti,j721e-navss-main-udmap",
4417 .data = &j721e_main_data,
4418 }, {
4419 .compatible = "ti,j721e-navss-mcu-udmap",
4420 .data = &j721e_mcu_data,
4421 },
4422 {
4423 .compatible = "ti,am64-dmss-bcdma",
4424 .data = &am64_bcdma_data,
4425 },
4426 {
4427 .compatible = "ti,am64-dmss-pktdma",
4428 .data = &am64_pktdma_data,
4429 },
4430 {
4431 .compatible = "ti,am62a-dmss-bcdma-csirx",
4432 .data = &am62a_bcdma_csirx_data,
4433 },
4434 {
4435 .compatible = "ti,j721s2-dmss-bcdma-csi",
4436 .data = &j721s2_bcdma_csi_data,
4437 },
4438 { /* Sentinel */ },
4439 };
4440 MODULE_DEVICE_TABLE(of, udma_of_match);
4441
4442 static struct udma_soc_data am654_soc_data = {
4443 .oes = {
4444 .udma_rchan = 0x200,
4445 },
4446 };
4447
4448 static struct udma_soc_data j721e_soc_data = {
4449 .oes = {
4450 .udma_rchan = 0x400,
4451 },
4452 };
4453
4454 static struct udma_soc_data j7200_soc_data = {
4455 .oes = {
4456 .udma_rchan = 0x80,
4457 },
4458 };
4459
4460 static struct udma_soc_data am64_soc_data = {
4461 .oes = {
4462 .bcdma_bchan_data = 0x2200,
4463 .bcdma_bchan_ring = 0x2400,
4464 .bcdma_tchan_data = 0x2800,
4465 .bcdma_tchan_ring = 0x2a00,
4466 .bcdma_rchan_data = 0x2e00,
4467 .bcdma_rchan_ring = 0x3000,
4468 .pktdma_tchan_flow = 0x1200,
4469 .pktdma_rchan_flow = 0x1600,
4470 },
4471 .bcdma_trigger_event_offset = 0xc400,
4472 };
4473
4474 static const struct soc_device_attribute k3_soc_devices[] = {
4475 { .family = "AM65X", .data = &am654_soc_data },
4476 { .family = "J721E", .data = &j721e_soc_data },
4477 { .family = "J7200", .data = &j7200_soc_data },
4478 { .family = "AM64X", .data = &am64_soc_data },
4479 { .family = "J721S2", .data = &j721e_soc_data},
4480 { .family = "AM62X", .data = &am64_soc_data },
4481 { .family = "AM62AX", .data = &am64_soc_data },
4482 { .family = "J784S4", .data = &j721e_soc_data },
4483 { .family = "AM62PX", .data = &am64_soc_data },
4484 { .family = "J722S", .data = &am64_soc_data },
4485 { /* sentinel */ }
4486 };
4487
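/*
 * Map the register regions and parse the capability registers to learn how
 * many channels and flows this DMA instance provides.
 */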
4488 static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
4489 {
4490 u32 cap2, cap3, cap4;
4491 int i;
4492
4493 ud->mmrs[MMR_GCFG] = devm_platform_ioremap_resource_byname(pdev, mmr_names[MMR_GCFG]);
4494 if (IS_ERR(ud->mmrs[MMR_GCFG]))
4495 return PTR_ERR(ud->mmrs[MMR_GCFG]);
4496
4497 cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
4498 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4499
4500 switch (ud->match_data->type) {
4501 case DMA_TYPE_UDMA:
4502 ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
4503 ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
4504 ud->echan_cnt = UDMA_CAP2_ECHAN_CNT(cap2);
4505 ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
4506 break;
4507 case DMA_TYPE_BCDMA:
4508 ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2) +
4509 BCDMA_CAP3_HBCHAN_CNT(cap3) +
4510 BCDMA_CAP3_UBCHAN_CNT(cap3);
4511 ud->tchan_cnt = BCDMA_CAP2_TCHAN_CNT(cap2);
4512 ud->rchan_cnt = BCDMA_CAP2_RCHAN_CNT(cap2);
4513 ud->rflow_cnt = ud->rchan_cnt;
4514 break;
4515 case DMA_TYPE_PKTDMA:
4516 cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30);
4517 ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
4518 ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
4519 ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
4520 ud->tflow_cnt = PKTDMA_CAP4_TFLOW_CNT(cap4);
4521 break;
4522 default:
4523 return -EINVAL;
4524 }
4525
4526 for (i = 1; i < MMR_LAST; i++) {
4527 if (i == MMR_BCHANRT && ud->bchan_cnt == 0)
4528 continue;
4529 if (i == MMR_TCHANRT && ud->tchan_cnt == 0)
4530 continue;
4531 if (i == MMR_RCHANRT && ud->rchan_cnt == 0)
4532 continue;
4533
4534 ud->mmrs[i] = devm_platform_ioremap_resource_byname(pdev, mmr_names[i]);
4535 if (IS_ERR(ud->mmrs[i]))
4536 return PTR_ERR(ud->mmrs[i]);
4537 }
4538
4539 return 0;
4540 }
4541
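/*
 * Clear a TISCI provided resource range (primary and secondary) from the
 * "in use" bitmap so that the entries can be allocated by this host.
 */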
4542 static void udma_mark_resource_ranges(struct udma_dev *ud, unsigned long *map,
4543 struct ti_sci_resource_desc *rm_desc,
4544 char *name)
4545 {
4546 bitmap_clear(map, rm_desc->start, rm_desc->num);
4547 bitmap_clear(map, rm_desc->start_sec, rm_desc->num_sec);
4548 dev_dbg(ud->dev, "ti_sci resource range for %s: %d:%d | %d:%d\n", name,
4549 rm_desc->start, rm_desc->num, rm_desc->start_sec,
4550 rm_desc->num_sec);
4551 }
4552
4553 static const char * const range_names[] = {
4554 [RM_RANGE_BCHAN] = "ti,sci-rm-range-bchan",
4555 [RM_RANGE_TCHAN] = "ti,sci-rm-range-tchan",
4556 [RM_RANGE_RCHAN] = "ti,sci-rm-range-rchan",
4557 [RM_RANGE_RFLOW] = "ti,sci-rm-range-rflow",
4558 [RM_RANGE_TFLOW] = "ti,sci-rm-range-tflow",
4559 };
4560
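/*
 * UDMA resource setup: derive the throughput level boundaries, allocate the
 * channel/flow bitmaps, reserve the ranges assigned via TISCI and allocate
 * the MSI interrupt ranges.
 */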
4561 static int udma_setup_resources(struct udma_dev *ud)
4562 {
4563 int ret, i, j;
4564 struct device *dev = ud->dev;
4565 struct ti_sci_resource *rm_res, irq_res;
4566 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
4567 u32 cap3;
4568
4569 /* Set up the throughput level start indexes */
4570 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4571 if (of_device_is_compatible(dev->of_node,
4572 "ti,am654-navss-main-udmap")) {
4573 ud->tchan_tpl.levels = 2;
4574 ud->tchan_tpl.start_idx[0] = 8;
4575 } else if (of_device_is_compatible(dev->of_node,
4576 "ti,am654-navss-mcu-udmap")) {
4577 ud->tchan_tpl.levels = 2;
4578 ud->tchan_tpl.start_idx[0] = 2;
4579 } else if (UDMA_CAP3_UCHAN_CNT(cap3)) {
4580 ud->tchan_tpl.levels = 3;
4581 ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
4582 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
4583 } else if (UDMA_CAP3_HCHAN_CNT(cap3)) {
4584 ud->tchan_tpl.levels = 2;
4585 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
4586 } else {
4587 ud->tchan_tpl.levels = 1;
4588 }
4589
4590 ud->rchan_tpl.levels = ud->tchan_tpl.levels;
4591 ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
4592 ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];
4593
4594 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
4595 sizeof(unsigned long), GFP_KERNEL);
4596 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
4597 GFP_KERNEL);
4598 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
4599 sizeof(unsigned long), GFP_KERNEL);
4600 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
4601 GFP_KERNEL);
4602 ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
4603 sizeof(unsigned long),
4604 GFP_KERNEL);
4605 ud->rflow_gp_map_allocated = devm_kcalloc(dev,
4606 BITS_TO_LONGS(ud->rflow_cnt),
4607 sizeof(unsigned long),
4608 GFP_KERNEL);
4609 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
4610 sizeof(unsigned long),
4611 GFP_KERNEL);
4612 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
4613 GFP_KERNEL);
4614
4615 if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map ||
4616 !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans ||
4617 !ud->rflows || !ud->rflow_in_use)
4618 return -ENOMEM;
4619
4620 /*
4621 * RX flows with the same Ids as RX channels are reserved to be used
4622 * as default flows if remote HW can't generate flow_ids. Those
4623 * RX flows can be requested only explicitly by id.
4624 */
4625 bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt);
4626
4627 /* by default no GP rflows are assigned to Linux */
4628 bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt);
4629
4630 /* Get resource ranges from tisci */
4631 for (i = 0; i < RM_RANGE_LAST; i++) {
4632 if (i == RM_RANGE_BCHAN || i == RM_RANGE_TFLOW)
4633 continue;
4634
4635 tisci_rm->rm_ranges[i] =
4636 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
4637 tisci_rm->tisci_dev_id,
4638 (char *)range_names[i]);
4639 }
4640
4641 /* tchan ranges */
4642 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4643 if (IS_ERR(rm_res)) {
4644 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
4645 irq_res.sets = 1;
4646 } else {
4647 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
4648 for (i = 0; i < rm_res->sets; i++)
4649 udma_mark_resource_ranges(ud, ud->tchan_map,
4650 &rm_res->desc[i], "tchan");
4651 irq_res.sets = rm_res->sets;
4652 }
4653
4654 /* rchan and matching default flow ranges */
4655 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
4656 if (IS_ERR(rm_res)) {
4657 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
4658 irq_res.sets++;
4659 } else {
4660 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
4661 for (i = 0; i < rm_res->sets; i++)
4662 udma_mark_resource_ranges(ud, ud->rchan_map,
4663 &rm_res->desc[i], "rchan");
4664 irq_res.sets += rm_res->sets;
4665 }
4666
4667 irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
4668 if (!irq_res.desc)
4669 return -ENOMEM;
4670 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4671 if (IS_ERR(rm_res)) {
4672 irq_res.desc[0].start = 0;
4673 irq_res.desc[0].num = ud->tchan_cnt;
4674 i = 1;
4675 } else {
4676 for (i = 0; i < rm_res->sets; i++) {
4677 irq_res.desc[i].start = rm_res->desc[i].start;
4678 irq_res.desc[i].num = rm_res->desc[i].num;
4679 irq_res.desc[i].start_sec = rm_res->desc[i].start_sec;
4680 irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
4681 }
4682 }
4683 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
4684 if (IS_ERR(rm_res)) {
4685 irq_res.desc[i].start = 0;
4686 irq_res.desc[i].num = ud->rchan_cnt;
4687 } else {
4688 for (j = 0; j < rm_res->sets; j++, i++) {
4689 if (rm_res->desc[j].num) {
4690 irq_res.desc[i].start = rm_res->desc[j].start +
4691 ud->soc_data->oes.udma_rchan;
4692 irq_res.desc[i].num = rm_res->desc[j].num;
4693 }
4694 if (rm_res->desc[j].num_sec) {
4695 irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
4696 ud->soc_data->oes.udma_rchan;
4697 irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
4698 }
4699 }
4700 }
4701 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
4702 kfree(irq_res.desc);
4703 if (ret) {
4704 dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
4705 return ret;
4706 }
4707
4708 /* GP rflow ranges */
4709 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
4710 if (IS_ERR(rm_res)) {
4711 /* all gp flows are assigned exclusively to Linux */
4712 bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
4713 ud->rflow_cnt - ud->rchan_cnt);
4714 } else {
4715 for (i = 0; i < rm_res->sets; i++)
4716 udma_mark_resource_ranges(ud, ud->rflow_gp_map,
4717 &rm_res->desc[i], "gp-rflow");
4718 }
4719
4720 return 0;
4721 }
4722
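/*
 * BCDMA resource setup: same as for UDMA but covering bchan/tchan/rchan;
 * BCDMA has no real receive flows, so the rflow bookkeeping simply mirrors
 * the rchans.
 */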
4723 static int bcdma_setup_resources(struct udma_dev *ud)
4724 {
4725 int ret, i, j;
4726 struct device *dev = ud->dev;
4727 struct ti_sci_resource *rm_res, irq_res;
4728 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
4729 const struct udma_oes_offsets *oes = &ud->soc_data->oes;
4730 u32 cap;
4731
4732 /* Set up the throughput level start indexes */
4733 cap = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4734 if (BCDMA_CAP3_UBCHAN_CNT(cap)) {
4735 ud->bchan_tpl.levels = 3;
4736 ud->bchan_tpl.start_idx[1] = BCDMA_CAP3_UBCHAN_CNT(cap);
4737 ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap);
4738 } else if (BCDMA_CAP3_HBCHAN_CNT(cap)) {
4739 ud->bchan_tpl.levels = 2;
4740 ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap);
4741 } else {
4742 ud->bchan_tpl.levels = 1;
4743 }
4744
4745 cap = udma_read(ud->mmrs[MMR_GCFG], 0x30);
4746 if (BCDMA_CAP4_URCHAN_CNT(cap)) {
4747 ud->rchan_tpl.levels = 3;
4748 ud->rchan_tpl.start_idx[1] = BCDMA_CAP4_URCHAN_CNT(cap);
4749 ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap);
4750 } else if (BCDMA_CAP4_HRCHAN_CNT(cap)) {
4751 ud->rchan_tpl.levels = 2;
4752 ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap);
4753 } else {
4754 ud->rchan_tpl.levels = 1;
4755 }
4756
4757 if (BCDMA_CAP4_UTCHAN_CNT(cap)) {
4758 ud->tchan_tpl.levels = 3;
4759 ud->tchan_tpl.start_idx[1] = BCDMA_CAP4_UTCHAN_CNT(cap);
4760 ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap);
4761 } else if (BCDMA_CAP4_HTCHAN_CNT(cap)) {
4762 ud->tchan_tpl.levels = 2;
4763 ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap);
4764 } else {
4765 ud->tchan_tpl.levels = 1;
4766 }
4767
4768 ud->bchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->bchan_cnt),
4769 sizeof(unsigned long), GFP_KERNEL);
4770 ud->bchans = devm_kcalloc(dev, ud->bchan_cnt, sizeof(*ud->bchans),
4771 GFP_KERNEL);
4772 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
4773 sizeof(unsigned long), GFP_KERNEL);
4774 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
4775 GFP_KERNEL);
4776 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
4777 sizeof(unsigned long), GFP_KERNEL);
4778 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
4779 GFP_KERNEL);
4780 /* BCDMA does not really have flows, but the driver expects them */
4781 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rchan_cnt),
4782 sizeof(unsigned long),
4783 GFP_KERNEL);
4784 ud->rflows = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rflows),
4785 GFP_KERNEL);
4786
4787 if (!ud->bchan_map || !ud->tchan_map || !ud->rchan_map ||
4788 !ud->rflow_in_use || !ud->bchans || !ud->tchans || !ud->rchans ||
4789 !ud->rflows)
4790 return -ENOMEM;
4791
4792 /* Get resource ranges from tisci */
4793 for (i = 0; i < RM_RANGE_LAST; i++) {
4794 if (i == RM_RANGE_RFLOW || i == RM_RANGE_TFLOW)
4795 continue;
4796 if (i == RM_RANGE_BCHAN && ud->bchan_cnt == 0)
4797 continue;
4798 if (i == RM_RANGE_TCHAN && ud->tchan_cnt == 0)
4799 continue;
4800 if (i == RM_RANGE_RCHAN && ud->rchan_cnt == 0)
4801 continue;
4802
4803 tisci_rm->rm_ranges[i] =
4804 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
4805 tisci_rm->tisci_dev_id,
4806 (char *)range_names[i]);
4807 }
4808
4809 irq_res.sets = 0;
4810
4811 /* bchan ranges */
4812 if (ud->bchan_cnt) {
4813 rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
4814 if (IS_ERR(rm_res)) {
4815 bitmap_zero(ud->bchan_map, ud->bchan_cnt);
4816 irq_res.sets++;
4817 } else {
4818 bitmap_fill(ud->bchan_map, ud->bchan_cnt);
4819 for (i = 0; i < rm_res->sets; i++)
4820 udma_mark_resource_ranges(ud, ud->bchan_map,
4821 &rm_res->desc[i],
4822 "bchan");
4823 irq_res.sets += rm_res->sets;
4824 }
4825 }
4826
4827 /* tchan ranges */
4828 if (ud->tchan_cnt) {
4829 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4830 if (IS_ERR(rm_res)) {
4831 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
4832 irq_res.sets += 2;
4833 } else {
4834 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
4835 for (i = 0; i < rm_res->sets; i++)
4836 udma_mark_resource_ranges(ud, ud->tchan_map,
4837 &rm_res->desc[i],
4838 "tchan");
4839 irq_res.sets += rm_res->sets * 2;
4840 }
4841 }
4842
4843 /* rchan ranges */
4844 if (ud->rchan_cnt) {
4845 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
4846 if (IS_ERR(rm_res)) {
4847 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
4848 irq_res.sets += 2;
4849 } else {
4850 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
4851 for (i = 0; i < rm_res->sets; i++)
4852 udma_mark_resource_ranges(ud, ud->rchan_map,
4853 &rm_res->desc[i],
4854 "rchan");
4855 irq_res.sets += rm_res->sets * 2;
4856 }
4857 }
4858
4859 irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
4860 if (!irq_res.desc)
4861 return -ENOMEM;
4862 if (ud->bchan_cnt) {
4863 rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
4864 if (IS_ERR(rm_res)) {
4865 irq_res.desc[0].start = oes->bcdma_bchan_ring;
4866 irq_res.desc[0].num = ud->bchan_cnt;
4867 i = 1;
4868 } else {
4869 for (i = 0; i < rm_res->sets; i++) {
4870 irq_res.desc[i].start = rm_res->desc[i].start +
4871 oes->bcdma_bchan_ring;
4872 irq_res.desc[i].num = rm_res->desc[i].num;
4873 }
4874 }
4875 } else {
4876 i = 0;
4877 }
4878
4879 if (ud->tchan_cnt) {
4880 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4881 if (IS_ERR(rm_res)) {
4882 irq_res.desc[i].start = oes->bcdma_tchan_data;
4883 irq_res.desc[i].num = ud->tchan_cnt;
4884 irq_res.desc[i + 1].start = oes->bcdma_tchan_ring;
4885 irq_res.desc[i + 1].num = ud->tchan_cnt;
4886 i += 2;
4887 } else {
4888 for (j = 0; j < rm_res->sets; j++, i += 2) {
4889 irq_res.desc[i].start = rm_res->desc[j].start +
4890 oes->bcdma_tchan_data;
4891 irq_res.desc[i].num = rm_res->desc[j].num;
4892
4893 irq_res.desc[i + 1].start = rm_res->desc[j].start +
4894 oes->bcdma_tchan_ring;
4895 irq_res.desc[i + 1].num = rm_res->desc[j].num;
4896 }
4897 }
4898 }
4899 if (ud->rchan_cnt) {
4900 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
4901 if (IS_ERR(rm_res)) {
4902 irq_res.desc[i].start = oes->bcdma_rchan_data;
4903 irq_res.desc[i].num = ud->rchan_cnt;
4904 irq_res.desc[i + 1].start = oes->bcdma_rchan_ring;
4905 irq_res.desc[i + 1].num = ud->rchan_cnt;
4906 i += 2;
4907 } else {
4908 for (j = 0; j < rm_res->sets; j++, i += 2) {
4909 irq_res.desc[i].start = rm_res->desc[j].start +
4910 oes->bcdma_rchan_data;
4911 irq_res.desc[i].num = rm_res->desc[j].num;
4912
4913 irq_res.desc[i + 1].start = rm_res->desc[j].start +
4914 oes->bcdma_rchan_ring;
4915 irq_res.desc[i + 1].num = rm_res->desc[j].num;
4916 }
4917 }
4918 }
4919
4920 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
4921 kfree(irq_res.desc);
4922 if (ret) {
4923 dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
4924 return ret;
4925 }
4926
4927 return 0;
4928 }
4929
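/*
 * PKTDMA resource setup: tchan/rchan plus the mapped tflow/rflow ranges;
 * the interrupt ranges are built from the flow event offsets.
 */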
4930 static int pktdma_setup_resources(struct udma_dev *ud)
4931 {
4932 int ret, i, j;
4933 struct device *dev = ud->dev;
4934 struct ti_sci_resource *rm_res, irq_res;
4935 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
4936 const struct udma_oes_offsets *oes = &ud->soc_data->oes;
4937 u32 cap3;
4938
4939 /* Set up the throughput level start indexes */
4940 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4941 if (UDMA_CAP3_UCHAN_CNT(cap3)) {
4942 ud->tchan_tpl.levels = 3;
4943 ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
4944 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
4945 } else if (UDMA_CAP3_HCHAN_CNT(cap3)) {
4946 ud->tchan_tpl.levels = 2;
4947 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
4948 } else {
4949 ud->tchan_tpl.levels = 1;
4950 }
4951
4952 ud->rchan_tpl.levels = ud->tchan_tpl.levels;
4953 ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
4954 ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];
4955
4956 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
4957 sizeof(unsigned long), GFP_KERNEL);
4958 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
4959 GFP_KERNEL);
4960 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
4961 sizeof(unsigned long), GFP_KERNEL);
4962 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
4963 GFP_KERNEL);
4964 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
4965 sizeof(unsigned long),
4966 GFP_KERNEL);
4967 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
4968 GFP_KERNEL);
4969 ud->tflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tflow_cnt),
4970 sizeof(unsigned long), GFP_KERNEL);
4971
4972 if (!ud->tchan_map || !ud->rchan_map || !ud->tflow_map || !ud->tchans ||
4973 !ud->rchans || !ud->rflows || !ud->rflow_in_use)
4974 return -ENOMEM;
4975
4976 /* Get resource ranges from tisci */
4977 for (i = 0; i < RM_RANGE_LAST; i++) {
4978 if (i == RM_RANGE_BCHAN)
4979 continue;
4980
4981 tisci_rm->rm_ranges[i] =
4982 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
4983 tisci_rm->tisci_dev_id,
4984 (char *)range_names[i]);
4985 }
4986
4987 /* tchan ranges */
4988 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4989 if (IS_ERR(rm_res)) {
4990 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
4991 } else {
4992 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
4993 for (i = 0; i < rm_res->sets; i++)
4994 udma_mark_resource_ranges(ud, ud->tchan_map,
4995 &rm_res->desc[i], "tchan");
4996 }
4997
4998 /* rchan ranges */
4999 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
5000 if (IS_ERR(rm_res)) {
5001 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
5002 } else {
5003 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
5004 for (i = 0; i < rm_res->sets; i++)
5005 udma_mark_resource_ranges(ud, ud->rchan_map,
5006 &rm_res->desc[i], "rchan");
5007 }
5008
5009 /* rflow ranges */
5010 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
5011 if (IS_ERR(rm_res)) {
5012 /* all rflows are assigned exclusively to Linux */
5013 bitmap_zero(ud->rflow_in_use, ud->rflow_cnt);
5014 irq_res.sets = 1;
5015 } else {
5016 bitmap_fill(ud->rflow_in_use, ud->rflow_cnt);
5017 for (i = 0; i < rm_res->sets; i++)
5018 udma_mark_resource_ranges(ud, ud->rflow_in_use,
5019 &rm_res->desc[i], "rflow");
5020 irq_res.sets = rm_res->sets;
5021 }
5022
5023 /* tflow ranges */
5024 rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
5025 if (IS_ERR(rm_res)) {
5026 /* all tflows are assigned exclusively to Linux */
5027 bitmap_zero(ud->tflow_map, ud->tflow_cnt);
5028 irq_res.sets++;
5029 } else {
5030 bitmap_fill(ud->tflow_map, ud->tflow_cnt);
5031 for (i = 0; i < rm_res->sets; i++)
5032 udma_mark_resource_ranges(ud, ud->tflow_map,
5033 &rm_res->desc[i], "tflow");
5034 irq_res.sets += rm_res->sets;
5035 }
5036
5037 irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
5038 if (!irq_res.desc)
5039 return -ENOMEM;
5040 rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
5041 if (IS_ERR(rm_res)) {
5042 irq_res.desc[0].start = oes->pktdma_tchan_flow;
5043 irq_res.desc[0].num = ud->tflow_cnt;
5044 i = 1;
5045 } else {
5046 for (i = 0; i < rm_res->sets; i++) {
5047 irq_res.desc[i].start = rm_res->desc[i].start +
5048 oes->pktdma_tchan_flow;
5049 irq_res.desc[i].num = rm_res->desc[i].num;
5050 }
5051 }
5052 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
5053 if (IS_ERR(rm_res)) {
5054 irq_res.desc[i].start = oes->pktdma_rchan_flow;
5055 irq_res.desc[i].num = ud->rflow_cnt;
5056 } else {
5057 for (j = 0; j < rm_res->sets; j++, i++) {
5058 irq_res.desc[i].start = rm_res->desc[j].start +
5059 oes->pktdma_rchan_flow;
5060 irq_res.desc[i].num = rm_res->desc[j].num;
5061 }
5062 }
5063 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
5064 kfree(irq_res.desc);
5065 if (ret) {
5066 dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
5067 return ret;
5068 }
5069
5070 return 0;
5071 }
5072
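/*
 * Run the type specific resource setup and return the number of channels
 * usable by this host (or a negative error code).
 */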
5073 static int setup_resources(struct udma_dev *ud)
5074 {
5075 struct device *dev = ud->dev;
5076 int ch_count, ret;
5077
5078 switch (ud->match_data->type) {
5079 case DMA_TYPE_UDMA:
5080 ret = udma_setup_resources(ud);
5081 break;
5082 case DMA_TYPE_BCDMA:
5083 ret = bcdma_setup_resources(ud);
5084 break;
5085 case DMA_TYPE_PKTDMA:
5086 ret = pktdma_setup_resources(ud);
5087 break;
5088 default:
5089 return -EINVAL;
5090 }
5091
5092 if (ret)
5093 return ret;
5094
5095 ch_count = ud->bchan_cnt + ud->tchan_cnt + ud->rchan_cnt;
5096 if (ud->bchan_cnt)
5097 ch_count -= bitmap_weight(ud->bchan_map, ud->bchan_cnt);
5098 ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
5099 ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
5100 if (!ch_count)
5101 return -ENODEV;
5102
5103 ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
5104 GFP_KERNEL);
5105 if (!ud->channels)
5106 return -ENOMEM;
5107
5108 switch (ud->match_data->type) {
5109 case DMA_TYPE_UDMA:
5110 dev_info(dev,
5111 "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
5112 ch_count,
5113 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
5114 ud->tchan_cnt),
5115 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
5116 ud->rchan_cnt),
5117 ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
5118 ud->rflow_cnt));
5119 break;
5120 case DMA_TYPE_BCDMA:
5121 dev_info(dev,
5122 "Channels: %d (bchan: %u, tchan: %u, rchan: %u)\n",
5123 ch_count,
5124 ud->bchan_cnt - bitmap_weight(ud->bchan_map,
5125 ud->bchan_cnt),
5126 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
5127 ud->tchan_cnt),
5128 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
5129 ud->rchan_cnt));
5130 break;
5131 case DMA_TYPE_PKTDMA:
5132 dev_info(dev,
5133 "Channels: %d (tchan: %u, rchan: %u)\n",
5134 ch_count,
5135 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
5136 ud->tchan_cnt),
5137 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
5138 ud->rchan_cnt));
5139 break;
5140 default:
5141 break;
5142 }
5143
5144 return ch_count;
5145 }
5146
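/*
 * Prepare the pre-built descriptors (one for TR mode, one for packet mode)
 * used to flush stale data from a receive channel into a scratch buffer
 * during channel teardown.
 */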
5147 static int udma_setup_rx_flush(struct udma_dev *ud)
5148 {
5149 struct udma_rx_flush *rx_flush = &ud->rx_flush;
5150 struct cppi5_desc_hdr_t *tr_desc;
5151 struct cppi5_tr_type1_t *tr_req;
5152 struct cppi5_host_desc_t *desc;
5153 struct device *dev = ud->dev;
5154 struct udma_hwdesc *hwdesc;
5155 size_t tr_size;
5156
5157 /* Allocate 1K buffer for discarded data on RX channel teardown */
5158 rx_flush->buffer_size = SZ_1K;
5159 rx_flush->buffer_vaddr = devm_kzalloc(dev, rx_flush->buffer_size,
5160 GFP_KERNEL);
5161 if (!rx_flush->buffer_vaddr)
5162 return -ENOMEM;
5163
5164 rx_flush->buffer_paddr = dma_map_single(dev, rx_flush->buffer_vaddr,
5165 rx_flush->buffer_size,
5166 DMA_TO_DEVICE);
5167 if (dma_mapping_error(dev, rx_flush->buffer_paddr))
5168 return -ENOMEM;
5169
5170 /* Set up descriptor to be used for TR mode */
5171 hwdesc = &rx_flush->hwdescs[0];
5172 tr_size = sizeof(struct cppi5_tr_type1_t);
5173 hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 1);
5174 hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
5175 ud->desc_align);
5176
5177 hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
5178 GFP_KERNEL);
5179 if (!hwdesc->cppi5_desc_vaddr)
5180 return -ENOMEM;
5181
5182 hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
5183 hwdesc->cppi5_desc_size,
5184 DMA_TO_DEVICE);
5185 if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
5186 return -ENOMEM;
5187
5188 /* Start of the TR req records */
5189 hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
5190 /* Start address of the TR response array */
5191 hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size;
5192
5193 tr_desc = hwdesc->cppi5_desc_vaddr;
5194 cppi5_trdesc_init(tr_desc, 1, tr_size, 0, 0);
5195 cppi5_desc_set_pktids(tr_desc, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
5196 cppi5_desc_set_retpolicy(tr_desc, 0, 0);
5197
5198 tr_req = hwdesc->tr_req_base;
5199 cppi5_tr_init(&tr_req->flags, CPPI5_TR_TYPE1, false, false,
5200 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
5201 cppi5_tr_csf_set(&tr_req->flags, CPPI5_TR_CSF_SUPR_EVT);
5202
5203 tr_req->addr = rx_flush->buffer_paddr;
5204 tr_req->icnt0 = rx_flush->buffer_size;
5205 tr_req->icnt1 = 1;
5206
5207 dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
5208 hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
5209
5210 /* Set up descriptor to be used for packet mode */
5211 hwdesc = &rx_flush->hwdescs[1];
5212 hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
5213 CPPI5_INFO0_HDESC_EPIB_SIZE +
5214 CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE,
5215 ud->desc_align);
5216
5217 hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
5218 GFP_KERNEL);
5219 if (!hwdesc->cppi5_desc_vaddr)
5220 return -ENOMEM;
5221
5222 hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
5223 hwdesc->cppi5_desc_size,
5224 DMA_TO_DEVICE);
5225 if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
5226 return -ENOMEM;
5227
5228 desc = hwdesc->cppi5_desc_vaddr;
5229 cppi5_hdesc_init(desc, 0, 0);
5230 cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
5231 cppi5_desc_set_retpolicy(&desc->hdr, 0, 0);
5232
5233 cppi5_hdesc_attach_buf(desc,
5234 rx_flush->buffer_paddr, rx_flush->buffer_size,
5235 rx_flush->buffer_paddr, rx_flush->buffer_size);
5236
5237 dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
5238 hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
5239 return 0;
5240 }
5241
5242 #ifdef CONFIG_DEBUG_FS
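/* debugfs summary support: print one descriptive line per in-use channel */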
5243 static void udma_dbg_summary_show_chan(struct seq_file *s,
5244 struct dma_chan *chan)
5245 {
5246 struct udma_chan *uc = to_udma_chan(chan);
5247 struct udma_chan_config *ucc = &uc->config;
5248
5249 seq_printf(s, " %-13s| %s", dma_chan_name(chan),
5250 chan->dbg_client_name ?: "in-use");
5251 if (ucc->tr_trigger_type)
5252 seq_puts(s, " (triggered, ");
5253 else
5254 seq_printf(s, " (%s, ",
5255 dmaengine_get_direction_text(uc->config.dir));
5256
5257 switch (uc->config.dir) {
5258 case DMA_MEM_TO_MEM:
5259 if (uc->ud->match_data->type == DMA_TYPE_BCDMA) {
5260 seq_printf(s, "bchan%d)\n", uc->bchan->id);
5261 return;
5262 }
5263
5264 seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id,
5265 ucc->src_thread, ucc->dst_thread);
5266 break;
5267 case DMA_DEV_TO_MEM:
5268 seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id,
5269 ucc->src_thread, ucc->dst_thread);
5270 if (uc->ud->match_data->type == DMA_TYPE_PKTDMA)
5271 seq_printf(s, "rflow%d, ", uc->rflow->id);
5272 break;
5273 case DMA_MEM_TO_DEV:
5274 seq_printf(s, "tchan%d [0x%04x -> 0x%04x], ", uc->tchan->id,
5275 ucc->src_thread, ucc->dst_thread);
5276 if (uc->ud->match_data->type == DMA_TYPE_PKTDMA)
5277 seq_printf(s, "tflow%d, ", uc->tchan->tflow_id);
5278 break;
5279 default:
5280 seq_printf(s, ")\n");
5281 return;
5282 }
5283
5284 if (ucc->ep_type == PSIL_EP_NATIVE) {
5285 seq_printf(s, "PSI-L Native");
5286 if (ucc->metadata_size) {
5287 seq_printf(s, "[%s", ucc->needs_epib ? " EPIB" : "");
5288 if (ucc->psd_size)
5289 seq_printf(s, " PSDsize:%u", ucc->psd_size);
5290 seq_printf(s, " ]");
5291 }
5292 } else {
5293 seq_printf(s, "PDMA");
5294 if (ucc->enable_acc32 || ucc->enable_burst)
5295 seq_printf(s, "[%s%s ]",
5296 ucc->enable_acc32 ? " ACC32" : "",
5297 ucc->enable_burst ? " BURST" : "");
5298 }
5299
5300 seq_printf(s, ", %s)\n", ucc->pkt_mode ? "Packet mode" : "TR mode");
5301 }
5302
5303 static void udma_dbg_summary_show(struct seq_file *s,
5304 struct dma_device *dma_dev)
5305 {
5306 struct dma_chan *chan;
5307
5308 list_for_each_entry(chan, &dma_dev->channels, device_node) {
5309 if (chan->client_count)
5310 udma_dbg_summary_show_chan(s, chan);
5311 }
5312 }
5313 #endif /* CONFIG_DEBUG_FS */
5314
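/*
 * The advertised memcpy alignment follows the burst size of the highest
 * throughput level usable for MEM_TO_MEM transfers.
 */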
5315 static enum dmaengine_alignment udma_get_copy_align(struct udma_dev *ud)
5316 {
5317 const struct udma_match_data *match_data = ud->match_data;
5318 u8 tpl;
5319
5320 if (!match_data->enable_memcpy_support)
5321 return DMAENGINE_ALIGN_8_BYTES;
5322
5323 /* Get the highest TPL level the device supports for memcpy */
5324 if (ud->bchan_cnt)
5325 tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, 0);
5326 else if (ud->tchan_cnt)
5327 tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, 0);
5328 else
5329 return DMAENGINE_ALIGN_8_BYTES;
5330
5331 switch (match_data->burst_size[tpl]) {
5332 case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES:
5333 return DMAENGINE_ALIGN_256_BYTES;
5334 case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES:
5335 return DMAENGINE_ALIGN_128_BYTES;
5336 case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES:
5337 fallthrough;
5338 default:
5339 return DMAENGINE_ALIGN_64_BYTES;
5340 }
5341 }
5342
5343 #define TI_UDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
5344 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
5345 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
5346 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
5347 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
5348
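/*
 * Probe: read match/SoC data, map the registers, acquire the TISCI and ring
 * accelerator handles, set up resources and channels, then register the DMA
 * device and the OF DMA controller.
 */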
5349 static int udma_probe(struct platform_device *pdev)
5350 {
5351 struct device_node *navss_node = pdev->dev.parent->of_node;
5352 const struct soc_device_attribute *soc;
5353 struct device *dev = &pdev->dev;
5354 struct udma_dev *ud;
5355 const struct of_device_id *match;
5356 int i, ret;
5357 int ch_count;
5358
5359 ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
5360 if (ret)
5361 dev_err(dev, "failed to set DMA mask\n");
5362
5363 ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL);
5364 if (!ud)
5365 return -ENOMEM;
5366
5367 match = of_match_node(udma_of_match, dev->of_node);
5368 if (!match) {
5369 dev_err(dev, "No compatible match found\n");
5370 return -ENODEV;
5371 }
5372 ud->match_data = match->data;
5373
5374 ud->soc_data = ud->match_data->soc_data;
5375 if (!ud->soc_data) {
5376 soc = soc_device_match(k3_soc_devices);
5377 if (!soc) {
5378 dev_err(dev, "No compatible SoC found\n");
5379 return -ENODEV;
5380 }
5381 ud->soc_data = soc->data;
5382 }
5383
5384 ret = udma_get_mmrs(pdev, ud);
5385 if (ret)
5386 return ret;
5387
5388 ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci");
5389 if (IS_ERR(ud->tisci_rm.tisci))
5390 return PTR_ERR(ud->tisci_rm.tisci);
5391
5392 ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id",
5393 &ud->tisci_rm.tisci_dev_id);
5394 if (ret) {
5395 dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
5396 return ret;
5397 }
5398 pdev->id = ud->tisci_rm.tisci_dev_id;
5399
5400 ret = of_property_read_u32(navss_node, "ti,sci-dev-id",
5401 &ud->tisci_rm.tisci_navss_dev_id);
5402 if (ret) {
5403 dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret);
5404 return ret;
5405 }
5406
5407 if (ud->match_data->type == DMA_TYPE_UDMA) {
5408 ret = of_property_read_u32(dev->of_node, "ti,udma-atype",
5409 &ud->atype);
5410 if (!ret && ud->atype > 2) {
5411 dev_err(dev, "Invalid atype: %u\n", ud->atype);
5412 return -EINVAL;
5413 }
5414 } else {
5415 ret = of_property_read_u32(dev->of_node, "ti,asel",
5416 &ud->asel);
5417 if (!ret && ud->asel > 15) {
5418 dev_err(dev, "Invalid asel: %u\n", ud->asel);
5419 return -EINVAL;
5420 }
5421 }
5422
5423 ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
5424 ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;
5425
5426 if (ud->match_data->type == DMA_TYPE_UDMA) {
5427 ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
5428 } else {
5429 struct k3_ringacc_init_data ring_init_data;
5430
5431 ring_init_data.tisci = ud->tisci_rm.tisci;
5432 ring_init_data.tisci_dev_id = ud->tisci_rm.tisci_dev_id;
5433 if (ud->match_data->type == DMA_TYPE_BCDMA) {
5434 ring_init_data.num_rings = ud->bchan_cnt +
5435 ud->tchan_cnt +
5436 ud->rchan_cnt;
5437 } else {
5438 ring_init_data.num_rings = ud->rflow_cnt +
5439 ud->tflow_cnt;
5440 }
5441
5442 ud->ringacc = k3_ringacc_dmarings_init(pdev, &ring_init_data);
5443 }
5444
5445 if (IS_ERR(ud->ringacc))
5446 return PTR_ERR(ud->ringacc);
5447
5448 dev->msi.domain = of_msi_get_domain(dev, dev->of_node,
5449 DOMAIN_BUS_TI_SCI_INTA_MSI);
5450 if (!dev->msi.domain) {
5451 return -EPROBE_DEFER;
5452 }
5453
5454 dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
5455 /* cyclic operation is not supported via PKTDMA */
5456 if (ud->match_data->type != DMA_TYPE_PKTDMA) {
5457 dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);
5458 ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
5459 }
5460
5461 ud->ddev.device_config = udma_slave_config;
5462 ud->ddev.device_prep_slave_sg = udma_prep_slave_sg;
5463 ud->ddev.device_issue_pending = udma_issue_pending;
5464 ud->ddev.device_tx_status = udma_tx_status;
5465 ud->ddev.device_pause = udma_pause;
5466 ud->ddev.device_resume = udma_resume;
5467 ud->ddev.device_terminate_all = udma_terminate_all;
5468 ud->ddev.device_synchronize = udma_synchronize;
5469 #ifdef CONFIG_DEBUG_FS
5470 ud->ddev.dbg_summary_show = udma_dbg_summary_show;
5471 #endif
5472
5473 switch (ud->match_data->type) {
5474 case DMA_TYPE_UDMA:
5475 ud->ddev.device_alloc_chan_resources =
5476 udma_alloc_chan_resources;
5477 break;
5478 case DMA_TYPE_BCDMA:
5479 ud->ddev.device_alloc_chan_resources =
5480 bcdma_alloc_chan_resources;
5481 ud->ddev.device_router_config = bcdma_router_config;
5482 break;
5483 case DMA_TYPE_PKTDMA:
5484 ud->ddev.device_alloc_chan_resources =
5485 pktdma_alloc_chan_resources;
5486 break;
5487 default:
5488 return -EINVAL;
5489 }
5490 ud->ddev.device_free_chan_resources = udma_free_chan_resources;
5491
5492 ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
5493 ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
5494 ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
5495 ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
5496 ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
5497 DESC_METADATA_ENGINE;
5498 if (ud->match_data->enable_memcpy_support &&
5499 !(ud->match_data->type == DMA_TYPE_BCDMA && ud->bchan_cnt == 0)) {
5500 dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask);
5501 ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy;
5502 ud->ddev.directions |= BIT(DMA_MEM_TO_MEM);
5503 }
5504
5505 ud->ddev.dev = dev;
5506 ud->dev = dev;
5507 ud->psil_base = ud->match_data->psil_base;
5508
5509 INIT_LIST_HEAD(&ud->ddev.channels);
5510 INIT_LIST_HEAD(&ud->desc_to_purge);
5511
5512 ch_count = setup_resources(ud);
5513 if (ch_count <= 0)
5514 return ch_count;
5515
5516 spin_lock_init(&ud->lock);
5517 INIT_WORK(&ud->purge_work, udma_purge_desc_work);
5518
5519 ud->desc_align = 64;
5520 if (ud->desc_align < dma_get_cache_alignment())
5521 ud->desc_align = dma_get_cache_alignment();
5522
5523 ret = udma_setup_rx_flush(ud);
5524 if (ret)
5525 return ret;
5526
5527 for (i = 0; i < ud->bchan_cnt; i++) {
5528 struct udma_bchan *bchan = &ud->bchans[i];
5529
5530 bchan->id = i;
5531 bchan->reg_rt = ud->mmrs[MMR_BCHANRT] + i * 0x1000;
5532 }
5533
5534 for (i = 0; i < ud->tchan_cnt; i++) {
5535 struct udma_tchan *tchan = &ud->tchans[i];
5536
5537 tchan->id = i;
5538 tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000;
5539 }
5540
5541 for (i = 0; i < ud->rchan_cnt; i++) {
5542 struct udma_rchan *rchan = &ud->rchans[i];
5543
5544 rchan->id = i;
5545 rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000;
5546 }
5547
5548 for (i = 0; i < ud->rflow_cnt; i++) {
5549 struct udma_rflow *rflow = &ud->rflows[i];
5550
5551 rflow->id = i;
5552 }
5553
5554 for (i = 0; i < ch_count; i++) {
5555 struct udma_chan *uc = &ud->channels[i];
5556
5557 uc->ud = ud;
5558 uc->vc.desc_free = udma_desc_free;
5559 uc->id = i;
5560 uc->bchan = NULL;
5561 uc->tchan = NULL;
5562 uc->rchan = NULL;
5563 uc->config.remote_thread_id = -1;
5564 uc->config.mapped_channel_id = -1;
5565 uc->config.default_flow_id = -1;
5566 uc->config.dir = DMA_MEM_TO_MEM;
5567 uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
5568 dev_name(dev), i);
5569
5570 vchan_init(&uc->vc, &ud->ddev);
5571 /* Use custom vchan completion handling */
5572 tasklet_setup(&uc->vc.task, udma_vchan_complete);
5573 init_completion(&uc->teardown_completed);
5574 INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion);
5575 }
5576
5577 /* Configure the copy_align to the maximum burst size the device supports */
5578 ud->ddev.copy_align = udma_get_copy_align(ud);
5579
5580 ret = dma_async_device_register(&ud->ddev);
5581 if (ret) {
5582 dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
5583 return ret;
5584 }
5585
5586 platform_set_drvdata(pdev, ud);
5587
5588 ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud);
5589 if (ret) {
5590 dev_err(dev, "failed to register of_dma controller\n");
5591 dma_async_device_unregister(&ud->ddev);
5592 }
5593
5594 return ret;
5595 }
5596
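/*
 * System suspend: save the configuration of every in-use channel and free
 * its resources; udma_pm_resume() re-allocates them from the saved
 * configuration.
 */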
5597 static int __maybe_unused udma_pm_suspend(struct device *dev)
5598 {
5599 struct udma_dev *ud = dev_get_drvdata(dev);
5600 struct dma_device *dma_dev = &ud->ddev;
5601 struct dma_chan *chan;
5602 struct udma_chan *uc;
5603
5604 list_for_each_entry(chan, &dma_dev->channels, device_node) {
5605 if (chan->client_count) {
5606 uc = to_udma_chan(chan);
5607 /* backup the channel configuration */
5608 memcpy(&uc->backup_config, &uc->config,
5609 sizeof(struct udma_chan_config));
5610 dev_dbg(dev, "Suspending channel %s\n",
5611 dma_chan_name(chan));
5612 ud->ddev.device_free_chan_resources(chan);
5613 }
5614 }
5615
5616 return 0;
5617 }
5618
5619 static int __maybe_unused udma_pm_resume(struct device *dev)
5620 {
5621 struct udma_dev *ud = dev_get_drvdata(dev);
5622 struct dma_device *dma_dev = &ud->ddev;
5623 struct dma_chan *chan;
5624 struct udma_chan *uc;
5625 int ret;
5626
5627 list_for_each_entry(chan, &dma_dev->channels, device_node) {
5628 if (chan->client_count) {
5629 uc = to_udma_chan(chan);
5630 /* restore the channel configuration */
5631 memcpy(&uc->config, &uc->backup_config,
5632 sizeof(struct udma_chan_config));
5633 dev_dbg(dev, "Resuming channel %s\n",
5634 dma_chan_name(chan));
5635 ret = ud->ddev.device_alloc_chan_resources(chan);
5636 if (ret)
5637 return ret;
5638 }
5639 }
5640
5641 return 0;
5642 }
5643
5644 static const struct dev_pm_ops udma_pm_ops = {
5645 SET_LATE_SYSTEM_SLEEP_PM_OPS(udma_pm_suspend, udma_pm_resume)
5646 };
5647
5648 static struct platform_driver udma_driver = {
5649 .driver = {
5650 .name = "ti-udma",
5651 .of_match_table = udma_of_match,
5652 .suppress_bind_attrs = true,
5653 .pm = &udma_pm_ops,
5654 },
5655 .probe = udma_probe,
5656 };
5657
5658 module_platform_driver(udma_driver);
5659 MODULE_DESCRIPTION("Texas Instruments UDMA support");
5660 MODULE_LICENSE("GPL v2");
5661
5662 /* Private interfaces to UDMA */
5663 #include "k3-udma-private.c"
5664