1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com 4 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com> 5 */ 6 7 #include <linux/kernel.h> 8 #include <linux/delay.h> 9 #include <linux/dmaengine.h> 10 #include <linux/dma-mapping.h> 11 #include <linux/dmapool.h> 12 #include <linux/err.h> 13 #include <linux/init.h> 14 #include <linux/interrupt.h> 15 #include <linux/list.h> 16 #include <linux/platform_device.h> 17 #include <linux/slab.h> 18 #include <linux/spinlock.h> 19 #include <linux/of.h> 20 #include <linux/of_dma.h> 21 #include <linux/of_device.h> 22 #include <linux/of_irq.h> 23 #include <linux/workqueue.h> 24 #include <linux/completion.h> 25 #include <linux/soc/ti/k3-ringacc.h> 26 #include <linux/soc/ti/ti_sci_protocol.h> 27 #include <linux/soc/ti/ti_sci_inta_msi.h> 28 #include <linux/dma/ti-cppi5.h> 29 30 #include "../virt-dma.h" 31 #include "k3-udma.h" 32 #include "k3-psil-priv.h" 33 34 struct udma_static_tr { 35 u8 elsize; /* RPSTR0 */ 36 u16 elcnt; /* RPSTR0 */ 37 u16 bstcnt; /* RPSTR1 */ 38 }; 39 40 #define K3_UDMA_MAX_RFLOWS 1024 41 #define K3_UDMA_DEFAULT_RING_SIZE 16 42 43 /* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */ 44 #define UDMA_RFLOW_SRCTAG_NONE 0 45 #define UDMA_RFLOW_SRCTAG_CFG_TAG 1 46 #define UDMA_RFLOW_SRCTAG_FLOW_ID 2 47 #define UDMA_RFLOW_SRCTAG_SRC_TAG 4 48 49 #define UDMA_RFLOW_DSTTAG_NONE 0 50 #define UDMA_RFLOW_DSTTAG_CFG_TAG 1 51 #define UDMA_RFLOW_DSTTAG_FLOW_ID 2 52 #define UDMA_RFLOW_DSTTAG_DST_TAG_LO 4 53 #define UDMA_RFLOW_DSTTAG_DST_TAG_HI 5 54 55 struct udma_chan; 56 57 enum udma_mmr { 58 MMR_GCFG = 0, 59 MMR_RCHANRT, 60 MMR_TCHANRT, 61 MMR_LAST, 62 }; 63 64 static const char * const mmr_names[] = { "gcfg", "rchanrt", "tchanrt" }; 65 66 struct udma_tchan { 67 void __iomem *reg_rt; 68 69 int id; 70 struct k3_ring *t_ring; /* Transmit ring */ 71 struct k3_ring *tc_ring; /* Transmit Completion ring */ 72 }; 73 74 struct udma_rflow { 75 int id; 76 struct k3_ring *fd_ring; /* Free Descriptor ring */ 77 struct k3_ring *r_ring; /* Receive ring */ 78 }; 79 80 struct udma_rchan { 81 void __iomem *reg_rt; 82 83 int id; 84 }; 85 86 #define UDMA_FLAG_PDMA_ACC32 BIT(0) 87 #define UDMA_FLAG_PDMA_BURST BIT(1) 88 89 struct udma_match_data { 90 u32 psil_base; 91 bool enable_memcpy_support; 92 u32 flags; 93 u32 statictr_z_mask; 94 u32 rchan_oes_offset; 95 }; 96 97 struct udma_hwdesc { 98 size_t cppi5_desc_size; 99 void *cppi5_desc_vaddr; 100 dma_addr_t cppi5_desc_paddr; 101 102 /* TR descriptor internal pointers */ 103 void *tr_req_base; 104 struct cppi5_tr_resp_t *tr_resp_base; 105 }; 106 107 struct udma_rx_flush { 108 struct udma_hwdesc hwdescs[2]; 109 110 size_t buffer_size; 111 void *buffer_vaddr; 112 dma_addr_t buffer_paddr; 113 }; 114 115 struct udma_dev { 116 struct dma_device ddev; 117 struct device *dev; 118 void __iomem *mmrs[MMR_LAST]; 119 const struct udma_match_data *match_data; 120 121 u8 tpl_levels; 122 u32 tpl_start_idx[3]; 123 124 size_t desc_align; /* alignment to use for descriptors */ 125 126 struct udma_tisci_rm tisci_rm; 127 128 struct k3_ringacc *ringacc; 129 130 struct work_struct purge_work; 131 struct list_head desc_to_purge; 132 spinlock_t lock; 133 134 struct udma_rx_flush rx_flush; 135 136 int tchan_cnt; 137 int echan_cnt; 138 int rchan_cnt; 139 int rflow_cnt; 140 unsigned long *tchan_map; 141 unsigned long *rchan_map; 142 unsigned long *rflow_gp_map; 143 unsigned long *rflow_gp_map_allocated; 144 unsigned long *rflow_in_use; 145 146 struct udma_tchan 
*tchans; 147 struct udma_rchan *rchans; 148 struct udma_rflow *rflows; 149 150 struct udma_chan *channels; 151 u32 psil_base; 152 u32 atype; 153 }; 154 155 struct udma_desc { 156 struct virt_dma_desc vd; 157 158 bool terminated; 159 160 enum dma_transfer_direction dir; 161 162 struct udma_static_tr static_tr; 163 u32 residue; 164 165 unsigned int sglen; 166 unsigned int desc_idx; /* Only used for cyclic in packet mode */ 167 unsigned int tr_idx; 168 169 u32 metadata_size; 170 void *metadata; /* pointer to provided metadata buffer (EPIP, PSdata) */ 171 172 unsigned int hwdesc_count; 173 struct udma_hwdesc hwdesc[0]; 174 }; 175 176 enum udma_chan_state { 177 UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */ 178 UDMA_CHAN_IS_ACTIVE, /* Normal operation */ 179 UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */ 180 }; 181 182 struct udma_tx_drain { 183 struct delayed_work work; 184 ktime_t tstamp; 185 u32 residue; 186 }; 187 188 struct udma_chan_config { 189 bool pkt_mode; /* TR or packet */ 190 bool needs_epib; /* EPIB is needed for the communication or not */ 191 u32 psd_size; /* size of Protocol Specific Data */ 192 u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */ 193 u32 hdesc_size; /* Size of a packet descriptor in packet mode */ 194 bool notdpkt; /* Suppress sending TDC packet */ 195 int remote_thread_id; 196 u32 atype; 197 u32 src_thread; 198 u32 dst_thread; 199 enum psil_endpoint_type ep_type; 200 bool enable_acc32; 201 bool enable_burst; 202 enum udma_tp_level channel_tpl; /* Channel Throughput Level */ 203 204 enum dma_transfer_direction dir; 205 }; 206 207 struct udma_chan { 208 struct virt_dma_chan vc; 209 struct dma_slave_config cfg; 210 struct udma_dev *ud; 211 struct udma_desc *desc; 212 struct udma_desc *terminated_desc; 213 struct udma_static_tr static_tr; 214 char *name; 215 216 struct udma_tchan *tchan; 217 struct udma_rchan *rchan; 218 struct udma_rflow *rflow; 219 220 bool psil_paired; 221 222 int irq_num_ring; 223 int irq_num_udma; 224 225 bool cyclic; 226 bool paused; 227 228 enum udma_chan_state state; 229 struct completion teardown_completed; 230 231 struct udma_tx_drain tx_drain; 232 233 u32 bcnt; /* number of bytes completed since the start of the channel */ 234 235 /* Channel configuration parameters */ 236 struct udma_chan_config config; 237 238 /* dmapool for packet mode descriptors */ 239 bool use_dma_pool; 240 struct dma_pool *hdesc_pool; 241 242 u32 id; 243 }; 244 245 static inline struct udma_dev *to_udma_dev(struct dma_device *d) 246 { 247 return container_of(d, struct udma_dev, ddev); 248 } 249 250 static inline struct udma_chan *to_udma_chan(struct dma_chan *c) 251 { 252 return container_of(c, struct udma_chan, vc.chan); 253 } 254 255 static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t) 256 { 257 return container_of(t, struct udma_desc, vd.tx); 258 } 259 260 /* Generic register access functions */ 261 static inline u32 udma_read(void __iomem *base, int reg) 262 { 263 return readl(base + reg); 264 } 265 266 static inline void udma_write(void __iomem *base, int reg, u32 val) 267 { 268 writel(val, base + reg); 269 } 270 271 static inline void udma_update_bits(void __iomem *base, int reg, 272 u32 mask, u32 val) 273 { 274 u32 tmp, orig; 275 276 orig = readl(base + reg); 277 tmp = orig & ~mask; 278 tmp |= (val & mask); 279 280 if (tmp != orig) 281 writel(tmp, base + reg); 282 } 283 284 /* TCHANRT */ 285 static inline u32 udma_tchanrt_read(struct udma_chan *uc, int reg) 286 { 287 if (!uc->tchan) 288 
return 0; 289 return udma_read(uc->tchan->reg_rt, reg); 290 } 291 292 static inline void udma_tchanrt_write(struct udma_chan *uc, int reg, u32 val) 293 { 294 if (!uc->tchan) 295 return; 296 udma_write(uc->tchan->reg_rt, reg, val); 297 } 298 299 static inline void udma_tchanrt_update_bits(struct udma_chan *uc, int reg, 300 u32 mask, u32 val) 301 { 302 if (!uc->tchan) 303 return; 304 udma_update_bits(uc->tchan->reg_rt, reg, mask, val); 305 } 306 307 /* RCHANRT */ 308 static inline u32 udma_rchanrt_read(struct udma_chan *uc, int reg) 309 { 310 if (!uc->rchan) 311 return 0; 312 return udma_read(uc->rchan->reg_rt, reg); 313 } 314 315 static inline void udma_rchanrt_write(struct udma_chan *uc, int reg, u32 val) 316 { 317 if (!uc->rchan) 318 return; 319 udma_write(uc->rchan->reg_rt, reg, val); 320 } 321 322 static inline void udma_rchanrt_update_bits(struct udma_chan *uc, int reg, 323 u32 mask, u32 val) 324 { 325 if (!uc->rchan) 326 return; 327 udma_update_bits(uc->rchan->reg_rt, reg, mask, val); 328 } 329 330 static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread) 331 { 332 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 333 334 dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; 335 return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci, 336 tisci_rm->tisci_navss_dev_id, 337 src_thread, dst_thread); 338 } 339 340 static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread, 341 u32 dst_thread) 342 { 343 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 344 345 dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; 346 return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci, 347 tisci_rm->tisci_navss_dev_id, 348 src_thread, dst_thread); 349 } 350 351 static void udma_reset_uchan(struct udma_chan *uc) 352 { 353 memset(&uc->config, 0, sizeof(uc->config)); 354 uc->config.remote_thread_id = -1; 355 uc->state = UDMA_CHAN_IS_IDLE; 356 } 357 358 static void udma_dump_chan_stdata(struct udma_chan *uc) 359 { 360 struct device *dev = uc->ud->dev; 361 u32 offset; 362 int i; 363 364 if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) { 365 dev_dbg(dev, "TCHAN State data:\n"); 366 for (i = 0; i < 32; i++) { 367 offset = UDMA_CHAN_RT_STDATA_REG + i * 4; 368 dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i, 369 udma_tchanrt_read(uc, offset)); 370 } 371 } 372 373 if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) { 374 dev_dbg(dev, "RCHAN State data:\n"); 375 for (i = 0; i < 32; i++) { 376 offset = UDMA_CHAN_RT_STDATA_REG + i * 4; 377 dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i, 378 udma_rchanrt_read(uc, offset)); 379 } 380 } 381 } 382 383 static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d, 384 int idx) 385 { 386 return d->hwdesc[idx].cppi5_desc_paddr; 387 } 388 389 static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx) 390 { 391 return d->hwdesc[idx].cppi5_desc_vaddr; 392 } 393 394 static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc, 395 dma_addr_t paddr) 396 { 397 struct udma_desc *d = uc->terminated_desc; 398 399 if (d) { 400 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d, 401 d->desc_idx); 402 403 if (desc_paddr != paddr) 404 d = NULL; 405 } 406 407 if (!d) { 408 d = uc->desc; 409 if (d) { 410 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d, 411 d->desc_idx); 412 413 if (desc_paddr != paddr) 414 d = NULL; 415 } 416 } 417 418 return d; 419 } 420 421 static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d) 422 { 423 if (uc->use_dma_pool) { 424 int i; 425 426 for (i = 0; i < 
d->hwdesc_count; i++) { 427 if (!d->hwdesc[i].cppi5_desc_vaddr) 428 continue; 429 430 dma_pool_free(uc->hdesc_pool, 431 d->hwdesc[i].cppi5_desc_vaddr, 432 d->hwdesc[i].cppi5_desc_paddr); 433 434 d->hwdesc[i].cppi5_desc_vaddr = NULL; 435 } 436 } else if (d->hwdesc[0].cppi5_desc_vaddr) { 437 struct udma_dev *ud = uc->ud; 438 439 dma_free_coherent(ud->dev, d->hwdesc[0].cppi5_desc_size, 440 d->hwdesc[0].cppi5_desc_vaddr, 441 d->hwdesc[0].cppi5_desc_paddr); 442 443 d->hwdesc[0].cppi5_desc_vaddr = NULL; 444 } 445 } 446 447 static void udma_purge_desc_work(struct work_struct *work) 448 { 449 struct udma_dev *ud = container_of(work, typeof(*ud), purge_work); 450 struct virt_dma_desc *vd, *_vd; 451 unsigned long flags; 452 LIST_HEAD(head); 453 454 spin_lock_irqsave(&ud->lock, flags); 455 list_splice_tail_init(&ud->desc_to_purge, &head); 456 spin_unlock_irqrestore(&ud->lock, flags); 457 458 list_for_each_entry_safe(vd, _vd, &head, node) { 459 struct udma_chan *uc = to_udma_chan(vd->tx.chan); 460 struct udma_desc *d = to_udma_desc(&vd->tx); 461 462 udma_free_hwdesc(uc, d); 463 list_del(&vd->node); 464 kfree(d); 465 } 466 467 /* If more to purge, schedule the work again */ 468 if (!list_empty(&ud->desc_to_purge)) 469 schedule_work(&ud->purge_work); 470 } 471 472 static void udma_desc_free(struct virt_dma_desc *vd) 473 { 474 struct udma_dev *ud = to_udma_dev(vd->tx.chan->device); 475 struct udma_chan *uc = to_udma_chan(vd->tx.chan); 476 struct udma_desc *d = to_udma_desc(&vd->tx); 477 unsigned long flags; 478 479 if (uc->terminated_desc == d) 480 uc->terminated_desc = NULL; 481 482 if (uc->use_dma_pool) { 483 udma_free_hwdesc(uc, d); 484 kfree(d); 485 return; 486 } 487 488 spin_lock_irqsave(&ud->lock, flags); 489 list_add_tail(&vd->node, &ud->desc_to_purge); 490 spin_unlock_irqrestore(&ud->lock, flags); 491 492 schedule_work(&ud->purge_work); 493 } 494 495 static bool udma_is_chan_running(struct udma_chan *uc) 496 { 497 u32 trt_ctl = 0; 498 u32 rrt_ctl = 0; 499 500 if (uc->tchan) 501 trt_ctl = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG); 502 if (uc->rchan) 503 rrt_ctl = udma_rchanrt_read(uc, UDMA_CHAN_RT_CTL_REG); 504 505 if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN) 506 return true; 507 508 return false; 509 } 510 511 static bool udma_is_chan_paused(struct udma_chan *uc) 512 { 513 u32 val, pause_mask; 514 515 switch (uc->config.dir) { 516 case DMA_DEV_TO_MEM: 517 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG); 518 pause_mask = UDMA_PEER_RT_EN_PAUSE; 519 break; 520 case DMA_MEM_TO_DEV: 521 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG); 522 pause_mask = UDMA_PEER_RT_EN_PAUSE; 523 break; 524 case DMA_MEM_TO_MEM: 525 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG); 526 pause_mask = UDMA_CHAN_RT_CTL_PAUSE; 527 break; 528 default: 529 return false; 530 } 531 532 if (val & pause_mask) 533 return true; 534 535 return false; 536 } 537 538 static inline dma_addr_t udma_get_rx_flush_hwdesc_paddr(struct udma_chan *uc) 539 { 540 return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr; 541 } 542 543 static int udma_push_to_ring(struct udma_chan *uc, int idx) 544 { 545 struct udma_desc *d = uc->desc; 546 struct k3_ring *ring = NULL; 547 dma_addr_t paddr; 548 549 switch (uc->config.dir) { 550 case DMA_DEV_TO_MEM: 551 ring = uc->rflow->fd_ring; 552 break; 553 case DMA_MEM_TO_DEV: 554 case DMA_MEM_TO_MEM: 555 ring = uc->tchan->t_ring; 556 break; 557 default: 558 return -EINVAL; 559 } 560 561 /* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */ 
562 if (idx == -1) { 563 paddr = udma_get_rx_flush_hwdesc_paddr(uc); 564 } else { 565 paddr = udma_curr_cppi5_desc_paddr(d, idx); 566 567 wmb(); /* Ensure that writes are not moved over this point */ 568 } 569 570 return k3_ringacc_ring_push(ring, &paddr); 571 } 572 573 static bool udma_desc_is_rx_flush(struct udma_chan *uc, dma_addr_t addr) 574 { 575 if (uc->config.dir != DMA_DEV_TO_MEM) 576 return false; 577 578 if (addr == udma_get_rx_flush_hwdesc_paddr(uc)) 579 return true; 580 581 return false; 582 } 583 584 static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr) 585 { 586 struct k3_ring *ring = NULL; 587 int ret; 588 589 switch (uc->config.dir) { 590 case DMA_DEV_TO_MEM: 591 ring = uc->rflow->r_ring; 592 break; 593 case DMA_MEM_TO_DEV: 594 case DMA_MEM_TO_MEM: 595 ring = uc->tchan->tc_ring; 596 break; 597 default: 598 return -ENOENT; 599 } 600 601 ret = k3_ringacc_ring_pop(ring, addr); 602 if (ret) 603 return ret; 604 605 rmb(); /* Ensure that reads are not moved before this point */ 606 607 /* Teardown completion */ 608 if (cppi5_desc_is_tdcm(*addr)) 609 return 0; 610 611 /* Check for flush descriptor */ 612 if (udma_desc_is_rx_flush(uc, *addr)) 613 return -ENOENT; 614 615 return 0; 616 } 617 618 static void udma_reset_rings(struct udma_chan *uc) 619 { 620 struct k3_ring *ring1 = NULL; 621 struct k3_ring *ring2 = NULL; 622 623 switch (uc->config.dir) { 624 case DMA_DEV_TO_MEM: 625 if (uc->rchan) { 626 ring1 = uc->rflow->fd_ring; 627 ring2 = uc->rflow->r_ring; 628 } 629 break; 630 case DMA_MEM_TO_DEV: 631 case DMA_MEM_TO_MEM: 632 if (uc->tchan) { 633 ring1 = uc->tchan->t_ring; 634 ring2 = uc->tchan->tc_ring; 635 } 636 break; 637 default: 638 break; 639 } 640 641 if (ring1) 642 k3_ringacc_ring_reset_dma(ring1, 643 k3_ringacc_ring_get_occ(ring1)); 644 if (ring2) 645 k3_ringacc_ring_reset(ring2); 646 647 /* make sure we are not leaking memory by stalled descriptor */ 648 if (uc->terminated_desc) { 649 udma_desc_free(&uc->terminated_desc->vd); 650 uc->terminated_desc = NULL; 651 } 652 } 653 654 static void udma_reset_counters(struct udma_chan *uc) 655 { 656 u32 val; 657 658 if (uc->tchan) { 659 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG); 660 udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val); 661 662 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG); 663 udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val); 664 665 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG); 666 udma_tchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val); 667 668 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG); 669 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val); 670 } 671 672 if (uc->rchan) { 673 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG); 674 udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val); 675 676 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG); 677 udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val); 678 679 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG); 680 udma_rchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val); 681 682 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG); 683 udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val); 684 } 685 686 uc->bcnt = 0; 687 } 688 689 static int udma_reset_chan(struct udma_chan *uc, bool hard) 690 { 691 switch (uc->config.dir) { 692 case DMA_DEV_TO_MEM: 693 udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0); 694 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0); 695 break; 696 case DMA_MEM_TO_DEV: 697 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0); 698 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0); 699 break; 
700 case DMA_MEM_TO_MEM: 701 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0); 702 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0); 703 break; 704 default: 705 return -EINVAL; 706 } 707 708 /* Reset all counters */ 709 udma_reset_counters(uc); 710 711 /* Hard reset: re-initialize the channel to reset */ 712 if (hard) { 713 struct udma_chan_config ucc_backup; 714 int ret; 715 716 memcpy(&ucc_backup, &uc->config, sizeof(uc->config)); 717 uc->ud->ddev.device_free_chan_resources(&uc->vc.chan); 718 719 /* restore the channel configuration */ 720 memcpy(&uc->config, &ucc_backup, sizeof(uc->config)); 721 ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan); 722 if (ret) 723 return ret; 724 725 /* 726 * Setting forced teardown after forced reset helps recovering 727 * the rchan. 728 */ 729 if (uc->config.dir == DMA_DEV_TO_MEM) 730 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 731 UDMA_CHAN_RT_CTL_EN | 732 UDMA_CHAN_RT_CTL_TDOWN | 733 UDMA_CHAN_RT_CTL_FTDOWN); 734 } 735 uc->state = UDMA_CHAN_IS_IDLE; 736 737 return 0; 738 } 739 740 static void udma_start_desc(struct udma_chan *uc) 741 { 742 struct udma_chan_config *ucc = &uc->config; 743 744 if (ucc->pkt_mode && (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) { 745 int i; 746 747 /* Push all descriptors to ring for packet mode cyclic or RX */ 748 for (i = 0; i < uc->desc->sglen; i++) 749 udma_push_to_ring(uc, i); 750 } else { 751 udma_push_to_ring(uc, 0); 752 } 753 } 754 755 static bool udma_chan_needs_reconfiguration(struct udma_chan *uc) 756 { 757 /* Only PDMAs have staticTR */ 758 if (uc->config.ep_type == PSIL_EP_NATIVE) 759 return false; 760 761 /* Check if the staticTR configuration has changed for TX */ 762 if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr))) 763 return true; 764 765 return false; 766 } 767 768 static int udma_start(struct udma_chan *uc) 769 { 770 struct virt_dma_desc *vd = vchan_next_desc(&uc->vc); 771 772 if (!vd) { 773 uc->desc = NULL; 774 return -ENOENT; 775 } 776 777 list_del(&vd->node); 778 779 uc->desc = to_udma_desc(&vd->tx); 780 781 /* Channel is already running and does not need reconfiguration */ 782 if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) { 783 udma_start_desc(uc); 784 goto out; 785 } 786 787 /* Make sure that we clear the teardown bit, if it is set */ 788 udma_reset_chan(uc, false); 789 790 /* Push descriptors before we start the channel */ 791 udma_start_desc(uc); 792 793 switch (uc->desc->dir) { 794 case DMA_DEV_TO_MEM: 795 /* Config remote TR */ 796 if (uc->config.ep_type == PSIL_EP_PDMA_XY) { 797 u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) | 798 PDMA_STATIC_TR_X(uc->desc->static_tr.elsize); 799 const struct udma_match_data *match_data = 800 uc->ud->match_data; 801 802 if (uc->config.enable_acc32) 803 val |= PDMA_STATIC_TR_XY_ACC32; 804 if (uc->config.enable_burst) 805 val |= PDMA_STATIC_TR_XY_BURST; 806 807 udma_rchanrt_write(uc, 808 UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG, 809 val); 810 811 udma_rchanrt_write(uc, 812 UDMA_CHAN_RT_PEER_STATIC_TR_Z_REG, 813 PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt, 814 match_data->statictr_z_mask)); 815 816 /* save the current staticTR configuration */ 817 memcpy(&uc->static_tr, &uc->desc->static_tr, 818 sizeof(uc->static_tr)); 819 } 820 821 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 822 UDMA_CHAN_RT_CTL_EN); 823 824 /* Enable remote */ 825 udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 826 UDMA_PEER_RT_EN_ENABLE); 827 828 break; 829 case DMA_MEM_TO_DEV: 830 /* Config remote TR */ 831 if (uc->config.ep_type == 
PSIL_EP_PDMA_XY) { 832 u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) | 833 PDMA_STATIC_TR_X(uc->desc->static_tr.elsize); 834 835 if (uc->config.enable_acc32) 836 val |= PDMA_STATIC_TR_XY_ACC32; 837 if (uc->config.enable_burst) 838 val |= PDMA_STATIC_TR_XY_BURST; 839 840 udma_tchanrt_write(uc, 841 UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG, 842 val); 843 844 /* save the current staticTR configuration */ 845 memcpy(&uc->static_tr, &uc->desc->static_tr, 846 sizeof(uc->static_tr)); 847 } 848 849 /* Enable remote */ 850 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 851 UDMA_PEER_RT_EN_ENABLE); 852 853 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 854 UDMA_CHAN_RT_CTL_EN); 855 856 break; 857 case DMA_MEM_TO_MEM: 858 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 859 UDMA_CHAN_RT_CTL_EN); 860 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 861 UDMA_CHAN_RT_CTL_EN); 862 863 break; 864 default: 865 return -EINVAL; 866 } 867 868 uc->state = UDMA_CHAN_IS_ACTIVE; 869 out: 870 871 return 0; 872 } 873 874 static int udma_stop(struct udma_chan *uc) 875 { 876 enum udma_chan_state old_state = uc->state; 877 878 uc->state = UDMA_CHAN_IS_TERMINATING; 879 reinit_completion(&uc->teardown_completed); 880 881 switch (uc->config.dir) { 882 case DMA_DEV_TO_MEM: 883 if (!uc->cyclic && !uc->desc) 884 udma_push_to_ring(uc, -1); 885 886 udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 887 UDMA_PEER_RT_EN_ENABLE | 888 UDMA_PEER_RT_EN_TEARDOWN); 889 break; 890 case DMA_MEM_TO_DEV: 891 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 892 UDMA_PEER_RT_EN_ENABLE | 893 UDMA_PEER_RT_EN_FLUSH); 894 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 895 UDMA_CHAN_RT_CTL_EN | 896 UDMA_CHAN_RT_CTL_TDOWN); 897 break; 898 case DMA_MEM_TO_MEM: 899 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 900 UDMA_CHAN_RT_CTL_EN | 901 UDMA_CHAN_RT_CTL_TDOWN); 902 break; 903 default: 904 uc->state = old_state; 905 complete_all(&uc->teardown_completed); 906 return -EINVAL; 907 } 908 909 return 0; 910 } 911 912 static void udma_cyclic_packet_elapsed(struct udma_chan *uc) 913 { 914 struct udma_desc *d = uc->desc; 915 struct cppi5_host_desc_t *h_desc; 916 917 h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr; 918 cppi5_hdesc_reset_to_original(h_desc); 919 udma_push_to_ring(uc, d->desc_idx); 920 d->desc_idx = (d->desc_idx + 1) % d->sglen; 921 } 922 923 static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d) 924 { 925 struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr; 926 927 memcpy(d->metadata, h_desc->epib, d->metadata_size); 928 } 929 930 static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d) 931 { 932 u32 peer_bcnt, bcnt; 933 934 /* Only TX towards PDMA is affected */ 935 if (uc->config.ep_type == PSIL_EP_NATIVE || 936 uc->config.dir != DMA_MEM_TO_DEV) 937 return true; 938 939 peer_bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG); 940 bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG); 941 942 /* Transfer is incomplete, store current residue and time stamp */ 943 if (peer_bcnt < bcnt) { 944 uc->tx_drain.residue = bcnt - peer_bcnt; 945 uc->tx_drain.tstamp = ktime_get(); 946 return false; 947 } 948 949 return true; 950 } 951 952 static void udma_check_tx_completion(struct work_struct *work) 953 { 954 struct udma_chan *uc = container_of(work, typeof(*uc), 955 tx_drain.work.work); 956 bool desc_done = true; 957 u32 residue_diff; 958 ktime_t time_diff; 959 unsigned long delay; 960 961 while (1) { 962 if (uc->desc) { 963 /* Get previous residue and time stamp */ 964 residue_diff = 
uc->tx_drain.residue; 965 time_diff = uc->tx_drain.tstamp; 966 /* 967 * Get current residue and time stamp or see if 968 * transfer is complete 969 */ 970 desc_done = udma_is_desc_really_done(uc, uc->desc); 971 } 972 973 if (!desc_done) { 974 /* 975 * Find the time delta and residue delta w.r.t 976 * previous poll 977 */ 978 time_diff = ktime_sub(uc->tx_drain.tstamp, 979 time_diff) + 1; 980 residue_diff -= uc->tx_drain.residue; 981 if (residue_diff) { 982 /* 983 * Try to guess when we should check 984 * next time by calculating rate at 985 * which data is being drained at the 986 * peer device 987 */ 988 delay = (time_diff / residue_diff) * 989 uc->tx_drain.residue; 990 } else { 991 /* No progress, check again in 1 second */ 992 schedule_delayed_work(&uc->tx_drain.work, HZ); 993 break; 994 } 995 996 usleep_range(ktime_to_us(delay), 997 ktime_to_us(delay) + 10); 998 continue; 999 } 1000 1001 if (uc->desc) { 1002 struct udma_desc *d = uc->desc; 1003 1004 uc->bcnt += d->residue; 1005 udma_start(uc); 1006 vchan_cookie_complete(&d->vd); 1007 break; 1008 } 1009 1010 break; 1011 } 1012 } 1013 1014 static irqreturn_t udma_ring_irq_handler(int irq, void *data) 1015 { 1016 struct udma_chan *uc = data; 1017 struct udma_desc *d; 1018 unsigned long flags; 1019 dma_addr_t paddr = 0; 1020 1021 if (udma_pop_from_ring(uc, &paddr) || !paddr) 1022 return IRQ_HANDLED; 1023 1024 spin_lock_irqsave(&uc->vc.lock, flags); 1025 1026 /* Teardown completion message */ 1027 if (cppi5_desc_is_tdcm(paddr)) { 1028 complete_all(&uc->teardown_completed); 1029 1030 if (uc->terminated_desc) { 1031 udma_desc_free(&uc->terminated_desc->vd); 1032 uc->terminated_desc = NULL; 1033 } 1034 1035 if (!uc->desc) 1036 udma_start(uc); 1037 1038 goto out; 1039 } 1040 1041 d = udma_udma_desc_from_paddr(uc, paddr); 1042 1043 if (d) { 1044 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d, 1045 d->desc_idx); 1046 if (desc_paddr != paddr) { 1047 dev_err(uc->ud->dev, "not matching descriptors!\n"); 1048 goto out; 1049 } 1050 1051 if (d == uc->desc) { 1052 /* active descriptor */ 1053 if (uc->cyclic) { 1054 udma_cyclic_packet_elapsed(uc); 1055 vchan_cyclic_callback(&d->vd); 1056 } else { 1057 if (udma_is_desc_really_done(uc, d)) { 1058 uc->bcnt += d->residue; 1059 udma_start(uc); 1060 vchan_cookie_complete(&d->vd); 1061 } else { 1062 schedule_delayed_work(&uc->tx_drain.work, 1063 0); 1064 } 1065 } 1066 } else { 1067 /* 1068 * terminated descriptor, mark the descriptor as 1069 * completed to update the channel's cookie marker 1070 */ 1071 dma_cookie_complete(&d->vd.tx); 1072 } 1073 } 1074 out: 1075 spin_unlock_irqrestore(&uc->vc.lock, flags); 1076 1077 return IRQ_HANDLED; 1078 } 1079 1080 static irqreturn_t udma_udma_irq_handler(int irq, void *data) 1081 { 1082 struct udma_chan *uc = data; 1083 struct udma_desc *d; 1084 unsigned long flags; 1085 1086 spin_lock_irqsave(&uc->vc.lock, flags); 1087 d = uc->desc; 1088 if (d) { 1089 d->tr_idx = (d->tr_idx + 1) % d->sglen; 1090 1091 if (uc->cyclic) { 1092 vchan_cyclic_callback(&d->vd); 1093 } else { 1094 /* TODO: figure out the real amount of data */ 1095 uc->bcnt += d->residue; 1096 udma_start(uc); 1097 vchan_cookie_complete(&d->vd); 1098 } 1099 } 1100 1101 spin_unlock_irqrestore(&uc->vc.lock, flags); 1102 1103 return IRQ_HANDLED; 1104 } 1105 1106 /** 1107 * __udma_alloc_gp_rflow_range - alloc range of GP RX flows 1108 * @ud: UDMA device 1109 * @from: Start the search from this flow id number 1110 * @cnt: Number of consecutive flow ids to allocate 1111 * 1112 * Allocate range of RX flow ids for 
future use; these flows can be requested
 * only by an explicit flow id number. If @from is set to -1 it will try to
 * find the first free range. If @from is a positive value it will force
 * allocation only of the specified range of flows.
 *
 * Returns -ENOMEM if a free range can not be found.
 * -EEXIST if the requested range is busy.
 * -EINVAL if wrong input values are passed.
 * Returns flow id on success.
 */
static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
{
	int start, tmp_from;
	DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);

	tmp_from = from;
	if (tmp_from < 0)
		tmp_from = ud->rchan_cnt;
	/* default flows can't be allocated and are accessible only by id */
	if (tmp_from < ud->rchan_cnt)
		return -EINVAL;

	if (tmp_from + cnt > ud->rflow_cnt)
		return -EINVAL;

	bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
		  ud->rflow_cnt);

	start = bitmap_find_next_zero_area(tmp,
					   ud->rflow_cnt,
					   tmp_from, cnt, 0);
	if (start >= ud->rflow_cnt)
		return -ENOMEM;

	if (from >= 0 && start != from)
		return -EEXIST;

	bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
	return start;
}

static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
{
	if (from < ud->rchan_cnt)
		return -EINVAL;
	if (from + cnt > ud->rflow_cnt)
		return -EINVAL;

	bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
	return 0;
}
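
/*
 * Illustrative sketch (not part of the driver): how a caller might use the
 * GP rflow range helpers above. The count of 8 is an arbitrary example.
 *
 *	int start;
 *
 *	// let the allocator find the first free range of 8 GP flows
 *	start = __udma_alloc_gp_rflow_range(ud, -1, 8);
 *	if (start < 0)
 *		return start;	// -ENOMEM, -EEXIST or -EINVAL
 *
 *	// ... flows [start, start + 7] can now be requested by explicit id ...
 *
 *	__udma_free_gp_rflow_range(ud, start, 8);
 */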

static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
{
	/*
	 * An rflow can be requested by ID for any rflow that is not in use,
	 * with the assumption that the caller knows what it is doing.
	 * TI-SCI FW will perform an additional permission check anyway,
	 * so it is safe.
	 */

	if (id < 0 || id >= ud->rflow_cnt)
		return ERR_PTR(-ENOENT);

	if (test_bit(id, ud->rflow_in_use))
		return ERR_PTR(-ENOENT);

	/* GP rflow has to be allocated first */
	if (!test_bit(id, ud->rflow_gp_map) &&
	    !test_bit(id, ud->rflow_gp_map_allocated))
		return ERR_PTR(-EINVAL);

	dev_dbg(ud->dev, "get rflow%d\n", id);
	set_bit(id, ud->rflow_in_use);
	return &ud->rflows[id];
}

static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow)
{
	if (!test_bit(rflow->id, ud->rflow_in_use)) {
		dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id);
		return;
	}

	dev_dbg(ud->dev, "put rflow%d\n", rflow->id);
	clear_bit(rflow->id, ud->rflow_in_use);
}

#define UDMA_RESERVE_RESOURCE(res)					\
static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud,	\
					       enum udma_tp_level tpl,	\
					       int id)			\
{									\
	if (id >= 0) {							\
		if (test_bit(id, ud->res##_map)) {			\
			dev_err(ud->dev, #res "%d is in use\n", id);	\
			return ERR_PTR(-ENOENT);			\
		}							\
	} else {							\
		int start;						\
									\
		if (tpl >= ud->tpl_levels)				\
			tpl = ud->tpl_levels - 1;			\
									\
		start = ud->tpl_start_idx[tpl];				\
									\
		id = find_next_zero_bit(ud->res##_map, ud->res##_cnt,	\
					start);				\
		if (id == ud->res##_cnt) {				\
			return ERR_PTR(-ENOENT);			\
		}							\
	}								\
									\
	set_bit(id, ud->res##_map);					\
	return &ud->res##s[id];						\
}

UDMA_RESERVE_RESOURCE(tchan);
UDMA_RESERVE_RESOURCE(rchan);

static int udma_get_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return 0;
	}

	uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, -1);

	return PTR_ERR_OR_ZERO(uc->tchan);
}

static int udma_get_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return 0;
	}

	uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, -1);

	return PTR_ERR_OR_ZERO(uc->rchan);
}

static int udma_get_chan_pair(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int chan_id, end;

	if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
		dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
			 uc->id, uc->tchan->id);
		return 0;
	}

	if (uc->tchan) {
		dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return -EBUSY;
	} else if (uc->rchan) {
		dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return -EBUSY;
	}

	/* Can be optimized, but let's have it like this for now */
	end = min(ud->tchan_cnt, ud->rchan_cnt);
	/* Try to use the highest TPL channel pair for MEM_TO_MEM channels */
	chan_id = ud->tpl_start_idx[ud->tpl_levels - 1];
	for (; chan_id < end; chan_id++) {
		if (!test_bit(chan_id, ud->tchan_map) &&
		    !test_bit(chan_id, ud->rchan_map))
			break;
	}

	if (chan_id == end)
		return -ENOENT;

	set_bit(chan_id, ud->tchan_map);
	set_bit(chan_id,
ud->rchan_map); 1298 uc->tchan = &ud->tchans[chan_id]; 1299 uc->rchan = &ud->rchans[chan_id]; 1300 1301 return 0; 1302 } 1303 1304 static int udma_get_rflow(struct udma_chan *uc, int flow_id) 1305 { 1306 struct udma_dev *ud = uc->ud; 1307 1308 if (!uc->rchan) { 1309 dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id); 1310 return -EINVAL; 1311 } 1312 1313 if (uc->rflow) { 1314 dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n", 1315 uc->id, uc->rflow->id); 1316 return 0; 1317 } 1318 1319 uc->rflow = __udma_get_rflow(ud, flow_id); 1320 1321 return PTR_ERR_OR_ZERO(uc->rflow); 1322 } 1323 1324 static void udma_put_rchan(struct udma_chan *uc) 1325 { 1326 struct udma_dev *ud = uc->ud; 1327 1328 if (uc->rchan) { 1329 dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id, 1330 uc->rchan->id); 1331 clear_bit(uc->rchan->id, ud->rchan_map); 1332 uc->rchan = NULL; 1333 } 1334 } 1335 1336 static void udma_put_tchan(struct udma_chan *uc) 1337 { 1338 struct udma_dev *ud = uc->ud; 1339 1340 if (uc->tchan) { 1341 dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id, 1342 uc->tchan->id); 1343 clear_bit(uc->tchan->id, ud->tchan_map); 1344 uc->tchan = NULL; 1345 } 1346 } 1347 1348 static void udma_put_rflow(struct udma_chan *uc) 1349 { 1350 struct udma_dev *ud = uc->ud; 1351 1352 if (uc->rflow) { 1353 dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id, 1354 uc->rflow->id); 1355 __udma_put_rflow(ud, uc->rflow); 1356 uc->rflow = NULL; 1357 } 1358 } 1359 1360 static void udma_free_tx_resources(struct udma_chan *uc) 1361 { 1362 if (!uc->tchan) 1363 return; 1364 1365 k3_ringacc_ring_free(uc->tchan->t_ring); 1366 k3_ringacc_ring_free(uc->tchan->tc_ring); 1367 uc->tchan->t_ring = NULL; 1368 uc->tchan->tc_ring = NULL; 1369 1370 udma_put_tchan(uc); 1371 } 1372 1373 static int udma_alloc_tx_resources(struct udma_chan *uc) 1374 { 1375 struct k3_ring_cfg ring_cfg; 1376 struct udma_dev *ud = uc->ud; 1377 int ret; 1378 1379 ret = udma_get_tchan(uc); 1380 if (ret) 1381 return ret; 1382 1383 uc->tchan->t_ring = k3_ringacc_request_ring(ud->ringacc, 1384 uc->tchan->id, 0); 1385 if (!uc->tchan->t_ring) { 1386 ret = -EBUSY; 1387 goto err_tx_ring; 1388 } 1389 1390 uc->tchan->tc_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0); 1391 if (!uc->tchan->tc_ring) { 1392 ret = -EBUSY; 1393 goto err_txc_ring; 1394 } 1395 1396 memset(&ring_cfg, 0, sizeof(ring_cfg)); 1397 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; 1398 ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8; 1399 ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE; 1400 1401 ret = k3_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg); 1402 ret |= k3_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg); 1403 1404 if (ret) 1405 goto err_ringcfg; 1406 1407 return 0; 1408 1409 err_ringcfg: 1410 k3_ringacc_ring_free(uc->tchan->tc_ring); 1411 uc->tchan->tc_ring = NULL; 1412 err_txc_ring: 1413 k3_ringacc_ring_free(uc->tchan->t_ring); 1414 uc->tchan->t_ring = NULL; 1415 err_tx_ring: 1416 udma_put_tchan(uc); 1417 1418 return ret; 1419 } 1420 1421 static void udma_free_rx_resources(struct udma_chan *uc) 1422 { 1423 if (!uc->rchan) 1424 return; 1425 1426 if (uc->rflow) { 1427 struct udma_rflow *rflow = uc->rflow; 1428 1429 k3_ringacc_ring_free(rflow->fd_ring); 1430 k3_ringacc_ring_free(rflow->r_ring); 1431 rflow->fd_ring = NULL; 1432 rflow->r_ring = NULL; 1433 1434 udma_put_rflow(uc); 1435 } 1436 1437 udma_put_rchan(uc); 1438 } 1439 1440 static int udma_alloc_rx_resources(struct udma_chan *uc) 1441 { 1442 struct udma_dev *ud = uc->ud; 1443 struct k3_ring_cfg ring_cfg; 1444 struct udma_rflow *rflow; 
1445 int fd_ring_id; 1446 int ret; 1447 1448 ret = udma_get_rchan(uc); 1449 if (ret) 1450 return ret; 1451 1452 /* For MEM_TO_MEM we don't need rflow or rings */ 1453 if (uc->config.dir == DMA_MEM_TO_MEM) 1454 return 0; 1455 1456 ret = udma_get_rflow(uc, uc->rchan->id); 1457 if (ret) { 1458 ret = -EBUSY; 1459 goto err_rflow; 1460 } 1461 1462 rflow = uc->rflow; 1463 fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id; 1464 rflow->fd_ring = k3_ringacc_request_ring(ud->ringacc, fd_ring_id, 0); 1465 if (!rflow->fd_ring) { 1466 ret = -EBUSY; 1467 goto err_rx_ring; 1468 } 1469 1470 rflow->r_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0); 1471 if (!rflow->r_ring) { 1472 ret = -EBUSY; 1473 goto err_rxc_ring; 1474 } 1475 1476 memset(&ring_cfg, 0, sizeof(ring_cfg)); 1477 1478 if (uc->config.pkt_mode) 1479 ring_cfg.size = SG_MAX_SEGMENTS; 1480 else 1481 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; 1482 1483 ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8; 1484 ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE; 1485 1486 ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg); 1487 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; 1488 ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg); 1489 1490 if (ret) 1491 goto err_ringcfg; 1492 1493 return 0; 1494 1495 err_ringcfg: 1496 k3_ringacc_ring_free(rflow->r_ring); 1497 rflow->r_ring = NULL; 1498 err_rxc_ring: 1499 k3_ringacc_ring_free(rflow->fd_ring); 1500 rflow->fd_ring = NULL; 1501 err_rx_ring: 1502 udma_put_rflow(uc); 1503 err_rflow: 1504 udma_put_rchan(uc); 1505 1506 return ret; 1507 } 1508 1509 #define TISCI_TCHAN_VALID_PARAMS ( \ 1510 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \ 1511 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | \ 1512 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | \ 1513 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \ 1514 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | \ 1515 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \ 1516 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \ 1517 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID) 1518 1519 #define TISCI_RCHAN_VALID_PARAMS ( \ 1520 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \ 1521 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \ 1522 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \ 1523 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \ 1524 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID | \ 1525 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID | \ 1526 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | \ 1527 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID | \ 1528 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID) 1529 1530 static int udma_tisci_m2m_channel_config(struct udma_chan *uc) 1531 { 1532 struct udma_dev *ud = uc->ud; 1533 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 1534 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; 1535 struct udma_tchan *tchan = uc->tchan; 1536 struct udma_rchan *rchan = uc->rchan; 1537 int ret = 0; 1538 1539 /* Non synchronized - mem to mem type of transfer */ 1540 int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring); 1541 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 }; 1542 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 }; 1543 1544 req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS; 1545 req_tx.nav_id = tisci_rm->tisci_dev_id; 1546 req_tx.index = tchan->id; 1547 req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR; 1548 req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2; 1549 req_tx.txcq_qnum = tc_ring; 1550 req_tx.tx_atype = ud->atype; 1551 1552 ret = 
tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); 1553 if (ret) { 1554 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret); 1555 return ret; 1556 } 1557 1558 req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS; 1559 req_rx.nav_id = tisci_rm->tisci_dev_id; 1560 req_rx.index = rchan->id; 1561 req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2; 1562 req_rx.rxcq_qnum = tc_ring; 1563 req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR; 1564 req_rx.rx_atype = ud->atype; 1565 1566 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); 1567 if (ret) 1568 dev_err(ud->dev, "rchan%d alloc failed %d\n", rchan->id, ret); 1569 1570 return ret; 1571 } 1572 1573 static int udma_tisci_tx_channel_config(struct udma_chan *uc) 1574 { 1575 struct udma_dev *ud = uc->ud; 1576 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 1577 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; 1578 struct udma_tchan *tchan = uc->tchan; 1579 int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring); 1580 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 }; 1581 u32 mode, fetch_size; 1582 int ret = 0; 1583 1584 if (uc->config.pkt_mode) { 1585 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR; 1586 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib, 1587 uc->config.psd_size, 0); 1588 } else { 1589 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR; 1590 fetch_size = sizeof(struct cppi5_desc_hdr_t); 1591 } 1592 1593 req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS; 1594 req_tx.nav_id = tisci_rm->tisci_dev_id; 1595 req_tx.index = tchan->id; 1596 req_tx.tx_chan_type = mode; 1597 req_tx.tx_supr_tdpkt = uc->config.notdpkt; 1598 req_tx.tx_fetch_size = fetch_size >> 2; 1599 req_tx.txcq_qnum = tc_ring; 1600 req_tx.tx_atype = uc->config.atype; 1601 1602 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); 1603 if (ret) 1604 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret); 1605 1606 return ret; 1607 } 1608 1609 static int udma_tisci_rx_channel_config(struct udma_chan *uc) 1610 { 1611 struct udma_dev *ud = uc->ud; 1612 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 1613 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; 1614 struct udma_rchan *rchan = uc->rchan; 1615 int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring); 1616 int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring); 1617 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 }; 1618 struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 }; 1619 u32 mode, fetch_size; 1620 int ret = 0; 1621 1622 if (uc->config.pkt_mode) { 1623 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR; 1624 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib, 1625 uc->config.psd_size, 0); 1626 } else { 1627 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR; 1628 fetch_size = sizeof(struct cppi5_desc_hdr_t); 1629 } 1630 1631 req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS; 1632 req_rx.nav_id = tisci_rm->tisci_dev_id; 1633 req_rx.index = rchan->id; 1634 req_rx.rx_fetch_size = fetch_size >> 2; 1635 req_rx.rxcq_qnum = rx_ring; 1636 req_rx.rx_chan_type = mode; 1637 req_rx.rx_atype = uc->config.atype; 1638 1639 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); 1640 if (ret) { 1641 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret); 1642 return ret; 1643 } 1644 1645 flow_req.valid_params = 1646 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID | 1647 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID | 1648 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID | 1649 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID | 1650 
TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID | 1651 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID | 1652 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID | 1653 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID | 1654 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID | 1655 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID | 1656 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID | 1657 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID | 1658 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID; 1659 1660 flow_req.nav_id = tisci_rm->tisci_dev_id; 1661 flow_req.flow_index = rchan->id; 1662 1663 if (uc->config.needs_epib) 1664 flow_req.rx_einfo_present = 1; 1665 else 1666 flow_req.rx_einfo_present = 0; 1667 if (uc->config.psd_size) 1668 flow_req.rx_psinfo_present = 1; 1669 else 1670 flow_req.rx_psinfo_present = 0; 1671 flow_req.rx_error_handling = 1; 1672 flow_req.rx_dest_qnum = rx_ring; 1673 flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE; 1674 flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG; 1675 flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI; 1676 flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO; 1677 flow_req.rx_fdq0_sz0_qnum = fd_ring; 1678 flow_req.rx_fdq1_qnum = fd_ring; 1679 flow_req.rx_fdq2_qnum = fd_ring; 1680 flow_req.rx_fdq3_qnum = fd_ring; 1681 1682 ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req); 1683 1684 if (ret) 1685 dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret); 1686 1687 return 0; 1688 } 1689 1690 static int udma_alloc_chan_resources(struct dma_chan *chan) 1691 { 1692 struct udma_chan *uc = to_udma_chan(chan); 1693 struct udma_dev *ud = to_udma_dev(chan->device); 1694 const struct udma_match_data *match_data = ud->match_data; 1695 struct k3_ring *irq_ring; 1696 u32 irq_udma_idx; 1697 int ret; 1698 1699 if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) { 1700 uc->use_dma_pool = true; 1701 /* in case of MEM_TO_MEM we have maximum of two TRs */ 1702 if (uc->config.dir == DMA_MEM_TO_MEM) { 1703 uc->config.hdesc_size = cppi5_trdesc_calc_size( 1704 sizeof(struct cppi5_tr_type15_t), 2); 1705 uc->config.pkt_mode = false; 1706 } 1707 } 1708 1709 if (uc->use_dma_pool) { 1710 uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev, 1711 uc->config.hdesc_size, 1712 ud->desc_align, 1713 0); 1714 if (!uc->hdesc_pool) { 1715 dev_err(ud->ddev.dev, 1716 "Descriptor pool allocation failed\n"); 1717 uc->use_dma_pool = false; 1718 ret = -ENOMEM; 1719 goto err_cleanup; 1720 } 1721 } 1722 1723 /* 1724 * Make sure that the completion is in a known state: 1725 * No teardown, the channel is idle 1726 */ 1727 reinit_completion(&uc->teardown_completed); 1728 complete_all(&uc->teardown_completed); 1729 uc->state = UDMA_CHAN_IS_IDLE; 1730 1731 switch (uc->config.dir) { 1732 case DMA_MEM_TO_MEM: 1733 /* Non synchronized - mem to mem type of transfer */ 1734 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__, 1735 uc->id); 1736 1737 ret = udma_get_chan_pair(uc); 1738 if (ret) 1739 goto err_cleanup; 1740 1741 ret = udma_alloc_tx_resources(uc); 1742 if (ret) { 1743 udma_put_rchan(uc); 1744 goto err_cleanup; 1745 } 1746 1747 ret = udma_alloc_rx_resources(uc); 1748 if (ret) { 1749 udma_free_tx_resources(uc); 1750 goto err_cleanup; 1751 } 1752 1753 uc->config.src_thread = ud->psil_base + uc->tchan->id; 1754 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 1755 K3_PSIL_DST_THREAD_ID_OFFSET; 1756 1757 irq_ring = uc->tchan->tc_ring; 1758 irq_udma_idx = uc->tchan->id; 1759 1760 ret = udma_tisci_m2m_channel_config(uc); 
		break;
	case DMA_MEM_TO_DEV:
		/* Slave transfer synchronized - mem to dev (TX) transfer */
		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
			uc->id);

		ret = udma_alloc_tx_resources(uc);
		if (ret)
			goto err_cleanup;

		uc->config.src_thread = ud->psil_base + uc->tchan->id;
		uc->config.dst_thread = uc->config.remote_thread_id;
		uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;

		irq_ring = uc->tchan->tc_ring;
		irq_udma_idx = uc->tchan->id;

		ret = udma_tisci_tx_channel_config(uc);
		break;
	case DMA_DEV_TO_MEM:
		/* Slave transfer synchronized - dev to mem (RX) transfer */
		dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
			uc->id);

		ret = udma_alloc_rx_resources(uc);
		if (ret)
			goto err_cleanup;

		uc->config.src_thread = uc->config.remote_thread_id;
		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
					K3_PSIL_DST_THREAD_ID_OFFSET;

		irq_ring = uc->rflow->r_ring;
		irq_udma_idx = match_data->rchan_oes_offset + uc->rchan->id;

		ret = udma_tisci_rx_channel_config(uc);
		break;
	default:
		/* Cannot happen */
		dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
			__func__, uc->id, uc->config.dir);
		ret = -EINVAL;
		goto err_cleanup;

	}

	/* check if the channel configuration was successful */
	if (ret)
		goto err_res_free;

	if (udma_is_chan_running(uc)) {
		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
		udma_reset_chan(uc, false);
		if (udma_is_chan_running(uc)) {
			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
			ret = -EBUSY;
			goto err_res_free;
		}
	}

	/* PSI-L pairing */
	ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
	if (ret) {
		dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
			uc->config.src_thread, uc->config.dst_thread);
		goto err_res_free;
	}

	uc->psil_paired = true;

	uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring);
	if (uc->irq_num_ring <= 0) {
		dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
			k3_ringacc_get_ring_id(irq_ring));
		ret = -EINVAL;
		goto err_psi_free;
	}

	ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
			  IRQF_TRIGGER_HIGH, uc->name, uc);
	if (ret) {
		dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
		goto err_irq_free;
	}

	/* Event from UDMA (TR events) only needed for slave TR mode channels */
	if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) {
		uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev,
							    irq_udma_idx);
		if (uc->irq_num_udma <= 0) {
			dev_err(ud->dev, "Failed to get udma irq (index: %u)\n",
				irq_udma_idx);
			free_irq(uc->irq_num_ring, uc);
			ret = -EINVAL;
			goto err_irq_free;
		}

		ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
				  uc->name, uc);
		if (ret) {
			dev_err(ud->dev, "chan%d: UDMA irq request failed\n",
				uc->id);
			free_irq(uc->irq_num_ring, uc);
			goto err_irq_free;
		}
	} else {
		uc->irq_num_udma = 0;
	}

	udma_reset_rings(uc);

	return 0;

err_irq_free:
	uc->irq_num_ring = 0;
	uc->irq_num_udma = 0;
err_psi_free:
	navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
	uc->psil_paired = false;
err_res_free:
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);
err_cleanup:
	udma_reset_uchan(uc);

	if (uc->use_dma_pool) {
		dma_pool_destroy(uc->hdesc_pool);
		uc->use_dma_pool = false;
	}

	return ret;
}
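
/*
 * Illustrative sketch (not part of the driver): udma_slave_config() below only
 * stores the client-provided dma_slave_config; it is applied later when the
 * transfer descriptors are prepared. A typical DMA client would do something
 * like the following (the channel name "tx" and the FIFO address are
 * hypothetical):
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "tx");
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 8,
 *	};
 *
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	dmaengine_slave_config(chan, &cfg);
 */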

static int udma_slave_config(struct dma_chan *chan,
			     struct dma_slave_config *cfg)
{
	struct udma_chan *uc = to_udma_chan(chan);

	memcpy(&uc->cfg, cfg, sizeof(uc->cfg));

	return 0;
}

static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc,
					    size_t tr_size, int tr_count,
					    enum dma_transfer_direction dir)
{
	struct udma_hwdesc *hwdesc;
	struct cppi5_desc_hdr_t *tr_desc;
	struct udma_desc *d;
	u32 reload_count = 0;
	u32 ring_id;

	switch (tr_size) {
	case 16:
	case 32:
	case 64:
	case 128:
		break;
	default:
		dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size);
		return NULL;
	}

	/* We have only one descriptor containing multiple TRs */
	d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT);
	if (!d)
		return NULL;

	d->sglen = tr_count;

	d->hwdesc_count = 1;
	hwdesc = &d->hwdesc[0];

	/* Allocate memory for DMA ring descriptor */
	if (uc->use_dma_pool) {
		hwdesc->cppi5_desc_size = uc->config.hdesc_size;
		hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
						GFP_NOWAIT,
						&hwdesc->cppi5_desc_paddr);
	} else {
		hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size,
								 tr_count);
		hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
						uc->ud->desc_align);
		hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev,
						hwdesc->cppi5_desc_size,
						&hwdesc->cppi5_desc_paddr,
						GFP_NOWAIT);
	}

	if (!hwdesc->cppi5_desc_vaddr) {
		kfree(d);
		return NULL;
	}

	/* Start of the TR req records */
	hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
	/* Start address of the TR response array */
	hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count;

	tr_desc = hwdesc->cppi5_desc_vaddr;

	if (uc->cyclic)
		reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE;

	if (dir == DMA_DEV_TO_MEM)
		ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
	else
		ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);

	cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count);
	cppi5_desc_set_pktids(tr_desc, uc->id,
			      CPPI5_INFO1_DESC_FLOWID_DEFAULT);
	cppi5_desc_set_retpolicy(tr_desc, 0, ring_id);

	return d;
}

/**
 * udma_get_tr_counters - calculate TR counters for a given length
 * @len: Length of the transfer
 * @align_to: Preferred alignment
 * @tr0_cnt0: First TR icnt0
 * @tr0_cnt1: First TR icnt1
 * @tr1_cnt0: Second (if used) TR icnt0
 *
 * For len < SZ_64K only one TR is enough, tr1_cnt0 is not updated.
 * For len >= SZ_64K two TRs are used in a simple way:
 * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1)
 * Second TR: the remaining length (tr1_cnt0)
 *
 * Returns the number of TRs the length needs (1 or 2), or
 * -EINVAL if the length can not be supported.
 */
static int udma_get_tr_counters(size_t len, unsigned long align_to,
				u16 *tr0_cnt0, u16 *tr0_cnt1, u16 *tr1_cnt0)
{
	if (len < SZ_64K) {
		*tr0_cnt0 = len;
		*tr0_cnt1 = 1;

		return 1;
	}

	if (align_to > 3)
		align_to = 3;

realign:
	*tr0_cnt0 = SZ_64K - BIT(align_to);
	if (len / *tr0_cnt0 >= SZ_64K) {
		if (align_to) {
			align_to--;
			goto realign;
		}
		return -EINVAL;
	}

	*tr0_cnt1 = len / *tr0_cnt0;
	*tr1_cnt0 = len % *tr0_cnt0;

	return 2;
}
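
/*
 * Worked example for udma_get_tr_counters() (illustrative only, the values
 * are arbitrary): len = 300000 bytes, buffer aligned to 8 bytes (align_to = 3).
 * Since len >= SZ_64K, two TRs are used:
 *
 *	tr0_cnt0 = SZ_64K - BIT(3) = 65528
 *	tr0_cnt1 = 300000 / 65528  = 4
 *	tr1_cnt0 = 300000 % 65528  = 37888
 *
 * The first TR moves 4 blocks of 65528 bytes (262112 bytes), the second TR
 * moves the remaining 37888 bytes: 262112 + 37888 = 300000.
 */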

static struct udma_desc *
udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
		      unsigned int sglen, enum dma_transfer_direction dir,
		      unsigned long tx_flags, void *context)
{
	struct scatterlist *sgent;
	struct udma_desc *d;
	struct cppi5_tr_type1_t *tr_req = NULL;
	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
	unsigned int i;
	size_t tr_size;
	int num_tr = 0;
	int tr_idx = 0;

	if (!is_slave_direction(dir)) {
		dev_err(uc->ud->dev, "Only slave DMA transfers are supported\n");
		return NULL;
	}

	/* estimate the number of TRs we will need */
	for_each_sg(sgl, sgent, sglen, i) {
		if (sg_dma_len(sgent) < SZ_64K)
			num_tr++;
		else
			num_tr += 2;
	}

	/* Now allocate and setup the descriptor. */
	tr_size = sizeof(struct cppi5_tr_type1_t);
	d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
	if (!d)
		return NULL;

	d->sglen = sglen;

	tr_req = d->hwdesc[0].tr_req_base;
	for_each_sg(sgl, sgent, sglen, i) {
		dma_addr_t sg_addr = sg_dma_address(sgent);

		num_tr = udma_get_tr_counters(sg_dma_len(sgent), __ffs(sg_addr),
					      &tr0_cnt0, &tr0_cnt1, &tr1_cnt0);
		if (num_tr < 0) {
			dev_err(uc->ud->dev, "size %u is not supported\n",
				sg_dma_len(sgent));
			udma_free_hwdesc(uc, d);
			kfree(d);
			return NULL;
		}

		/* Index with tr_idx, not i: an sg entry may need two TRs */
		cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
			      false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
		cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);

		tr_req[tr_idx].addr = sg_addr;
		tr_req[tr_idx].icnt0 = tr0_cnt0;
		tr_req[tr_idx].icnt1 = tr0_cnt1;
		tr_req[tr_idx].dim1 = tr0_cnt0;
		tr_idx++;

		if (num_tr == 2) {
			cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
				      false, false,
				      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
			cppi5_tr_csf_set(&tr_req[tr_idx].flags,
					 CPPI5_TR_CSF_SUPR_EVT);

			tr_req[tr_idx].addr = sg_addr + tr0_cnt1 * tr0_cnt0;
			tr_req[tr_idx].icnt0 = tr1_cnt0;
			tr_req[tr_idx].icnt1 = 1;
			tr_req[tr_idx].dim1 = tr1_cnt0;
			tr_idx++;
		}

		d->residue += sg_dma_len(sgent);
	}

	cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags,
			 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);

	return d;
}

static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
				   enum dma_slave_buswidth dev_width,
				   u16 elcnt)
{
	if (uc->config.ep_type != PSIL_EP_PDMA_XY)
		return 0;

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		d->static_tr.elsize = 0;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		d->static_tr.elsize = 1;
		break;
	case DMA_SLAVE_BUSWIDTH_3_BYTES:
		d->static_tr.elsize = 2;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		d->static_tr.elsize = 3;
		break;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		d->static_tr.elsize = 4;
		break;
	default: /* not reached */
		return -EINVAL;
	}

	d->static_tr.elcnt = elcnt;

	/*
	 * PDMA must close the packet when the channel is in packet mode.
	 * For TR mode, when the channel is not cyclic, we also need PDMA to
	 * close the packet, otherwise the transfer will stall because PDMA
	 * holds on to the data it has received from the peripheral.
	 */
	if (uc->config.pkt_mode || !uc->cyclic) {
		unsigned int div = dev_width * elcnt;

		if (uc->cyclic)
			d->static_tr.bstcnt = d->residue / d->sglen / div;
		else
			d->static_tr.bstcnt = d->residue / div;

		if (uc->config.dir == DMA_DEV_TO_MEM &&
		    d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
			return -EINVAL;
	} else {
		d->static_tr.bstcnt = 0;
	}

	return 0;
}
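
/*
 * Worked example for the bstcnt calculation above (illustrative only, the
 * values are arbitrary): a non-cyclic TR mode MEM_TO_DEV transfer with
 * d->residue = 4096 bytes, dev_width = DMA_SLAVE_BUSWIDTH_4_BYTES (4) and
 * elcnt = 8:
 *
 *	div = dev_width * elcnt = 32
 *	d->static_tr.bstcnt = d->residue / div = 4096 / 32 = 128
 *
 * i.e. the PDMA is told to close the packet after 128 bursts of 8 elements.
 */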
2139 * For TR mode when the channel is not cyclic we also need PDMA to close 2140 * the packet otherwise the transfer will stall because PDMA holds on 2141 * the data it has received from the peripheral. 2142 */ 2143 if (uc->config.pkt_mode || !uc->cyclic) { 2144 unsigned int div = dev_width * elcnt; 2145 2146 if (uc->cyclic) 2147 d->static_tr.bstcnt = d->residue / d->sglen / div; 2148 else 2149 d->static_tr.bstcnt = d->residue / div; 2150 2151 if (uc->config.dir == DMA_DEV_TO_MEM && 2152 d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask) 2153 return -EINVAL; 2154 } else { 2155 d->static_tr.bstcnt = 0; 2156 } 2157 2158 return 0; 2159 } 2160 2161 static struct udma_desc * 2162 udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl, 2163 unsigned int sglen, enum dma_transfer_direction dir, 2164 unsigned long tx_flags, void *context) 2165 { 2166 struct scatterlist *sgent; 2167 struct cppi5_host_desc_t *h_desc = NULL; 2168 struct udma_desc *d; 2169 u32 ring_id; 2170 unsigned int i; 2171 2172 d = kzalloc(struct_size(d, hwdesc, sglen), GFP_NOWAIT); 2173 if (!d) 2174 return NULL; 2175 2176 d->sglen = sglen; 2177 d->hwdesc_count = sglen; 2178 2179 if (dir == DMA_DEV_TO_MEM) 2180 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring); 2181 else 2182 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring); 2183 2184 for_each_sg(sgl, sgent, sglen, i) { 2185 struct udma_hwdesc *hwdesc = &d->hwdesc[i]; 2186 dma_addr_t sg_addr = sg_dma_address(sgent); 2187 struct cppi5_host_desc_t *desc; 2188 size_t sg_len = sg_dma_len(sgent); 2189 2190 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool, 2191 GFP_NOWAIT, 2192 &hwdesc->cppi5_desc_paddr); 2193 if (!hwdesc->cppi5_desc_vaddr) { 2194 dev_err(uc->ud->dev, 2195 "descriptor%d allocation failed\n", i); 2196 2197 udma_free_hwdesc(uc, d); 2198 kfree(d); 2199 return NULL; 2200 } 2201 2202 d->residue += sg_len; 2203 hwdesc->cppi5_desc_size = uc->config.hdesc_size; 2204 desc = hwdesc->cppi5_desc_vaddr; 2205 2206 if (i == 0) { 2207 cppi5_hdesc_init(desc, 0, 0); 2208 /* Flow and Packed ID */ 2209 cppi5_desc_set_pktids(&desc->hdr, uc->id, 2210 CPPI5_INFO1_DESC_FLOWID_DEFAULT); 2211 cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id); 2212 } else { 2213 cppi5_hdesc_reset_hbdesc(desc); 2214 cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff); 2215 } 2216 2217 /* attach the sg buffer to the descriptor */ 2218 cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len); 2219 2220 /* Attach link as host buffer descriptor */ 2221 if (h_desc) 2222 cppi5_hdesc_link_hbdesc(h_desc, 2223 hwdesc->cppi5_desc_paddr); 2224 2225 if (dir == DMA_MEM_TO_DEV) 2226 h_desc = desc; 2227 } 2228 2229 if (d->residue >= SZ_4M) { 2230 dev_err(uc->ud->dev, 2231 "%s: Transfer size %u is over the supported 4M range\n", 2232 __func__, d->residue); 2233 udma_free_hwdesc(uc, d); 2234 kfree(d); 2235 return NULL; 2236 } 2237 2238 h_desc = d->hwdesc[0].cppi5_desc_vaddr; 2239 cppi5_hdesc_set_pktlen(h_desc, d->residue); 2240 2241 return d; 2242 } 2243 2244 static int udma_attach_metadata(struct dma_async_tx_descriptor *desc, 2245 void *data, size_t len) 2246 { 2247 struct udma_desc *d = to_udma_desc(desc); 2248 struct udma_chan *uc = to_udma_chan(desc->chan); 2249 struct cppi5_host_desc_t *h_desc; 2250 u32 psd_size = len; 2251 u32 flags = 0; 2252 2253 if (!uc->config.pkt_mode || !uc->config.metadata_size) 2254 return -ENOTSUPP; 2255 2256 if (!data || len > uc->config.metadata_size) 2257 return -EINVAL; 2258 2259 if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE) 2260 return 
-EINVAL; 2261 2262 h_desc = d->hwdesc[0].cppi5_desc_vaddr; 2263 if (d->dir == DMA_MEM_TO_DEV) 2264 memcpy(h_desc->epib, data, len); 2265 2266 if (uc->config.needs_epib) 2267 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE; 2268 2269 d->metadata = data; 2270 d->metadata_size = len; 2271 if (uc->config.needs_epib) 2272 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT; 2273 2274 cppi5_hdesc_update_flags(h_desc, flags); 2275 cppi5_hdesc_update_psdata_size(h_desc, psd_size); 2276 2277 return 0; 2278 } 2279 2280 static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc, 2281 size_t *payload_len, size_t *max_len) 2282 { 2283 struct udma_desc *d = to_udma_desc(desc); 2284 struct udma_chan *uc = to_udma_chan(desc->chan); 2285 struct cppi5_host_desc_t *h_desc; 2286 2287 if (!uc->config.pkt_mode || !uc->config.metadata_size) 2288 return ERR_PTR(-ENOTSUPP); 2289 2290 h_desc = d->hwdesc[0].cppi5_desc_vaddr; 2291 2292 *max_len = uc->config.metadata_size; 2293 2294 *payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ? 2295 CPPI5_INFO0_HDESC_EPIB_SIZE : 0; 2296 *payload_len += cppi5_hdesc_get_psdata_size(h_desc); 2297 2298 return h_desc->epib; 2299 } 2300 2301 static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc, 2302 size_t payload_len) 2303 { 2304 struct udma_desc *d = to_udma_desc(desc); 2305 struct udma_chan *uc = to_udma_chan(desc->chan); 2306 struct cppi5_host_desc_t *h_desc; 2307 u32 psd_size = payload_len; 2308 u32 flags = 0; 2309 2310 if (!uc->config.pkt_mode || !uc->config.metadata_size) 2311 return -ENOTSUPP; 2312 2313 if (payload_len > uc->config.metadata_size) 2314 return -EINVAL; 2315 2316 if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE) 2317 return -EINVAL; 2318 2319 h_desc = d->hwdesc[0].cppi5_desc_vaddr; 2320 2321 if (uc->config.needs_epib) { 2322 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE; 2323 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT; 2324 } 2325 2326 cppi5_hdesc_update_flags(h_desc, flags); 2327 cppi5_hdesc_update_psdata_size(h_desc, psd_size); 2328 2329 return 0; 2330 } 2331 2332 static struct dma_descriptor_metadata_ops metadata_ops = { 2333 .attach = udma_attach_metadata, 2334 .get_ptr = udma_get_metadata_ptr, 2335 .set_len = udma_set_metadata_len, 2336 }; 2337 2338 static struct dma_async_tx_descriptor * 2339 udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 2340 unsigned int sglen, enum dma_transfer_direction dir, 2341 unsigned long tx_flags, void *context) 2342 { 2343 struct udma_chan *uc = to_udma_chan(chan); 2344 enum dma_slave_buswidth dev_width; 2345 struct udma_desc *d; 2346 u32 burst; 2347 2348 if (dir != uc->config.dir) { 2349 dev_err(chan->device->dev, 2350 "%s: chan%d is for %s, not supporting %s\n", 2351 __func__, uc->id, 2352 dmaengine_get_direction_text(uc->config.dir), 2353 dmaengine_get_direction_text(dir)); 2354 return NULL; 2355 } 2356 2357 if (dir == DMA_DEV_TO_MEM) { 2358 dev_width = uc->cfg.src_addr_width; 2359 burst = uc->cfg.src_maxburst; 2360 } else if (dir == DMA_MEM_TO_DEV) { 2361 dev_width = uc->cfg.dst_addr_width; 2362 burst = uc->cfg.dst_maxburst; 2363 } else { 2364 dev_err(chan->device->dev, "%s: bad direction?\n", __func__); 2365 return NULL; 2366 } 2367 2368 if (!burst) 2369 burst = 1; 2370 2371 if (uc->config.pkt_mode) 2372 d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags, 2373 context); 2374 else 2375 d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags, 2376 context); 2377 2378 if (!d) 2379 return NULL; 2380 2381 d->dir = dir; 2382 d->desc_idx = 0; 2383 d->tr_idx = 0; 2384 2385 /* 
static TR for remote PDMA */ 2386 if (udma_configure_statictr(uc, d, dev_width, burst)) { 2387 dev_err(uc->ud->dev, 2388 "%s: StaticTR Z is limited to maximum 4095 (%u)\n", 2389 __func__, d->static_tr.bstcnt); 2390 2391 udma_free_hwdesc(uc, d); 2392 kfree(d); 2393 return NULL; 2394 } 2395 2396 if (uc->config.metadata_size) 2397 d->vd.tx.metadata_ops = &metadata_ops; 2398 2399 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags); 2400 } 2401 2402 static struct udma_desc * 2403 udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr, 2404 size_t buf_len, size_t period_len, 2405 enum dma_transfer_direction dir, unsigned long flags) 2406 { 2407 struct udma_desc *d; 2408 size_t tr_size, period_addr; 2409 struct cppi5_tr_type1_t *tr_req; 2410 unsigned int periods = buf_len / period_len; 2411 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0; 2412 unsigned int i; 2413 int num_tr; 2414 2415 if (!is_slave_direction(dir)) { 2416 dev_err(uc->ud->dev, "Only slave cyclic is supported\n"); 2417 return NULL; 2418 } 2419 2420 num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0, 2421 &tr0_cnt1, &tr1_cnt0); 2422 if (num_tr < 0) { 2423 dev_err(uc->ud->dev, "size %zu is not supported\n", 2424 period_len); 2425 return NULL; 2426 } 2427 2428 /* Now allocate and setup the descriptor. */ 2429 tr_size = sizeof(struct cppi5_tr_type1_t); 2430 d = udma_alloc_tr_desc(uc, tr_size, periods * num_tr, dir); 2431 if (!d) 2432 return NULL; 2433 2434 tr_req = d->hwdesc[0].tr_req_base; 2435 period_addr = buf_addr; 2436 for (i = 0; i < periods; i++) { 2437 int tr_idx = i * num_tr; 2438 2439 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false, 2440 false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 2441 2442 tr_req[tr_idx].addr = period_addr; 2443 tr_req[tr_idx].icnt0 = tr0_cnt0; 2444 tr_req[tr_idx].icnt1 = tr0_cnt1; 2445 tr_req[tr_idx].dim1 = tr0_cnt0; 2446 2447 if (num_tr == 2) { 2448 cppi5_tr_csf_set(&tr_req[tr_idx].flags, 2449 CPPI5_TR_CSF_SUPR_EVT); 2450 tr_idx++; 2451 2452 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, 2453 false, false, 2454 CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 2455 2456 tr_req[tr_idx].addr = period_addr + tr0_cnt1 * tr0_cnt0; 2457 tr_req[tr_idx].icnt0 = tr1_cnt0; 2458 tr_req[tr_idx].icnt1 = 1; 2459 tr_req[tr_idx].dim1 = tr1_cnt0; 2460 } 2461 2462 if (!(flags & DMA_PREP_INTERRUPT)) 2463 cppi5_tr_csf_set(&tr_req[tr_idx].flags, 2464 CPPI5_TR_CSF_SUPR_EVT); 2465 2466 period_addr += period_len; 2467 } 2468 2469 return d; 2470 } 2471 2472 static struct udma_desc * 2473 udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr, 2474 size_t buf_len, size_t period_len, 2475 enum dma_transfer_direction dir, unsigned long flags) 2476 { 2477 struct udma_desc *d; 2478 u32 ring_id; 2479 int i; 2480 int periods = buf_len / period_len; 2481 2482 if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1)) 2483 return NULL; 2484 2485 if (period_len >= SZ_4M) 2486 return NULL; 2487 2488 d = kzalloc(struct_size(d, hwdesc, periods), GFP_NOWAIT); 2489 if (!d) 2490 return NULL; 2491 2492 d->hwdesc_count = periods; 2493 2494 /* TODO: re-check this... 
*/ 2495 if (dir == DMA_DEV_TO_MEM) 2496 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring); 2497 else 2498 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring); 2499 2500 for (i = 0; i < periods; i++) { 2501 struct udma_hwdesc *hwdesc = &d->hwdesc[i]; 2502 dma_addr_t period_addr = buf_addr + (period_len * i); 2503 struct cppi5_host_desc_t *h_desc; 2504 2505 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool, 2506 GFP_NOWAIT, 2507 &hwdesc->cppi5_desc_paddr); 2508 if (!hwdesc->cppi5_desc_vaddr) { 2509 dev_err(uc->ud->dev, 2510 "descriptor%d allocation failed\n", i); 2511 2512 udma_free_hwdesc(uc, d); 2513 kfree(d); 2514 return NULL; 2515 } 2516 2517 hwdesc->cppi5_desc_size = uc->config.hdesc_size; 2518 h_desc = hwdesc->cppi5_desc_vaddr; 2519 2520 cppi5_hdesc_init(h_desc, 0, 0); 2521 cppi5_hdesc_set_pktlen(h_desc, period_len); 2522 2523 /* Flow and Packed ID */ 2524 cppi5_desc_set_pktids(&h_desc->hdr, uc->id, 2525 CPPI5_INFO1_DESC_FLOWID_DEFAULT); 2526 cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id); 2527 2528 /* attach each period to a new descriptor */ 2529 cppi5_hdesc_attach_buf(h_desc, 2530 period_addr, period_len, 2531 period_addr, period_len); 2532 } 2533 2534 return d; 2535 } 2536 2537 static struct dma_async_tx_descriptor * 2538 udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, 2539 size_t period_len, enum dma_transfer_direction dir, 2540 unsigned long flags) 2541 { 2542 struct udma_chan *uc = to_udma_chan(chan); 2543 enum dma_slave_buswidth dev_width; 2544 struct udma_desc *d; 2545 u32 burst; 2546 2547 if (dir != uc->config.dir) { 2548 dev_err(chan->device->dev, 2549 "%s: chan%d is for %s, not supporting %s\n", 2550 __func__, uc->id, 2551 dmaengine_get_direction_text(uc->config.dir), 2552 dmaengine_get_direction_text(dir)); 2553 return NULL; 2554 } 2555 2556 uc->cyclic = true; 2557 2558 if (dir == DMA_DEV_TO_MEM) { 2559 dev_width = uc->cfg.src_addr_width; 2560 burst = uc->cfg.src_maxburst; 2561 } else if (dir == DMA_MEM_TO_DEV) { 2562 dev_width = uc->cfg.dst_addr_width; 2563 burst = uc->cfg.dst_maxburst; 2564 } else { 2565 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__); 2566 return NULL; 2567 } 2568 2569 if (!burst) 2570 burst = 1; 2571 2572 if (uc->config.pkt_mode) 2573 d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len, 2574 dir, flags); 2575 else 2576 d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len, 2577 dir, flags); 2578 2579 if (!d) 2580 return NULL; 2581 2582 d->sglen = buf_len / period_len; 2583 2584 d->dir = dir; 2585 d->residue = buf_len; 2586 2587 /* static TR for remote PDMA */ 2588 if (udma_configure_statictr(uc, d, dev_width, burst)) { 2589 dev_err(uc->ud->dev, 2590 "%s: StaticTR Z is limited to maximum 4095 (%u)\n", 2591 __func__, d->static_tr.bstcnt); 2592 2593 udma_free_hwdesc(uc, d); 2594 kfree(d); 2595 return NULL; 2596 } 2597 2598 if (uc->config.metadata_size) 2599 d->vd.tx.metadata_ops = &metadata_ops; 2600 2601 return vchan_tx_prep(&uc->vc, &d->vd, flags); 2602 } 2603 2604 static struct dma_async_tx_descriptor * 2605 udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 2606 size_t len, unsigned long tx_flags) 2607 { 2608 struct udma_chan *uc = to_udma_chan(chan); 2609 struct udma_desc *d; 2610 struct cppi5_tr_type15_t *tr_req; 2611 int num_tr; 2612 size_t tr_size = sizeof(struct cppi5_tr_type15_t); 2613 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0; 2614 2615 if (uc->config.dir != DMA_MEM_TO_MEM) { 2616 dev_err(chan->device->dev, 2617 "%s: chan%d is for %s, not supporting 
%s\n", 2618 __func__, uc->id, 2619 dmaengine_get_direction_text(uc->config.dir), 2620 dmaengine_get_direction_text(DMA_MEM_TO_MEM)); 2621 return NULL; 2622 } 2623 2624 num_tr = udma_get_tr_counters(len, __ffs(src | dest), &tr0_cnt0, 2625 &tr0_cnt1, &tr1_cnt0); 2626 if (num_tr < 0) { 2627 dev_err(uc->ud->dev, "size %zu is not supported\n", 2628 len); 2629 return NULL; 2630 } 2631 2632 d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM); 2633 if (!d) 2634 return NULL; 2635 2636 d->dir = DMA_MEM_TO_MEM; 2637 d->desc_idx = 0; 2638 d->tr_idx = 0; 2639 d->residue = len; 2640 2641 tr_req = d->hwdesc[0].tr_req_base; 2642 2643 cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true, 2644 CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 2645 cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT); 2646 2647 tr_req[0].addr = src; 2648 tr_req[0].icnt0 = tr0_cnt0; 2649 tr_req[0].icnt1 = tr0_cnt1; 2650 tr_req[0].icnt2 = 1; 2651 tr_req[0].icnt3 = 1; 2652 tr_req[0].dim1 = tr0_cnt0; 2653 2654 tr_req[0].daddr = dest; 2655 tr_req[0].dicnt0 = tr0_cnt0; 2656 tr_req[0].dicnt1 = tr0_cnt1; 2657 tr_req[0].dicnt2 = 1; 2658 tr_req[0].dicnt3 = 1; 2659 tr_req[0].ddim1 = tr0_cnt0; 2660 2661 if (num_tr == 2) { 2662 cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true, 2663 CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 2664 cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT); 2665 2666 tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0; 2667 tr_req[1].icnt0 = tr1_cnt0; 2668 tr_req[1].icnt1 = 1; 2669 tr_req[1].icnt2 = 1; 2670 tr_req[1].icnt3 = 1; 2671 2672 tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0; 2673 tr_req[1].dicnt0 = tr1_cnt0; 2674 tr_req[1].dicnt1 = 1; 2675 tr_req[1].dicnt2 = 1; 2676 tr_req[1].dicnt3 = 1; 2677 } 2678 2679 cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, 2680 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP); 2681 2682 if (uc->config.metadata_size) 2683 d->vd.tx.metadata_ops = &metadata_ops; 2684 2685 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags); 2686 } 2687 2688 static void udma_issue_pending(struct dma_chan *chan) 2689 { 2690 struct udma_chan *uc = to_udma_chan(chan); 2691 unsigned long flags; 2692 2693 spin_lock_irqsave(&uc->vc.lock, flags); 2694 2695 /* If we have something pending and no active descriptor, then */ 2696 if (vchan_issue_pending(&uc->vc) && !uc->desc) { 2697 /* 2698 * start a descriptor if the channel is NOT [marked as 2699 * terminating _and_ it is still running (teardown has not 2700 * completed yet)]. 
2701 */ 2702 if (!(uc->state == UDMA_CHAN_IS_TERMINATING && 2703 udma_is_chan_running(uc))) 2704 udma_start(uc); 2705 } 2706 2707 spin_unlock_irqrestore(&uc->vc.lock, flags); 2708 } 2709 2710 static enum dma_status udma_tx_status(struct dma_chan *chan, 2711 dma_cookie_t cookie, 2712 struct dma_tx_state *txstate) 2713 { 2714 struct udma_chan *uc = to_udma_chan(chan); 2715 enum dma_status ret; 2716 unsigned long flags; 2717 2718 spin_lock_irqsave(&uc->vc.lock, flags); 2719 2720 ret = dma_cookie_status(chan, cookie, txstate); 2721 2722 if (!udma_is_chan_running(uc)) 2723 ret = DMA_COMPLETE; 2724 2725 if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc)) 2726 ret = DMA_PAUSED; 2727 2728 if (ret == DMA_COMPLETE || !txstate) 2729 goto out; 2730 2731 if (uc->desc && uc->desc->vd.tx.cookie == cookie) { 2732 u32 peer_bcnt = 0; 2733 u32 bcnt = 0; 2734 u32 residue = uc->desc->residue; 2735 u32 delay = 0; 2736 2737 if (uc->desc->dir == DMA_MEM_TO_DEV) { 2738 bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG); 2739 2740 if (uc->config.ep_type != PSIL_EP_NATIVE) { 2741 peer_bcnt = udma_tchanrt_read(uc, 2742 UDMA_CHAN_RT_PEER_BCNT_REG); 2743 2744 if (bcnt > peer_bcnt) 2745 delay = bcnt - peer_bcnt; 2746 } 2747 } else if (uc->desc->dir == DMA_DEV_TO_MEM) { 2748 bcnt = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG); 2749 2750 if (uc->config.ep_type != PSIL_EP_NATIVE) { 2751 peer_bcnt = udma_rchanrt_read(uc, 2752 UDMA_CHAN_RT_PEER_BCNT_REG); 2753 2754 if (peer_bcnt > bcnt) 2755 delay = peer_bcnt - bcnt; 2756 } 2757 } else { 2758 bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG); 2759 } 2760 2761 bcnt -= uc->bcnt; 2762 if (bcnt && !(bcnt % uc->desc->residue)) 2763 residue = 0; 2764 else 2765 residue -= bcnt % uc->desc->residue; 2766 2767 if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) { 2768 ret = DMA_COMPLETE; 2769 delay = 0; 2770 } 2771 2772 dma_set_residue(txstate, residue); 2773 dma_set_in_flight_bytes(txstate, delay); 2774 2775 } else { 2776 ret = DMA_COMPLETE; 2777 } 2778 2779 out: 2780 spin_unlock_irqrestore(&uc->vc.lock, flags); 2781 return ret; 2782 } 2783 2784 static int udma_pause(struct dma_chan *chan) 2785 { 2786 struct udma_chan *uc = to_udma_chan(chan); 2787 2788 /* pause the channel */ 2789 switch (uc->config.dir) { 2790 case DMA_DEV_TO_MEM: 2791 udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 2792 UDMA_PEER_RT_EN_PAUSE, 2793 UDMA_PEER_RT_EN_PAUSE); 2794 break; 2795 case DMA_MEM_TO_DEV: 2796 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 2797 UDMA_PEER_RT_EN_PAUSE, 2798 UDMA_PEER_RT_EN_PAUSE); 2799 break; 2800 case DMA_MEM_TO_MEM: 2801 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG, 2802 UDMA_CHAN_RT_CTL_PAUSE, 2803 UDMA_CHAN_RT_CTL_PAUSE); 2804 break; 2805 default: 2806 return -EINVAL; 2807 } 2808 2809 return 0; 2810 } 2811 2812 static int udma_resume(struct dma_chan *chan) 2813 { 2814 struct udma_chan *uc = to_udma_chan(chan); 2815 2816 /* resume the channel */ 2817 switch (uc->config.dir) { 2818 case DMA_DEV_TO_MEM: 2819 udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 2820 UDMA_PEER_RT_EN_PAUSE, 0); 2821 2822 break; 2823 case DMA_MEM_TO_DEV: 2824 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 2825 UDMA_PEER_RT_EN_PAUSE, 0); 2826 break; 2827 case DMA_MEM_TO_MEM: 2828 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG, 2829 UDMA_CHAN_RT_CTL_PAUSE, 0); 2830 break; 2831 default: 2832 return -EINVAL; 2833 } 2834 2835 return 0; 2836 } 2837 2838 static int udma_terminate_all(struct dma_chan *chan) 2839 { 2840 struct udma_chan *uc = 
to_udma_chan(chan); 2841 unsigned long flags; 2842 LIST_HEAD(head); 2843 2844 spin_lock_irqsave(&uc->vc.lock, flags); 2845 2846 if (udma_is_chan_running(uc)) 2847 udma_stop(uc); 2848 2849 if (uc->desc) { 2850 uc->terminated_desc = uc->desc; 2851 uc->desc = NULL; 2852 uc->terminated_desc->terminated = true; 2853 cancel_delayed_work(&uc->tx_drain.work); 2854 } 2855 2856 uc->paused = false; 2857 2858 vchan_get_all_descriptors(&uc->vc, &head); 2859 spin_unlock_irqrestore(&uc->vc.lock, flags); 2860 vchan_dma_desc_free_list(&uc->vc, &head); 2861 2862 return 0; 2863 } 2864 2865 static void udma_synchronize(struct dma_chan *chan) 2866 { 2867 struct udma_chan *uc = to_udma_chan(chan); 2868 unsigned long timeout = msecs_to_jiffies(1000); 2869 2870 vchan_synchronize(&uc->vc); 2871 2872 if (uc->state == UDMA_CHAN_IS_TERMINATING) { 2873 timeout = wait_for_completion_timeout(&uc->teardown_completed, 2874 timeout); 2875 if (!timeout) { 2876 dev_warn(uc->ud->dev, "chan%d teardown timeout!\n", 2877 uc->id); 2878 udma_dump_chan_stdata(uc); 2879 udma_reset_chan(uc, true); 2880 } 2881 } 2882 2883 udma_reset_chan(uc, false); 2884 if (udma_is_chan_running(uc)) 2885 dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id); 2886 2887 cancel_delayed_work_sync(&uc->tx_drain.work); 2888 udma_reset_rings(uc); 2889 } 2890 2891 static void udma_desc_pre_callback(struct virt_dma_chan *vc, 2892 struct virt_dma_desc *vd, 2893 struct dmaengine_result *result) 2894 { 2895 struct udma_chan *uc = to_udma_chan(&vc->chan); 2896 struct udma_desc *d; 2897 2898 if (!vd) 2899 return; 2900 2901 d = to_udma_desc(&vd->tx); 2902 2903 if (d->metadata_size) 2904 udma_fetch_epib(uc, d); 2905 2906 /* Provide residue information for the client */ 2907 if (result) { 2908 void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx); 2909 2910 if (cppi5_desc_get_type(desc_vaddr) == 2911 CPPI5_INFO0_DESC_TYPE_VAL_HOST) { 2912 result->residue = d->residue - 2913 cppi5_hdesc_get_pktlen(desc_vaddr); 2914 if (result->residue) 2915 result->result = DMA_TRANS_ABORTED; 2916 else 2917 result->result = DMA_TRANS_NOERROR; 2918 } else { 2919 result->residue = 0; 2920 result->result = DMA_TRANS_NOERROR; 2921 } 2922 } 2923 } 2924 2925 /* 2926 * This tasklet handles the completion of a DMA descriptor by 2927 * calling its callback and freeing it. 
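 * It is used instead of the generic virt-dma completion tasklet so that
 * udma_desc_pre_callback() can fetch the EPIB metadata and fill in the
 * residue/result for the client before the callback is invoked.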
2928 */ 2929 static void udma_vchan_complete(unsigned long arg) 2930 { 2931 struct virt_dma_chan *vc = (struct virt_dma_chan *)arg; 2932 struct virt_dma_desc *vd, *_vd; 2933 struct dmaengine_desc_callback cb; 2934 LIST_HEAD(head); 2935 2936 spin_lock_irq(&vc->lock); 2937 list_splice_tail_init(&vc->desc_completed, &head); 2938 vd = vc->cyclic; 2939 if (vd) { 2940 vc->cyclic = NULL; 2941 dmaengine_desc_get_callback(&vd->tx, &cb); 2942 } else { 2943 memset(&cb, 0, sizeof(cb)); 2944 } 2945 spin_unlock_irq(&vc->lock); 2946 2947 udma_desc_pre_callback(vc, vd, NULL); 2948 dmaengine_desc_callback_invoke(&cb, NULL); 2949 2950 list_for_each_entry_safe(vd, _vd, &head, node) { 2951 struct dmaengine_result result; 2952 2953 dmaengine_desc_get_callback(&vd->tx, &cb); 2954 2955 list_del(&vd->node); 2956 2957 udma_desc_pre_callback(vc, vd, &result); 2958 dmaengine_desc_callback_invoke(&cb, &result); 2959 2960 vchan_vdesc_fini(vd); 2961 } 2962 } 2963 2964 static void udma_free_chan_resources(struct dma_chan *chan) 2965 { 2966 struct udma_chan *uc = to_udma_chan(chan); 2967 struct udma_dev *ud = to_udma_dev(chan->device); 2968 2969 udma_terminate_all(chan); 2970 if (uc->terminated_desc) { 2971 udma_reset_chan(uc, false); 2972 udma_reset_rings(uc); 2973 } 2974 2975 cancel_delayed_work_sync(&uc->tx_drain.work); 2976 2977 if (uc->irq_num_ring > 0) { 2978 free_irq(uc->irq_num_ring, uc); 2979 2980 uc->irq_num_ring = 0; 2981 } 2982 if (uc->irq_num_udma > 0) { 2983 free_irq(uc->irq_num_udma, uc); 2984 2985 uc->irq_num_udma = 0; 2986 } 2987 2988 /* Release PSI-L pairing */ 2989 if (uc->psil_paired) { 2990 navss_psil_unpair(ud, uc->config.src_thread, 2991 uc->config.dst_thread); 2992 uc->psil_paired = false; 2993 } 2994 2995 vchan_free_chan_resources(&uc->vc); 2996 tasklet_kill(&uc->vc.task); 2997 2998 udma_free_tx_resources(uc); 2999 udma_free_rx_resources(uc); 3000 udma_reset_uchan(uc); 3001 3002 if (uc->use_dma_pool) { 3003 dma_pool_destroy(uc->hdesc_pool); 3004 uc->use_dma_pool = false; 3005 } 3006 } 3007 3008 static struct platform_driver udma_driver; 3009 3010 struct udma_filter_param { 3011 int remote_thread_id; 3012 u32 atype; 3013 }; 3014 3015 static bool udma_dma_filter_fn(struct dma_chan *chan, void *param) 3016 { 3017 struct udma_chan_config *ucc; 3018 struct psil_endpoint_config *ep_config; 3019 struct udma_filter_param *filter_param; 3020 struct udma_chan *uc; 3021 struct udma_dev *ud; 3022 3023 if (chan->device->dev->driver != &udma_driver.driver) 3024 return false; 3025 3026 uc = to_udma_chan(chan); 3027 ucc = &uc->config; 3028 ud = uc->ud; 3029 filter_param = param; 3030 3031 if (filter_param->atype > 2) { 3032 dev_err(ud->dev, "Invalid channel atype: %u\n", 3033 filter_param->atype); 3034 return false; 3035 } 3036 3037 ucc->remote_thread_id = filter_param->remote_thread_id; 3038 ucc->atype = filter_param->atype; 3039 3040 if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET) 3041 ucc->dir = DMA_MEM_TO_DEV; 3042 else 3043 ucc->dir = DMA_DEV_TO_MEM; 3044 3045 ep_config = psil_get_ep_config(ucc->remote_thread_id); 3046 if (IS_ERR(ep_config)) { 3047 dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n", 3048 ucc->remote_thread_id); 3049 ucc->dir = DMA_MEM_TO_MEM; 3050 ucc->remote_thread_id = -1; 3051 ucc->atype = 0; 3052 return false; 3053 } 3054 3055 ucc->pkt_mode = ep_config->pkt_mode; 3056 ucc->channel_tpl = ep_config->channel_tpl; 3057 ucc->notdpkt = ep_config->notdpkt; 3058 ucc->ep_type = ep_config->ep_type; 3059 3060 if (ucc->ep_type != PSIL_EP_NATIVE) { 3061 const struct 
udma_match_data *match_data = ud->match_data; 3062 3063 if (match_data->flags & UDMA_FLAG_PDMA_ACC32) 3064 ucc->enable_acc32 = ep_config->pdma_acc32; 3065 if (match_data->flags & UDMA_FLAG_PDMA_BURST) 3066 ucc->enable_burst = ep_config->pdma_burst; 3067 } 3068 3069 ucc->needs_epib = ep_config->needs_epib; 3070 ucc->psd_size = ep_config->psd_size; 3071 ucc->metadata_size = 3072 (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) + 3073 ucc->psd_size; 3074 3075 if (ucc->pkt_mode) 3076 ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) + 3077 ucc->metadata_size, ud->desc_align); 3078 3079 dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id, 3080 ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir)); 3081 3082 return true; 3083 } 3084 3085 static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec, 3086 struct of_dma *ofdma) 3087 { 3088 struct udma_dev *ud = ofdma->of_dma_data; 3089 dma_cap_mask_t mask = ud->ddev.cap_mask; 3090 struct udma_filter_param filter_param; 3091 struct dma_chan *chan; 3092 3093 if (dma_spec->args_count != 1 && dma_spec->args_count != 2) 3094 return NULL; 3095 3096 filter_param.remote_thread_id = dma_spec->args[0]; 3097 if (dma_spec->args_count == 2) 3098 filter_param.atype = dma_spec->args[1]; 3099 else 3100 filter_param.atype = 0; 3101 3102 chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param, 3103 ofdma->of_node); 3104 if (!chan) { 3105 dev_err(ud->dev, "get channel fail in %s.\n", __func__); 3106 return ERR_PTR(-EINVAL); 3107 } 3108 3109 return chan; 3110 } 3111 3112 static struct udma_match_data am654_main_data = { 3113 .psil_base = 0x1000, 3114 .enable_memcpy_support = true, 3115 .statictr_z_mask = GENMASK(11, 0), 3116 .rchan_oes_offset = 0x2000, 3117 }; 3118 3119 static struct udma_match_data am654_mcu_data = { 3120 .psil_base = 0x6000, 3121 .enable_memcpy_support = false, 3122 .statictr_z_mask = GENMASK(11, 0), 3123 .rchan_oes_offset = 0x2000, 3124 }; 3125 3126 static struct udma_match_data j721e_main_data = { 3127 .psil_base = 0x1000, 3128 .enable_memcpy_support = true, 3129 .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST, 3130 .statictr_z_mask = GENMASK(23, 0), 3131 .rchan_oes_offset = 0x400, 3132 }; 3133 3134 static struct udma_match_data j721e_mcu_data = { 3135 .psil_base = 0x6000, 3136 .enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */ 3137 .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST, 3138 .statictr_z_mask = GENMASK(23, 0), 3139 .rchan_oes_offset = 0x400, 3140 }; 3141 3142 static const struct of_device_id udma_of_match[] = { 3143 { 3144 .compatible = "ti,am654-navss-main-udmap", 3145 .data = &am654_main_data, 3146 }, 3147 { 3148 .compatible = "ti,am654-navss-mcu-udmap", 3149 .data = &am654_mcu_data, 3150 }, { 3151 .compatible = "ti,j721e-navss-main-udmap", 3152 .data = &j721e_main_data, 3153 }, { 3154 .compatible = "ti,j721e-navss-mcu-udmap", 3155 .data = &j721e_mcu_data, 3156 }, 3157 { /* Sentinel */ }, 3158 }; 3159 3160 static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud) 3161 { 3162 struct resource *res; 3163 int i; 3164 3165 for (i = 0; i < MMR_LAST; i++) { 3166 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 3167 mmr_names[i]); 3168 ud->mmrs[i] = devm_ioremap_resource(&pdev->dev, res); 3169 if (IS_ERR(ud->mmrs[i])) 3170 return PTR_ERR(ud->mmrs[i]); 3171 } 3172 3173 return 0; 3174 } 3175 3176 static int udma_setup_resources(struct udma_dev *ud) 3177 { 3178 struct device *dev = ud->dev; 3179 int ch_count, ret, i, j; 3180 u32 cap2, 
cap3; 3181 struct ti_sci_resource_desc *rm_desc; 3182 struct ti_sci_resource *rm_res, irq_res; 3183 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 3184 static const char * const range_names[] = { "ti,sci-rm-range-tchan", 3185 "ti,sci-rm-range-rchan", 3186 "ti,sci-rm-range-rflow" }; 3187 3188 cap2 = udma_read(ud->mmrs[MMR_GCFG], UDMA_CAP_REG(2)); 3189 cap3 = udma_read(ud->mmrs[MMR_GCFG], UDMA_CAP_REG(3)); 3190 3191 ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3); 3192 ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2); 3193 ud->echan_cnt = UDMA_CAP2_ECHAN_CNT(cap2); 3194 ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2); 3195 ch_count = ud->tchan_cnt + ud->rchan_cnt; 3196 3197 /* Set up the throughput level start indexes */ 3198 if (of_device_is_compatible(dev->of_node, 3199 "ti,am654-navss-main-udmap")) { 3200 ud->tpl_levels = 2; 3201 ud->tpl_start_idx[0] = 8; 3202 } else if (of_device_is_compatible(dev->of_node, 3203 "ti,am654-navss-mcu-udmap")) { 3204 ud->tpl_levels = 2; 3205 ud->tpl_start_idx[0] = 2; 3206 } else if (UDMA_CAP3_UCHAN_CNT(cap3)) { 3207 ud->tpl_levels = 3; 3208 ud->tpl_start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3); 3209 ud->tpl_start_idx[0] = ud->tpl_start_idx[1] + 3210 UDMA_CAP3_HCHAN_CNT(cap3); 3211 } else if (UDMA_CAP3_HCHAN_CNT(cap3)) { 3212 ud->tpl_levels = 2; 3213 ud->tpl_start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3); 3214 } else { 3215 ud->tpl_levels = 1; 3216 } 3217 3218 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt), 3219 sizeof(unsigned long), GFP_KERNEL); 3220 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans), 3221 GFP_KERNEL); 3222 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt), 3223 sizeof(unsigned long), GFP_KERNEL); 3224 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans), 3225 GFP_KERNEL); 3226 ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt), 3227 sizeof(unsigned long), 3228 GFP_KERNEL); 3229 ud->rflow_gp_map_allocated = devm_kcalloc(dev, 3230 BITS_TO_LONGS(ud->rflow_cnt), 3231 sizeof(unsigned long), 3232 GFP_KERNEL); 3233 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt), 3234 sizeof(unsigned long), 3235 GFP_KERNEL); 3236 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows), 3237 GFP_KERNEL); 3238 3239 if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map || 3240 !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans || 3241 !ud->rflows || !ud->rflow_in_use) 3242 return -ENOMEM; 3243 3244 /* 3245 * RX flows with the same Ids as RX channels are reserved to be used 3246 * as default flows if remote HW can't generate flow_ids. Those 3247 * RX flows can be requested only explicitly by id. 
3248 */ 3249 bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt); 3250 3251 /* by default no GP rflows are assigned to Linux */ 3252 bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt); 3253 3254 /* Get resource ranges from tisci */ 3255 for (i = 0; i < RM_RANGE_LAST; i++) 3256 tisci_rm->rm_ranges[i] = 3257 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev, 3258 tisci_rm->tisci_dev_id, 3259 (char *)range_names[i]); 3260 3261 /* tchan ranges */ 3262 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; 3263 if (IS_ERR(rm_res)) { 3264 bitmap_zero(ud->tchan_map, ud->tchan_cnt); 3265 } else { 3266 bitmap_fill(ud->tchan_map, ud->tchan_cnt); 3267 for (i = 0; i < rm_res->sets; i++) { 3268 rm_desc = &rm_res->desc[i]; 3269 bitmap_clear(ud->tchan_map, rm_desc->start, 3270 rm_desc->num); 3271 dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n", 3272 rm_desc->start, rm_desc->num); 3273 } 3274 } 3275 irq_res.sets = rm_res->sets; 3276 3277 /* rchan and matching default flow ranges */ 3278 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; 3279 if (IS_ERR(rm_res)) { 3280 bitmap_zero(ud->rchan_map, ud->rchan_cnt); 3281 } else { 3282 bitmap_fill(ud->rchan_map, ud->rchan_cnt); 3283 for (i = 0; i < rm_res->sets; i++) { 3284 rm_desc = &rm_res->desc[i]; 3285 bitmap_clear(ud->rchan_map, rm_desc->start, 3286 rm_desc->num); 3287 dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n", 3288 rm_desc->start, rm_desc->num); 3289 } 3290 } 3291 3292 irq_res.sets += rm_res->sets; 3293 irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL); 3294 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; 3295 for (i = 0; i < rm_res->sets; i++) { 3296 irq_res.desc[i].start = rm_res->desc[i].start; 3297 irq_res.desc[i].num = rm_res->desc[i].num; 3298 } 3299 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; 3300 for (j = 0; j < rm_res->sets; j++, i++) { 3301 irq_res.desc[i].start = rm_res->desc[j].start + 3302 ud->match_data->rchan_oes_offset; 3303 irq_res.desc[i].num = rm_res->desc[j].num; 3304 } 3305 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res); 3306 kfree(irq_res.desc); 3307 if (ret) { 3308 dev_err(ud->dev, "Failed to allocate MSI interrupts\n"); 3309 return ret; 3310 } 3311 3312 /* GP rflow ranges */ 3313 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW]; 3314 if (IS_ERR(rm_res)) { 3315 /* all gp flows are assigned exclusively to Linux */ 3316 bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt, 3317 ud->rflow_cnt - ud->rchan_cnt); 3318 } else { 3319 for (i = 0; i < rm_res->sets; i++) { 3320 rm_desc = &rm_res->desc[i]; 3321 bitmap_clear(ud->rflow_gp_map, rm_desc->start, 3322 rm_desc->num); 3323 dev_dbg(dev, "ti-sci-res: rflow: %d:%d\n", 3324 rm_desc->start, rm_desc->num); 3325 } 3326 } 3327 3328 ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt); 3329 ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt); 3330 if (!ch_count) 3331 return -ENODEV; 3332 3333 ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels), 3334 GFP_KERNEL); 3335 if (!ud->channels) 3336 return -ENOMEM; 3337 3338 dev_info(dev, "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n", 3339 ch_count, 3340 ud->tchan_cnt - bitmap_weight(ud->tchan_map, ud->tchan_cnt), 3341 ud->rchan_cnt - bitmap_weight(ud->rchan_map, ud->rchan_cnt), 3342 ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map, 3343 ud->rflow_cnt)); 3344 3345 return ch_count; 3346 } 3347 3348 static int udma_setup_rx_flush(struct udma_dev *ud) 3349 { 3350 struct udma_rx_flush *rx_flush = &ud->rx_flush; 3351 struct cppi5_desc_hdr_t *tr_desc; 3352 struct cppi5_tr_type1_t *tr_req; 3353 struct cppi5_host_desc_t *desc; 3354 
struct device *dev = ud->dev; 3355 struct udma_hwdesc *hwdesc; 3356 size_t tr_size; 3357 3358 /* Allocate 1K buffer for discarded data on RX channel teardown */ 3359 rx_flush->buffer_size = SZ_1K; 3360 rx_flush->buffer_vaddr = devm_kzalloc(dev, rx_flush->buffer_size, 3361 GFP_KERNEL); 3362 if (!rx_flush->buffer_vaddr) 3363 return -ENOMEM; 3364 3365 rx_flush->buffer_paddr = dma_map_single(dev, rx_flush->buffer_vaddr, 3366 rx_flush->buffer_size, 3367 DMA_TO_DEVICE); 3368 if (dma_mapping_error(dev, rx_flush->buffer_paddr)) 3369 return -ENOMEM; 3370 3371 /* Set up descriptor to be used for TR mode */ 3372 hwdesc = &rx_flush->hwdescs[0]; 3373 tr_size = sizeof(struct cppi5_tr_type1_t); 3374 hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 1); 3375 hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size, 3376 ud->desc_align); 3377 3378 hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size, 3379 GFP_KERNEL); 3380 if (!hwdesc->cppi5_desc_vaddr) 3381 return -ENOMEM; 3382 3383 hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr, 3384 hwdesc->cppi5_desc_size, 3385 DMA_TO_DEVICE); 3386 if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr)) 3387 return -ENOMEM; 3388 3389 /* Start of the TR req records */ 3390 hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size; 3391 /* Start address of the TR response array */ 3392 hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size; 3393 3394 tr_desc = hwdesc->cppi5_desc_vaddr; 3395 cppi5_trdesc_init(tr_desc, 1, tr_size, 0, 0); 3396 cppi5_desc_set_pktids(tr_desc, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT); 3397 cppi5_desc_set_retpolicy(tr_desc, 0, 0); 3398 3399 tr_req = hwdesc->tr_req_base; 3400 cppi5_tr_init(&tr_req->flags, CPPI5_TR_TYPE1, false, false, 3401 CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 3402 cppi5_tr_csf_set(&tr_req->flags, CPPI5_TR_CSF_SUPR_EVT); 3403 3404 tr_req->addr = rx_flush->buffer_paddr; 3405 tr_req->icnt0 = rx_flush->buffer_size; 3406 tr_req->icnt1 = 1; 3407 3408 dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr, 3409 hwdesc->cppi5_desc_size, DMA_TO_DEVICE); 3410 3411 /* Set up descriptor to be used for packet mode */ 3412 hwdesc = &rx_flush->hwdescs[1]; 3413 hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) + 3414 CPPI5_INFO0_HDESC_EPIB_SIZE + 3415 CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE, 3416 ud->desc_align); 3417 3418 hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size, 3419 GFP_KERNEL); 3420 if (!hwdesc->cppi5_desc_vaddr) 3421 return -ENOMEM; 3422 3423 hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr, 3424 hwdesc->cppi5_desc_size, 3425 DMA_TO_DEVICE); 3426 if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr)) 3427 return -ENOMEM; 3428 3429 desc = hwdesc->cppi5_desc_vaddr; 3430 cppi5_hdesc_init(desc, 0, 0); 3431 cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT); 3432 cppi5_desc_set_retpolicy(&desc->hdr, 0, 0); 3433 3434 cppi5_hdesc_attach_buf(desc, 3435 rx_flush->buffer_paddr, rx_flush->buffer_size, 3436 rx_flush->buffer_paddr, rx_flush->buffer_size); 3437 3438 dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr, 3439 hwdesc->cppi5_desc_size, DMA_TO_DEVICE); 3440 return 0; 3441 } 3442 3443 #ifdef CONFIG_DEBUG_FS 3444 static void udma_dbg_summary_show_chan(struct seq_file *s, 3445 struct dma_chan *chan) 3446 { 3447 struct udma_chan *uc = to_udma_chan(chan); 3448 struct udma_chan_config *ucc = &uc->config; 3449 3450 seq_printf(s, " %-13s| %s", dma_chan_name(chan), 3451 chan->dbg_client_name ?: "in-use"); 3452 seq_printf(s, " (%s, 
", dmaengine_get_direction_text(uc->config.dir)); 3453 3454 switch (uc->config.dir) { 3455 case DMA_MEM_TO_MEM: 3456 seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id, 3457 ucc->src_thread, ucc->dst_thread); 3458 break; 3459 case DMA_DEV_TO_MEM: 3460 seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id, 3461 ucc->src_thread, ucc->dst_thread); 3462 break; 3463 case DMA_MEM_TO_DEV: 3464 seq_printf(s, "tchan%d [0x%04x -> 0x%04x], ", uc->tchan->id, 3465 ucc->src_thread, ucc->dst_thread); 3466 break; 3467 default: 3468 seq_printf(s, ")\n"); 3469 return; 3470 } 3471 3472 if (ucc->ep_type == PSIL_EP_NATIVE) { 3473 seq_printf(s, "PSI-L Native"); 3474 if (ucc->metadata_size) { 3475 seq_printf(s, "[%s", ucc->needs_epib ? " EPIB" : ""); 3476 if (ucc->psd_size) 3477 seq_printf(s, " PSDsize:%u", ucc->psd_size); 3478 seq_printf(s, " ]"); 3479 } 3480 } else { 3481 seq_printf(s, "PDMA"); 3482 if (ucc->enable_acc32 || ucc->enable_burst) 3483 seq_printf(s, "[%s%s ]", 3484 ucc->enable_acc32 ? " ACC32" : "", 3485 ucc->enable_burst ? " BURST" : ""); 3486 } 3487 3488 seq_printf(s, ", %s)\n", ucc->pkt_mode ? "Packet mode" : "TR mode"); 3489 } 3490 3491 static void udma_dbg_summary_show(struct seq_file *s, 3492 struct dma_device *dma_dev) 3493 { 3494 struct dma_chan *chan; 3495 3496 list_for_each_entry(chan, &dma_dev->channels, device_node) { 3497 if (chan->client_count) 3498 udma_dbg_summary_show_chan(s, chan); 3499 } 3500 } 3501 #endif /* CONFIG_DEBUG_FS */ 3502 3503 #define TI_UDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ 3504 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ 3505 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ 3506 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ 3507 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)) 3508 3509 static int udma_probe(struct platform_device *pdev) 3510 { 3511 struct device_node *navss_node = pdev->dev.parent->of_node; 3512 struct device *dev = &pdev->dev; 3513 struct udma_dev *ud; 3514 const struct of_device_id *match; 3515 int i, ret; 3516 int ch_count; 3517 3518 ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48)); 3519 if (ret) 3520 dev_err(dev, "failed to set dma mask stuff\n"); 3521 3522 ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL); 3523 if (!ud) 3524 return -ENOMEM; 3525 3526 ret = udma_get_mmrs(pdev, ud); 3527 if (ret) 3528 return ret; 3529 3530 ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci"); 3531 if (IS_ERR(ud->tisci_rm.tisci)) 3532 return PTR_ERR(ud->tisci_rm.tisci); 3533 3534 ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id", 3535 &ud->tisci_rm.tisci_dev_id); 3536 if (ret) { 3537 dev_err(dev, "ti,sci-dev-id read failure %d\n", ret); 3538 return ret; 3539 } 3540 pdev->id = ud->tisci_rm.tisci_dev_id; 3541 3542 ret = of_property_read_u32(navss_node, "ti,sci-dev-id", 3543 &ud->tisci_rm.tisci_navss_dev_id); 3544 if (ret) { 3545 dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret); 3546 return ret; 3547 } 3548 3549 ret = of_property_read_u32(dev->of_node, "ti,udma-atype", &ud->atype); 3550 if (!ret && ud->atype > 2) { 3551 dev_err(dev, "Invalid atype: %u\n", ud->atype); 3552 return -EINVAL; 3553 } 3554 3555 ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops; 3556 ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops; 3557 3558 ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc"); 3559 if (IS_ERR(ud->ringacc)) 3560 return PTR_ERR(ud->ringacc); 3561 3562 dev->msi_domain = of_msi_get_domain(dev, dev->of_node, 3563 DOMAIN_BUS_TI_SCI_INTA_MSI); 3564 if (!dev->msi_domain) { 3565 dev_err(dev, "Failed to 
get MSI domain\n"); 3566 return -EPROBE_DEFER; 3567 } 3568 3569 match = of_match_node(udma_of_match, dev->of_node); 3570 if (!match) { 3571 dev_err(dev, "No compatible match found\n"); 3572 return -ENODEV; 3573 } 3574 ud->match_data = match->data; 3575 3576 dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask); 3577 dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask); 3578 3579 ud->ddev.device_alloc_chan_resources = udma_alloc_chan_resources; 3580 ud->ddev.device_config = udma_slave_config; 3581 ud->ddev.device_prep_slave_sg = udma_prep_slave_sg; 3582 ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic; 3583 ud->ddev.device_issue_pending = udma_issue_pending; 3584 ud->ddev.device_tx_status = udma_tx_status; 3585 ud->ddev.device_pause = udma_pause; 3586 ud->ddev.device_resume = udma_resume; 3587 ud->ddev.device_terminate_all = udma_terminate_all; 3588 ud->ddev.device_synchronize = udma_synchronize; 3589 #ifdef CONFIG_DEBUG_FS 3590 ud->ddev.dbg_summary_show = udma_dbg_summary_show; 3591 #endif 3592 3593 ud->ddev.device_free_chan_resources = udma_free_chan_resources; 3594 ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS; 3595 ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS; 3596 ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); 3597 ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; 3598 ud->ddev.copy_align = DMAENGINE_ALIGN_8_BYTES; 3599 ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT | 3600 DESC_METADATA_ENGINE; 3601 if (ud->match_data->enable_memcpy_support) { 3602 dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask); 3603 ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy; 3604 ud->ddev.directions |= BIT(DMA_MEM_TO_MEM); 3605 } 3606 3607 ud->ddev.dev = dev; 3608 ud->dev = dev; 3609 ud->psil_base = ud->match_data->psil_base; 3610 3611 INIT_LIST_HEAD(&ud->ddev.channels); 3612 INIT_LIST_HEAD(&ud->desc_to_purge); 3613 3614 ch_count = udma_setup_resources(ud); 3615 if (ch_count <= 0) 3616 return ch_count; 3617 3618 spin_lock_init(&ud->lock); 3619 INIT_WORK(&ud->purge_work, udma_purge_desc_work); 3620 3621 ud->desc_align = 64; 3622 if (ud->desc_align < dma_get_cache_alignment()) 3623 ud->desc_align = dma_get_cache_alignment(); 3624 3625 ret = udma_setup_rx_flush(ud); 3626 if (ret) 3627 return ret; 3628 3629 for (i = 0; i < ud->tchan_cnt; i++) { 3630 struct udma_tchan *tchan = &ud->tchans[i]; 3631 3632 tchan->id = i; 3633 tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000; 3634 } 3635 3636 for (i = 0; i < ud->rchan_cnt; i++) { 3637 struct udma_rchan *rchan = &ud->rchans[i]; 3638 3639 rchan->id = i; 3640 rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000; 3641 } 3642 3643 for (i = 0; i < ud->rflow_cnt; i++) { 3644 struct udma_rflow *rflow = &ud->rflows[i]; 3645 3646 rflow->id = i; 3647 } 3648 3649 for (i = 0; i < ch_count; i++) { 3650 struct udma_chan *uc = &ud->channels[i]; 3651 3652 uc->ud = ud; 3653 uc->vc.desc_free = udma_desc_free; 3654 uc->id = i; 3655 uc->tchan = NULL; 3656 uc->rchan = NULL; 3657 uc->config.remote_thread_id = -1; 3658 uc->config.dir = DMA_MEM_TO_MEM; 3659 uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d", 3660 dev_name(dev), i); 3661 3662 vchan_init(&uc->vc, &ud->ddev); 3663 /* Use custom vchan completion handling */ 3664 tasklet_init(&uc->vc.task, udma_vchan_complete, 3665 (unsigned long)&uc->vc); 3666 init_completion(&uc->teardown_completed); 3667 INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion); 3668 } 3669 3670 ret = dma_async_device_register(&ud->ddev); 3671 if (ret) { 3672 dev_err(dev, "failed to register slave DMA engine: %d\n", ret); 3673 
return ret; 3674 } 3675 3676 platform_set_drvdata(pdev, ud); 3677 3678 ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud); 3679 if (ret) { 3680 dev_err(dev, "failed to register of_dma controller\n"); 3681 dma_async_device_unregister(&ud->ddev); 3682 } 3683 3684 return ret; 3685 } 3686 3687 static struct platform_driver udma_driver = { 3688 .driver = { 3689 .name = "ti-udma", 3690 .of_match_table = udma_of_match, 3691 .suppress_bind_attrs = true, 3692 }, 3693 .probe = udma_probe, 3694 }; 3695 builtin_platform_driver(udma_driver); 3696 3697 /* Private interfaces to UDMA */ 3698 #include "k3-udma-private.c" 3699
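/*
 * Illustrative usage sketch (not part of the driver): a hypothetical slave
 * client of this controller describes its PSI-L thread in DT and then uses
 * the generic dmaengine API. The phandle, channel name, thread ID, FIFO
 * address and callback below are made-up examples; error handling omitted.
 *
 *	// DT: dmas = <&main_udmap 0xc400>; dma-names = "tx";
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "tx");
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys,		// peripheral FIFO address
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 8,
 *	};
 *	struct dma_async_tx_descriptor *desc;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_single(chan, buf_dma, len,
 *					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	desc->callback = done_cb;
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */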