1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com 4 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com> 5 */ 6 7 #include <linux/kernel.h> 8 #include <linux/module.h> 9 #include <linux/delay.h> 10 #include <linux/dmaengine.h> 11 #include <linux/dma-mapping.h> 12 #include <linux/dmapool.h> 13 #include <linux/err.h> 14 #include <linux/init.h> 15 #include <linux/interrupt.h> 16 #include <linux/list.h> 17 #include <linux/platform_device.h> 18 #include <linux/slab.h> 19 #include <linux/spinlock.h> 20 #include <linux/sys_soc.h> 21 #include <linux/of.h> 22 #include <linux/of_dma.h> 23 #include <linux/of_irq.h> 24 #include <linux/workqueue.h> 25 #include <linux/completion.h> 26 #include <linux/soc/ti/k3-ringacc.h> 27 #include <linux/soc/ti/ti_sci_protocol.h> 28 #include <linux/soc/ti/ti_sci_inta_msi.h> 29 #include <linux/dma/k3-event-router.h> 30 #include <linux/dma/ti-cppi5.h> 31 32 #include "../virt-dma.h" 33 #include "k3-udma.h" 34 #include "k3-psil-priv.h" 35 36 struct udma_static_tr { 37 u8 elsize; /* RPSTR0 */ 38 u16 elcnt; /* RPSTR0 */ 39 u16 bstcnt; /* RPSTR1 */ 40 }; 41 42 #define K3_UDMA_MAX_RFLOWS 1024 43 #define K3_UDMA_DEFAULT_RING_SIZE 16 44 45 /* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */ 46 #define UDMA_RFLOW_SRCTAG_NONE 0 47 #define UDMA_RFLOW_SRCTAG_CFG_TAG 1 48 #define UDMA_RFLOW_SRCTAG_FLOW_ID 2 49 #define UDMA_RFLOW_SRCTAG_SRC_TAG 4 50 51 #define UDMA_RFLOW_DSTTAG_NONE 0 52 #define UDMA_RFLOW_DSTTAG_CFG_TAG 1 53 #define UDMA_RFLOW_DSTTAG_FLOW_ID 2 54 #define UDMA_RFLOW_DSTTAG_DST_TAG_LO 4 55 #define UDMA_RFLOW_DSTTAG_DST_TAG_HI 5 56 57 struct udma_chan; 58 59 enum k3_dma_type { 60 DMA_TYPE_UDMA = 0, 61 DMA_TYPE_BCDMA, 62 DMA_TYPE_PKTDMA, 63 }; 64 65 enum udma_mmr { 66 MMR_GCFG = 0, 67 MMR_BCHANRT, 68 MMR_RCHANRT, 69 MMR_TCHANRT, 70 MMR_LAST, 71 }; 72 73 static const char * const mmr_names[] = { 74 [MMR_GCFG] = "gcfg", 75 [MMR_BCHANRT] = "bchanrt", 76 [MMR_RCHANRT] = "rchanrt", 77 [MMR_TCHANRT] = "tchanrt", 78 }; 79 80 struct udma_tchan { 81 void __iomem *reg_rt; 82 83 int id; 84 struct k3_ring *t_ring; /* Transmit ring */ 85 struct k3_ring *tc_ring; /* Transmit Completion ring */ 86 int tflow_id; /* applicable only for PKTDMA */ 87 88 }; 89 90 #define udma_bchan udma_tchan 91 92 struct udma_rflow { 93 int id; 94 struct k3_ring *fd_ring; /* Free Descriptor ring */ 95 struct k3_ring *r_ring; /* Receive ring */ 96 }; 97 98 struct udma_rchan { 99 void __iomem *reg_rt; 100 101 int id; 102 }; 103 104 struct udma_oes_offsets { 105 /* K3 UDMA Output Event Offset */ 106 u32 udma_rchan; 107 108 /* BCDMA Output Event Offsets */ 109 u32 bcdma_bchan_data; 110 u32 bcdma_bchan_ring; 111 u32 bcdma_tchan_data; 112 u32 bcdma_tchan_ring; 113 u32 bcdma_rchan_data; 114 u32 bcdma_rchan_ring; 115 116 /* PKTDMA Output Event Offsets */ 117 u32 pktdma_tchan_flow; 118 u32 pktdma_rchan_flow; 119 }; 120 121 #define UDMA_FLAG_PDMA_ACC32 BIT(0) 122 #define UDMA_FLAG_PDMA_BURST BIT(1) 123 #define UDMA_FLAG_TDTYPE BIT(2) 124 #define UDMA_FLAG_BURST_SIZE BIT(3) 125 #define UDMA_FLAGS_J7_CLASS (UDMA_FLAG_PDMA_ACC32 | \ 126 UDMA_FLAG_PDMA_BURST | \ 127 UDMA_FLAG_TDTYPE | \ 128 UDMA_FLAG_BURST_SIZE) 129 130 struct udma_match_data { 131 enum k3_dma_type type; 132 u32 psil_base; 133 bool enable_memcpy_support; 134 u32 flags; 135 u32 statictr_z_mask; 136 u8 burst_size[3]; 137 struct udma_soc_data *soc_data; 138 }; 139 140 struct udma_soc_data { 141 struct udma_oes_offsets oes; 142 u32 bcdma_trigger_event_offset; 143 }; 144 
145 struct udma_hwdesc { 146 size_t cppi5_desc_size; 147 void *cppi5_desc_vaddr; 148 dma_addr_t cppi5_desc_paddr; 149 150 /* TR descriptor internal pointers */ 151 void *tr_req_base; 152 struct cppi5_tr_resp_t *tr_resp_base; 153 }; 154 155 struct udma_rx_flush { 156 struct udma_hwdesc hwdescs[2]; 157 158 size_t buffer_size; 159 void *buffer_vaddr; 160 dma_addr_t buffer_paddr; 161 }; 162 163 struct udma_tpl { 164 u8 levels; 165 u32 start_idx[3]; 166 }; 167 168 struct udma_dev { 169 struct dma_device ddev; 170 struct device *dev; 171 void __iomem *mmrs[MMR_LAST]; 172 const struct udma_match_data *match_data; 173 const struct udma_soc_data *soc_data; 174 175 struct udma_tpl bchan_tpl; 176 struct udma_tpl tchan_tpl; 177 struct udma_tpl rchan_tpl; 178 179 size_t desc_align; /* alignment to use for descriptors */ 180 181 struct udma_tisci_rm tisci_rm; 182 183 struct k3_ringacc *ringacc; 184 185 struct work_struct purge_work; 186 struct list_head desc_to_purge; 187 spinlock_t lock; 188 189 struct udma_rx_flush rx_flush; 190 191 int bchan_cnt; 192 int tchan_cnt; 193 int echan_cnt; 194 int rchan_cnt; 195 int rflow_cnt; 196 int tflow_cnt; 197 unsigned long *bchan_map; 198 unsigned long *tchan_map; 199 unsigned long *rchan_map; 200 unsigned long *rflow_gp_map; 201 unsigned long *rflow_gp_map_allocated; 202 unsigned long *rflow_in_use; 203 unsigned long *tflow_map; 204 205 struct udma_bchan *bchans; 206 struct udma_tchan *tchans; 207 struct udma_rchan *rchans; 208 struct udma_rflow *rflows; 209 210 struct udma_chan *channels; 211 u32 psil_base; 212 u32 atype; 213 u32 asel; 214 }; 215 216 struct udma_desc { 217 struct virt_dma_desc vd; 218 219 bool terminated; 220 221 enum dma_transfer_direction dir; 222 223 struct udma_static_tr static_tr; 224 u32 residue; 225 226 unsigned int sglen; 227 unsigned int desc_idx; /* Only used for cyclic in packet mode */ 228 unsigned int tr_idx; 229 230 u32 metadata_size; 231 void *metadata; /* pointer to provided metadata buffer (EPIP, PSdata) */ 232 233 unsigned int hwdesc_count; 234 struct udma_hwdesc hwdesc[]; 235 }; 236 237 enum udma_chan_state { 238 UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */ 239 UDMA_CHAN_IS_ACTIVE, /* Normal operation */ 240 UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */ 241 }; 242 243 struct udma_tx_drain { 244 struct delayed_work work; 245 ktime_t tstamp; 246 u32 residue; 247 }; 248 249 struct udma_chan_config { 250 bool pkt_mode; /* TR or packet */ 251 bool needs_epib; /* EPIB is needed for the communication or not */ 252 u32 psd_size; /* size of Protocol Specific Data */ 253 u32 metadata_size; /* (needs_epib ? 
16:0) + psd_size */ 254 u32 hdesc_size; /* Size of a packet descriptor in packet mode */ 255 bool notdpkt; /* Suppress sending TDC packet */ 256 int remote_thread_id; 257 u32 atype; 258 u32 asel; 259 u32 src_thread; 260 u32 dst_thread; 261 enum psil_endpoint_type ep_type; 262 bool enable_acc32; 263 bool enable_burst; 264 enum udma_tp_level channel_tpl; /* Channel Throughput Level */ 265 266 u32 tr_trigger_type; 267 unsigned long tx_flags; 268 269 /* PKDMA mapped channel */ 270 int mapped_channel_id; 271 /* PKTDMA default tflow or rflow for mapped channel */ 272 int default_flow_id; 273 274 enum dma_transfer_direction dir; 275 }; 276 277 struct udma_chan { 278 struct virt_dma_chan vc; 279 struct dma_slave_config cfg; 280 struct udma_dev *ud; 281 struct device *dma_dev; 282 struct udma_desc *desc; 283 struct udma_desc *terminated_desc; 284 struct udma_static_tr static_tr; 285 char *name; 286 287 struct udma_bchan *bchan; 288 struct udma_tchan *tchan; 289 struct udma_rchan *rchan; 290 struct udma_rflow *rflow; 291 292 bool psil_paired; 293 294 int irq_num_ring; 295 int irq_num_udma; 296 297 bool cyclic; 298 bool paused; 299 300 enum udma_chan_state state; 301 struct completion teardown_completed; 302 303 struct udma_tx_drain tx_drain; 304 305 /* Channel configuration parameters */ 306 struct udma_chan_config config; 307 /* Channel configuration parameters (backup) */ 308 struct udma_chan_config backup_config; 309 310 /* dmapool for packet mode descriptors */ 311 bool use_dma_pool; 312 struct dma_pool *hdesc_pool; 313 314 u32 id; 315 }; 316 317 static inline struct udma_dev *to_udma_dev(struct dma_device *d) 318 { 319 return container_of(d, struct udma_dev, ddev); 320 } 321 322 static inline struct udma_chan *to_udma_chan(struct dma_chan *c) 323 { 324 return container_of(c, struct udma_chan, vc.chan); 325 } 326 327 static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t) 328 { 329 return container_of(t, struct udma_desc, vd.tx); 330 } 331 332 /* Generic register access functions */ 333 static inline u32 udma_read(void __iomem *base, int reg) 334 { 335 return readl(base + reg); 336 } 337 338 static inline void udma_write(void __iomem *base, int reg, u32 val) 339 { 340 writel(val, base + reg); 341 } 342 343 static inline void udma_update_bits(void __iomem *base, int reg, 344 u32 mask, u32 val) 345 { 346 u32 tmp, orig; 347 348 orig = readl(base + reg); 349 tmp = orig & ~mask; 350 tmp |= (val & mask); 351 352 if (tmp != orig) 353 writel(tmp, base + reg); 354 } 355 356 /* TCHANRT */ 357 static inline u32 udma_tchanrt_read(struct udma_chan *uc, int reg) 358 { 359 if (!uc->tchan) 360 return 0; 361 return udma_read(uc->tchan->reg_rt, reg); 362 } 363 364 static inline void udma_tchanrt_write(struct udma_chan *uc, int reg, u32 val) 365 { 366 if (!uc->tchan) 367 return; 368 udma_write(uc->tchan->reg_rt, reg, val); 369 } 370 371 static inline void udma_tchanrt_update_bits(struct udma_chan *uc, int reg, 372 u32 mask, u32 val) 373 { 374 if (!uc->tchan) 375 return; 376 udma_update_bits(uc->tchan->reg_rt, reg, mask, val); 377 } 378 379 /* RCHANRT */ 380 static inline u32 udma_rchanrt_read(struct udma_chan *uc, int reg) 381 { 382 if (!uc->rchan) 383 return 0; 384 return udma_read(uc->rchan->reg_rt, reg); 385 } 386 387 static inline void udma_rchanrt_write(struct udma_chan *uc, int reg, u32 val) 388 { 389 if (!uc->rchan) 390 return; 391 udma_write(uc->rchan->reg_rt, reg, val); 392 } 393 394 static inline void udma_rchanrt_update_bits(struct udma_chan *uc, int reg, 395 u32 mask, u32 val) 
396 { 397 if (!uc->rchan) 398 return; 399 udma_update_bits(uc->rchan->reg_rt, reg, mask, val); 400 } 401 402 static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread) 403 { 404 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 405 406 dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; 407 return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci, 408 tisci_rm->tisci_navss_dev_id, 409 src_thread, dst_thread); 410 } 411 412 static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread, 413 u32 dst_thread) 414 { 415 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 416 417 dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; 418 return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci, 419 tisci_rm->tisci_navss_dev_id, 420 src_thread, dst_thread); 421 } 422 423 static void k3_configure_chan_coherency(struct dma_chan *chan, u32 asel) 424 { 425 struct device *chan_dev = &chan->dev->device; 426 427 if (asel == 0) { 428 /* No special handling for the channel */ 429 chan->dev->chan_dma_dev = false; 430 431 chan_dev->dma_coherent = false; 432 chan_dev->dma_parms = NULL; 433 } else if (asel == 14 || asel == 15) { 434 chan->dev->chan_dma_dev = true; 435 436 chan_dev->dma_coherent = true; 437 dma_coerce_mask_and_coherent(chan_dev, DMA_BIT_MASK(48)); 438 chan_dev->dma_parms = chan_dev->parent->dma_parms; 439 } else { 440 dev_warn(chan->device->dev, "Invalid ASEL value: %u\n", asel); 441 442 chan_dev->dma_coherent = false; 443 chan_dev->dma_parms = NULL; 444 } 445 } 446 447 static u8 udma_get_chan_tpl_index(struct udma_tpl *tpl_map, int chan_id) 448 { 449 int i; 450 451 for (i = 0; i < tpl_map->levels; i++) { 452 if (chan_id >= tpl_map->start_idx[i]) 453 return i; 454 } 455 456 return 0; 457 } 458 459 static void udma_reset_uchan(struct udma_chan *uc) 460 { 461 memset(&uc->config, 0, sizeof(uc->config)); 462 uc->config.remote_thread_id = -1; 463 uc->config.mapped_channel_id = -1; 464 uc->config.default_flow_id = -1; 465 uc->state = UDMA_CHAN_IS_IDLE; 466 } 467 468 static void udma_dump_chan_stdata(struct udma_chan *uc) 469 { 470 struct device *dev = uc->ud->dev; 471 u32 offset; 472 int i; 473 474 if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) { 475 dev_dbg(dev, "TCHAN State data:\n"); 476 for (i = 0; i < 32; i++) { 477 offset = UDMA_CHAN_RT_STDATA_REG + i * 4; 478 dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i, 479 udma_tchanrt_read(uc, offset)); 480 } 481 } 482 483 if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) { 484 dev_dbg(dev, "RCHAN State data:\n"); 485 for (i = 0; i < 32; i++) { 486 offset = UDMA_CHAN_RT_STDATA_REG + i * 4; 487 dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i, 488 udma_rchanrt_read(uc, offset)); 489 } 490 } 491 } 492 493 static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d, 494 int idx) 495 { 496 return d->hwdesc[idx].cppi5_desc_paddr; 497 } 498 499 static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx) 500 { 501 return d->hwdesc[idx].cppi5_desc_vaddr; 502 } 503 504 static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc, 505 dma_addr_t paddr) 506 { 507 struct udma_desc *d = uc->terminated_desc; 508 509 if (d) { 510 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d, 511 d->desc_idx); 512 513 if (desc_paddr != paddr) 514 d = NULL; 515 } 516 517 if (!d) { 518 d = uc->desc; 519 if (d) { 520 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d, 521 d->desc_idx); 522 523 if (desc_paddr != paddr) 524 d = NULL; 525 } 526 } 527 528 return d; 529 } 530 531 static void 
udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d) 532 { 533 if (uc->use_dma_pool) { 534 int i; 535 536 for (i = 0; i < d->hwdesc_count; i++) { 537 if (!d->hwdesc[i].cppi5_desc_vaddr) 538 continue; 539 540 dma_pool_free(uc->hdesc_pool, 541 d->hwdesc[i].cppi5_desc_vaddr, 542 d->hwdesc[i].cppi5_desc_paddr); 543 544 d->hwdesc[i].cppi5_desc_vaddr = NULL; 545 } 546 } else if (d->hwdesc[0].cppi5_desc_vaddr) { 547 dma_free_coherent(uc->dma_dev, d->hwdesc[0].cppi5_desc_size, 548 d->hwdesc[0].cppi5_desc_vaddr, 549 d->hwdesc[0].cppi5_desc_paddr); 550 551 d->hwdesc[0].cppi5_desc_vaddr = NULL; 552 } 553 } 554 555 static void udma_purge_desc_work(struct work_struct *work) 556 { 557 struct udma_dev *ud = container_of(work, typeof(*ud), purge_work); 558 struct virt_dma_desc *vd, *_vd; 559 unsigned long flags; 560 LIST_HEAD(head); 561 562 spin_lock_irqsave(&ud->lock, flags); 563 list_splice_tail_init(&ud->desc_to_purge, &head); 564 spin_unlock_irqrestore(&ud->lock, flags); 565 566 list_for_each_entry_safe(vd, _vd, &head, node) { 567 struct udma_chan *uc = to_udma_chan(vd->tx.chan); 568 struct udma_desc *d = to_udma_desc(&vd->tx); 569 570 udma_free_hwdesc(uc, d); 571 list_del(&vd->node); 572 kfree(d); 573 } 574 575 /* If more to purge, schedule the work again */ 576 if (!list_empty(&ud->desc_to_purge)) 577 schedule_work(&ud->purge_work); 578 } 579 580 static void udma_desc_free(struct virt_dma_desc *vd) 581 { 582 struct udma_dev *ud = to_udma_dev(vd->tx.chan->device); 583 struct udma_chan *uc = to_udma_chan(vd->tx.chan); 584 struct udma_desc *d = to_udma_desc(&vd->tx); 585 unsigned long flags; 586 587 if (uc->terminated_desc == d) 588 uc->terminated_desc = NULL; 589 590 if (uc->use_dma_pool) { 591 udma_free_hwdesc(uc, d); 592 kfree(d); 593 return; 594 } 595 596 spin_lock_irqsave(&ud->lock, flags); 597 list_add_tail(&vd->node, &ud->desc_to_purge); 598 spin_unlock_irqrestore(&ud->lock, flags); 599 600 schedule_work(&ud->purge_work); 601 } 602 603 static bool udma_is_chan_running(struct udma_chan *uc) 604 { 605 u32 trt_ctl = 0; 606 u32 rrt_ctl = 0; 607 608 if (uc->tchan) 609 trt_ctl = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG); 610 if (uc->rchan) 611 rrt_ctl = udma_rchanrt_read(uc, UDMA_CHAN_RT_CTL_REG); 612 613 if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN) 614 return true; 615 616 return false; 617 } 618 619 static bool udma_is_chan_paused(struct udma_chan *uc) 620 { 621 u32 val, pause_mask; 622 623 switch (uc->config.dir) { 624 case DMA_DEV_TO_MEM: 625 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG); 626 pause_mask = UDMA_PEER_RT_EN_PAUSE; 627 break; 628 case DMA_MEM_TO_DEV: 629 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG); 630 pause_mask = UDMA_PEER_RT_EN_PAUSE; 631 break; 632 case DMA_MEM_TO_MEM: 633 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG); 634 pause_mask = UDMA_CHAN_RT_CTL_PAUSE; 635 break; 636 default: 637 return false; 638 } 639 640 if (val & pause_mask) 641 return true; 642 643 return false; 644 } 645 646 static inline dma_addr_t udma_get_rx_flush_hwdesc_paddr(struct udma_chan *uc) 647 { 648 return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr; 649 } 650 651 static int udma_push_to_ring(struct udma_chan *uc, int idx) 652 { 653 struct udma_desc *d = uc->desc; 654 struct k3_ring *ring = NULL; 655 dma_addr_t paddr; 656 657 switch (uc->config.dir) { 658 case DMA_DEV_TO_MEM: 659 ring = uc->rflow->fd_ring; 660 break; 661 case DMA_MEM_TO_DEV: 662 case DMA_MEM_TO_MEM: 663 ring = uc->tchan->t_ring; 664 break; 665 default: 666 
return -EINVAL; 667 } 668 669 /* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */ 670 if (idx == -1) { 671 paddr = udma_get_rx_flush_hwdesc_paddr(uc); 672 } else { 673 paddr = udma_curr_cppi5_desc_paddr(d, idx); 674 675 wmb(); /* Ensure that writes are not moved over this point */ 676 } 677 678 return k3_ringacc_ring_push(ring, &paddr); 679 } 680 681 static bool udma_desc_is_rx_flush(struct udma_chan *uc, dma_addr_t addr) 682 { 683 if (uc->config.dir != DMA_DEV_TO_MEM) 684 return false; 685 686 if (addr == udma_get_rx_flush_hwdesc_paddr(uc)) 687 return true; 688 689 return false; 690 } 691 692 static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr) 693 { 694 struct k3_ring *ring = NULL; 695 int ret; 696 697 switch (uc->config.dir) { 698 case DMA_DEV_TO_MEM: 699 ring = uc->rflow->r_ring; 700 break; 701 case DMA_MEM_TO_DEV: 702 case DMA_MEM_TO_MEM: 703 ring = uc->tchan->tc_ring; 704 break; 705 default: 706 return -ENOENT; 707 } 708 709 ret = k3_ringacc_ring_pop(ring, addr); 710 if (ret) 711 return ret; 712 713 rmb(); /* Ensure that reads are not moved before this point */ 714 715 /* Teardown completion */ 716 if (cppi5_desc_is_tdcm(*addr)) 717 return 0; 718 719 /* Check for flush descriptor */ 720 if (udma_desc_is_rx_flush(uc, *addr)) 721 return -ENOENT; 722 723 return 0; 724 } 725 726 static void udma_reset_rings(struct udma_chan *uc) 727 { 728 struct k3_ring *ring1 = NULL; 729 struct k3_ring *ring2 = NULL; 730 731 switch (uc->config.dir) { 732 case DMA_DEV_TO_MEM: 733 if (uc->rchan) { 734 ring1 = uc->rflow->fd_ring; 735 ring2 = uc->rflow->r_ring; 736 } 737 break; 738 case DMA_MEM_TO_DEV: 739 case DMA_MEM_TO_MEM: 740 if (uc->tchan) { 741 ring1 = uc->tchan->t_ring; 742 ring2 = uc->tchan->tc_ring; 743 } 744 break; 745 default: 746 break; 747 } 748 749 if (ring1) 750 k3_ringacc_ring_reset_dma(ring1, 751 k3_ringacc_ring_get_occ(ring1)); 752 if (ring2) 753 k3_ringacc_ring_reset(ring2); 754 755 /* make sure we are not leaking memory by stalled descriptor */ 756 if (uc->terminated_desc) { 757 udma_desc_free(&uc->terminated_desc->vd); 758 uc->terminated_desc = NULL; 759 } 760 } 761 762 static void udma_decrement_byte_counters(struct udma_chan *uc, u32 val) 763 { 764 if (uc->desc->dir == DMA_DEV_TO_MEM) { 765 udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val); 766 udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val); 767 if (uc->config.ep_type != PSIL_EP_NATIVE) 768 udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val); 769 } else { 770 udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val); 771 udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val); 772 if (!uc->bchan && uc->config.ep_type != PSIL_EP_NATIVE) 773 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val); 774 } 775 } 776 777 static void udma_reset_counters(struct udma_chan *uc) 778 { 779 u32 val; 780 781 if (uc->tchan) { 782 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG); 783 udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val); 784 785 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG); 786 udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val); 787 788 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG); 789 udma_tchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val); 790 791 if (!uc->bchan) { 792 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG); 793 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val); 794 } 795 } 796 797 if (uc->rchan) { 798 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG); 799 udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val); 800 801 val = udma_rchanrt_read(uc, 
UDMA_CHAN_RT_SBCNT_REG); 802 udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val); 803 804 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG); 805 udma_rchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val); 806 807 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG); 808 udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val); 809 } 810 } 811 812 static int udma_reset_chan(struct udma_chan *uc, bool hard) 813 { 814 switch (uc->config.dir) { 815 case DMA_DEV_TO_MEM: 816 udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0); 817 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0); 818 break; 819 case DMA_MEM_TO_DEV: 820 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0); 821 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0); 822 break; 823 case DMA_MEM_TO_MEM: 824 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0); 825 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0); 826 break; 827 default: 828 return -EINVAL; 829 } 830 831 /* Reset all counters */ 832 udma_reset_counters(uc); 833 834 /* Hard reset: re-initialize the channel to reset */ 835 if (hard) { 836 struct udma_chan_config ucc_backup; 837 int ret; 838 839 memcpy(&ucc_backup, &uc->config, sizeof(uc->config)); 840 uc->ud->ddev.device_free_chan_resources(&uc->vc.chan); 841 842 /* restore the channel configuration */ 843 memcpy(&uc->config, &ucc_backup, sizeof(uc->config)); 844 ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan); 845 if (ret) 846 return ret; 847 848 /* 849 * Setting forced teardown after forced reset helps recovering 850 * the rchan. 851 */ 852 if (uc->config.dir == DMA_DEV_TO_MEM) 853 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 854 UDMA_CHAN_RT_CTL_EN | 855 UDMA_CHAN_RT_CTL_TDOWN | 856 UDMA_CHAN_RT_CTL_FTDOWN); 857 } 858 uc->state = UDMA_CHAN_IS_IDLE; 859 860 return 0; 861 } 862 863 static void udma_start_desc(struct udma_chan *uc) 864 { 865 struct udma_chan_config *ucc = &uc->config; 866 867 if (uc->ud->match_data->type == DMA_TYPE_UDMA && ucc->pkt_mode && 868 (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) { 869 int i; 870 871 /* 872 * UDMA only: Push all descriptors to ring for packet mode 873 * cyclic or RX 874 * PKTDMA supports pre-linked descriptor and cyclic is not 875 * supported 876 */ 877 for (i = 0; i < uc->desc->sglen; i++) 878 udma_push_to_ring(uc, i); 879 } else { 880 udma_push_to_ring(uc, 0); 881 } 882 } 883 884 static bool udma_chan_needs_reconfiguration(struct udma_chan *uc) 885 { 886 /* Only PDMAs have staticTR */ 887 if (uc->config.ep_type == PSIL_EP_NATIVE) 888 return false; 889 890 /* Check if the staticTR configuration has changed for TX */ 891 if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr))) 892 return true; 893 894 return false; 895 } 896 897 static int udma_start(struct udma_chan *uc) 898 { 899 struct virt_dma_desc *vd = vchan_next_desc(&uc->vc); 900 901 if (!vd) { 902 uc->desc = NULL; 903 return -ENOENT; 904 } 905 906 list_del(&vd->node); 907 908 uc->desc = to_udma_desc(&vd->tx); 909 910 /* Channel is already running and does not need reconfiguration */ 911 if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) { 912 udma_start_desc(uc); 913 goto out; 914 } 915 916 /* Make sure that we clear the teardown bit, if it is set */ 917 udma_reset_chan(uc, false); 918 919 /* Push descriptors before we start the channel */ 920 udma_start_desc(uc); 921 922 switch (uc->desc->dir) { 923 case DMA_DEV_TO_MEM: 924 /* Config remote TR */ 925 if (uc->config.ep_type == PSIL_EP_PDMA_XY) { 926 u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) | 927 
PDMA_STATIC_TR_X(uc->desc->static_tr.elsize); 928 const struct udma_match_data *match_data = 929 uc->ud->match_data; 930 931 if (uc->config.enable_acc32) 932 val |= PDMA_STATIC_TR_XY_ACC32; 933 if (uc->config.enable_burst) 934 val |= PDMA_STATIC_TR_XY_BURST; 935 936 udma_rchanrt_write(uc, 937 UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG, 938 val); 939 940 udma_rchanrt_write(uc, 941 UDMA_CHAN_RT_PEER_STATIC_TR_Z_REG, 942 PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt, 943 match_data->statictr_z_mask)); 944 945 /* save the current staticTR configuration */ 946 memcpy(&uc->static_tr, &uc->desc->static_tr, 947 sizeof(uc->static_tr)); 948 } 949 950 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 951 UDMA_CHAN_RT_CTL_EN); 952 953 /* Enable remote */ 954 udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 955 UDMA_PEER_RT_EN_ENABLE); 956 957 break; 958 case DMA_MEM_TO_DEV: 959 /* Config remote TR */ 960 if (uc->config.ep_type == PSIL_EP_PDMA_XY) { 961 u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) | 962 PDMA_STATIC_TR_X(uc->desc->static_tr.elsize); 963 964 if (uc->config.enable_acc32) 965 val |= PDMA_STATIC_TR_XY_ACC32; 966 if (uc->config.enable_burst) 967 val |= PDMA_STATIC_TR_XY_BURST; 968 969 udma_tchanrt_write(uc, 970 UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG, 971 val); 972 973 /* save the current staticTR configuration */ 974 memcpy(&uc->static_tr, &uc->desc->static_tr, 975 sizeof(uc->static_tr)); 976 } 977 978 /* Enable remote */ 979 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 980 UDMA_PEER_RT_EN_ENABLE); 981 982 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 983 UDMA_CHAN_RT_CTL_EN); 984 985 break; 986 case DMA_MEM_TO_MEM: 987 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 988 UDMA_CHAN_RT_CTL_EN); 989 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 990 UDMA_CHAN_RT_CTL_EN); 991 992 break; 993 default: 994 return -EINVAL; 995 } 996 997 uc->state = UDMA_CHAN_IS_ACTIVE; 998 out: 999 1000 return 0; 1001 } 1002 1003 static int udma_stop(struct udma_chan *uc) 1004 { 1005 enum udma_chan_state old_state = uc->state; 1006 1007 uc->state = UDMA_CHAN_IS_TERMINATING; 1008 reinit_completion(&uc->teardown_completed); 1009 1010 switch (uc->config.dir) { 1011 case DMA_DEV_TO_MEM: 1012 if (!uc->cyclic && !uc->desc) 1013 udma_push_to_ring(uc, -1); 1014 1015 udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 1016 UDMA_PEER_RT_EN_ENABLE | 1017 UDMA_PEER_RT_EN_TEARDOWN); 1018 break; 1019 case DMA_MEM_TO_DEV: 1020 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 1021 UDMA_PEER_RT_EN_ENABLE | 1022 UDMA_PEER_RT_EN_FLUSH); 1023 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 1024 UDMA_CHAN_RT_CTL_EN | 1025 UDMA_CHAN_RT_CTL_TDOWN); 1026 break; 1027 case DMA_MEM_TO_MEM: 1028 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 1029 UDMA_CHAN_RT_CTL_EN | 1030 UDMA_CHAN_RT_CTL_TDOWN); 1031 break; 1032 default: 1033 uc->state = old_state; 1034 complete_all(&uc->teardown_completed); 1035 return -EINVAL; 1036 } 1037 1038 return 0; 1039 } 1040 1041 static void udma_cyclic_packet_elapsed(struct udma_chan *uc) 1042 { 1043 struct udma_desc *d = uc->desc; 1044 struct cppi5_host_desc_t *h_desc; 1045 1046 h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr; 1047 cppi5_hdesc_reset_to_original(h_desc); 1048 udma_push_to_ring(uc, d->desc_idx); 1049 d->desc_idx = (d->desc_idx + 1) % d->sglen; 1050 } 1051 1052 static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d) 1053 { 1054 struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr; 1055 1056 memcpy(d->metadata, h_desc->epib, d->metadata_size); 1057 } 1058 1059 static bool 
udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d) 1060 { 1061 u32 peer_bcnt, bcnt; 1062 1063 /* 1064 * Only TX towards PDMA is affected. 1065 * If DMA_PREP_INTERRUPT is not set by consumer then skip the transfer 1066 * completion calculation, consumer must ensure that there is no stale 1067 * data in DMA fabric in this case. 1068 */ 1069 if (uc->config.ep_type == PSIL_EP_NATIVE || 1070 uc->config.dir != DMA_MEM_TO_DEV || !(uc->config.tx_flags & DMA_PREP_INTERRUPT)) 1071 return true; 1072 1073 peer_bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG); 1074 bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG); 1075 1076 /* Transfer is incomplete, store current residue and time stamp */ 1077 if (peer_bcnt < bcnt) { 1078 uc->tx_drain.residue = bcnt - peer_bcnt; 1079 uc->tx_drain.tstamp = ktime_get(); 1080 return false; 1081 } 1082 1083 return true; 1084 } 1085 1086 static void udma_check_tx_completion(struct work_struct *work) 1087 { 1088 struct udma_chan *uc = container_of(work, typeof(*uc), 1089 tx_drain.work.work); 1090 bool desc_done = true; 1091 u32 residue_diff; 1092 ktime_t time_diff; 1093 unsigned long delay; 1094 unsigned long flags; 1095 1096 while (1) { 1097 spin_lock_irqsave(&uc->vc.lock, flags); 1098 1099 if (uc->desc) { 1100 /* Get previous residue and time stamp */ 1101 residue_diff = uc->tx_drain.residue; 1102 time_diff = uc->tx_drain.tstamp; 1103 /* 1104 * Get current residue and time stamp or see if 1105 * transfer is complete 1106 */ 1107 desc_done = udma_is_desc_really_done(uc, uc->desc); 1108 } 1109 1110 if (!desc_done) { 1111 /* 1112 * Find the time delta and residue delta w.r.t 1113 * previous poll 1114 */ 1115 time_diff = ktime_sub(uc->tx_drain.tstamp, 1116 time_diff) + 1; 1117 residue_diff -= uc->tx_drain.residue; 1118 if (residue_diff) { 1119 /* 1120 * Try to guess when we should check 1121 * next time by calculating rate at 1122 * which data is being drained at the 1123 * peer device 1124 */ 1125 delay = (time_diff / residue_diff) * 1126 uc->tx_drain.residue; 1127 } else { 1128 /* No progress, check again in 1 second */ 1129 schedule_delayed_work(&uc->tx_drain.work, HZ); 1130 break; 1131 } 1132 1133 spin_unlock_irqrestore(&uc->vc.lock, flags); 1134 1135 usleep_range(ktime_to_us(delay), 1136 ktime_to_us(delay) + 10); 1137 continue; 1138 } 1139 1140 if (uc->desc) { 1141 struct udma_desc *d = uc->desc; 1142 1143 udma_decrement_byte_counters(uc, d->residue); 1144 udma_start(uc); 1145 vchan_cookie_complete(&d->vd); 1146 break; 1147 } 1148 1149 break; 1150 } 1151 1152 spin_unlock_irqrestore(&uc->vc.lock, flags); 1153 } 1154 1155 static irqreturn_t udma_ring_irq_handler(int irq, void *data) 1156 { 1157 struct udma_chan *uc = data; 1158 struct udma_desc *d; 1159 dma_addr_t paddr = 0; 1160 1161 if (udma_pop_from_ring(uc, &paddr) || !paddr) 1162 return IRQ_HANDLED; 1163 1164 spin_lock(&uc->vc.lock); 1165 1166 /* Teardown completion message */ 1167 if (cppi5_desc_is_tdcm(paddr)) { 1168 complete_all(&uc->teardown_completed); 1169 1170 if (uc->terminated_desc) { 1171 udma_desc_free(&uc->terminated_desc->vd); 1172 uc->terminated_desc = NULL; 1173 } 1174 1175 if (!uc->desc) 1176 udma_start(uc); 1177 1178 goto out; 1179 } 1180 1181 d = udma_udma_desc_from_paddr(uc, paddr); 1182 1183 if (d) { 1184 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d, 1185 d->desc_idx); 1186 if (desc_paddr != paddr) { 1187 dev_err(uc->ud->dev, "not matching descriptors!\n"); 1188 goto out; 1189 } 1190 1191 if (d == uc->desc) { 1192 /* active descriptor */ 1193 if (uc->cyclic) { 
1194 udma_cyclic_packet_elapsed(uc); 1195 vchan_cyclic_callback(&d->vd); 1196 } else { 1197 if (udma_is_desc_really_done(uc, d)) { 1198 udma_decrement_byte_counters(uc, d->residue); 1199 udma_start(uc); 1200 vchan_cookie_complete(&d->vd); 1201 } else { 1202 schedule_delayed_work(&uc->tx_drain.work, 1203 0); 1204 } 1205 } 1206 } else { 1207 /* 1208 * terminated descriptor, mark the descriptor as 1209 * completed to update the channel's cookie marker 1210 */ 1211 dma_cookie_complete(&d->vd.tx); 1212 } 1213 } 1214 out: 1215 spin_unlock(&uc->vc.lock); 1216 1217 return IRQ_HANDLED; 1218 } 1219 1220 static irqreturn_t udma_udma_irq_handler(int irq, void *data) 1221 { 1222 struct udma_chan *uc = data; 1223 struct udma_desc *d; 1224 1225 spin_lock(&uc->vc.lock); 1226 d = uc->desc; 1227 if (d) { 1228 d->tr_idx = (d->tr_idx + 1) % d->sglen; 1229 1230 if (uc->cyclic) { 1231 vchan_cyclic_callback(&d->vd); 1232 } else { 1233 /* TODO: figure out the real amount of data */ 1234 udma_decrement_byte_counters(uc, d->residue); 1235 udma_start(uc); 1236 vchan_cookie_complete(&d->vd); 1237 } 1238 } 1239 1240 spin_unlock(&uc->vc.lock); 1241 1242 return IRQ_HANDLED; 1243 } 1244 1245 /** 1246 * __udma_alloc_gp_rflow_range - alloc range of GP RX flows 1247 * @ud: UDMA device 1248 * @from: Start the search from this flow id number 1249 * @cnt: Number of consecutive flow ids to allocate 1250 * 1251 * Allocate range of RX flow ids for future use, those flows can be requested 1252 * only using explicit flow id number. if @from is set to -1 it will try to find 1253 * first free range. if @from is positive value it will force allocation only 1254 * of the specified range of flows. 1255 * 1256 * Returns -ENOMEM if can't find free range. 1257 * -EEXIST if requested range is busy. 1258 * -EINVAL if wrong input values passed. 1259 * Returns flow id on success. 1260 */ 1261 static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt) 1262 { 1263 int start, tmp_from; 1264 DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS); 1265 1266 tmp_from = from; 1267 if (tmp_from < 0) 1268 tmp_from = ud->rchan_cnt; 1269 /* default flows can't be allocated and accessible only by id */ 1270 if (tmp_from < ud->rchan_cnt) 1271 return -EINVAL; 1272 1273 if (tmp_from + cnt > ud->rflow_cnt) 1274 return -EINVAL; 1275 1276 bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated, 1277 ud->rflow_cnt); 1278 1279 start = bitmap_find_next_zero_area(tmp, 1280 ud->rflow_cnt, 1281 tmp_from, cnt, 0); 1282 if (start >= ud->rflow_cnt) 1283 return -ENOMEM; 1284 1285 if (from >= 0 && start != from) 1286 return -EEXIST; 1287 1288 bitmap_set(ud->rflow_gp_map_allocated, start, cnt); 1289 return start; 1290 } 1291 1292 static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt) 1293 { 1294 if (from < ud->rchan_cnt) 1295 return -EINVAL; 1296 if (from + cnt > ud->rflow_cnt) 1297 return -EINVAL; 1298 1299 bitmap_clear(ud->rflow_gp_map_allocated, from, cnt); 1300 return 0; 1301 } 1302 1303 static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id) 1304 { 1305 /* 1306 * Attempt to request rflow by ID can be made for any rflow 1307 * if not in use with assumption that caller knows what's doing. 
1308 * TI-SCI FW will perform additional permission check ant way, it's 1309 * safe 1310 */ 1311 1312 if (id < 0 || id >= ud->rflow_cnt) 1313 return ERR_PTR(-ENOENT); 1314 1315 if (test_bit(id, ud->rflow_in_use)) 1316 return ERR_PTR(-ENOENT); 1317 1318 if (ud->rflow_gp_map) { 1319 /* GP rflow has to be allocated first */ 1320 if (!test_bit(id, ud->rflow_gp_map) && 1321 !test_bit(id, ud->rflow_gp_map_allocated)) 1322 return ERR_PTR(-EINVAL); 1323 } 1324 1325 dev_dbg(ud->dev, "get rflow%d\n", id); 1326 set_bit(id, ud->rflow_in_use); 1327 return &ud->rflows[id]; 1328 } 1329 1330 static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow) 1331 { 1332 if (!test_bit(rflow->id, ud->rflow_in_use)) { 1333 dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id); 1334 return; 1335 } 1336 1337 dev_dbg(ud->dev, "put rflow%d\n", rflow->id); 1338 clear_bit(rflow->id, ud->rflow_in_use); 1339 } 1340 1341 #define UDMA_RESERVE_RESOURCE(res) \ 1342 static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \ 1343 enum udma_tp_level tpl, \ 1344 int id) \ 1345 { \ 1346 if (id >= 0) { \ 1347 if (test_bit(id, ud->res##_map)) { \ 1348 dev_err(ud->dev, "res##%d is in use\n", id); \ 1349 return ERR_PTR(-ENOENT); \ 1350 } \ 1351 } else { \ 1352 int start; \ 1353 \ 1354 if (tpl >= ud->res##_tpl.levels) \ 1355 tpl = ud->res##_tpl.levels - 1; \ 1356 \ 1357 start = ud->res##_tpl.start_idx[tpl]; \ 1358 \ 1359 id = find_next_zero_bit(ud->res##_map, ud->res##_cnt, \ 1360 start); \ 1361 if (id == ud->res##_cnt) { \ 1362 return ERR_PTR(-ENOENT); \ 1363 } \ 1364 } \ 1365 \ 1366 set_bit(id, ud->res##_map); \ 1367 return &ud->res##s[id]; \ 1368 } 1369 1370 UDMA_RESERVE_RESOURCE(bchan); 1371 UDMA_RESERVE_RESOURCE(tchan); 1372 UDMA_RESERVE_RESOURCE(rchan); 1373 1374 static int bcdma_get_bchan(struct udma_chan *uc) 1375 { 1376 struct udma_dev *ud = uc->ud; 1377 enum udma_tp_level tpl; 1378 int ret; 1379 1380 if (uc->bchan) { 1381 dev_dbg(ud->dev, "chan%d: already have bchan%d allocated\n", 1382 uc->id, uc->bchan->id); 1383 return 0; 1384 } 1385 1386 /* 1387 * Use normal channels for peripherals, and highest TPL channel for 1388 * mem2mem 1389 */ 1390 if (uc->config.tr_trigger_type) 1391 tpl = 0; 1392 else 1393 tpl = ud->bchan_tpl.levels - 1; 1394 1395 uc->bchan = __udma_reserve_bchan(ud, tpl, -1); 1396 if (IS_ERR(uc->bchan)) { 1397 ret = PTR_ERR(uc->bchan); 1398 uc->bchan = NULL; 1399 return ret; 1400 } 1401 1402 uc->tchan = uc->bchan; 1403 1404 return 0; 1405 } 1406 1407 static int udma_get_tchan(struct udma_chan *uc) 1408 { 1409 struct udma_dev *ud = uc->ud; 1410 int ret; 1411 1412 if (uc->tchan) { 1413 dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n", 1414 uc->id, uc->tchan->id); 1415 return 0; 1416 } 1417 1418 /* 1419 * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels. 1420 * For PKTDMA mapped channels it is configured to a channel which must 1421 * be used to service the peripheral. 
1422 */ 1423 uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, 1424 uc->config.mapped_channel_id); 1425 if (IS_ERR(uc->tchan)) { 1426 ret = PTR_ERR(uc->tchan); 1427 uc->tchan = NULL; 1428 return ret; 1429 } 1430 1431 if (ud->tflow_cnt) { 1432 int tflow_id; 1433 1434 /* Only PKTDMA have support for tx flows */ 1435 if (uc->config.default_flow_id >= 0) 1436 tflow_id = uc->config.default_flow_id; 1437 else 1438 tflow_id = uc->tchan->id; 1439 1440 if (test_bit(tflow_id, ud->tflow_map)) { 1441 dev_err(ud->dev, "tflow%d is in use\n", tflow_id); 1442 clear_bit(uc->tchan->id, ud->tchan_map); 1443 uc->tchan = NULL; 1444 return -ENOENT; 1445 } 1446 1447 uc->tchan->tflow_id = tflow_id; 1448 set_bit(tflow_id, ud->tflow_map); 1449 } else { 1450 uc->tchan->tflow_id = -1; 1451 } 1452 1453 return 0; 1454 } 1455 1456 static int udma_get_rchan(struct udma_chan *uc) 1457 { 1458 struct udma_dev *ud = uc->ud; 1459 int ret; 1460 1461 if (uc->rchan) { 1462 dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n", 1463 uc->id, uc->rchan->id); 1464 return 0; 1465 } 1466 1467 /* 1468 * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels. 1469 * For PKTDMA mapped channels it is configured to a channel which must 1470 * be used to service the peripheral. 1471 */ 1472 uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, 1473 uc->config.mapped_channel_id); 1474 if (IS_ERR(uc->rchan)) { 1475 ret = PTR_ERR(uc->rchan); 1476 uc->rchan = NULL; 1477 return ret; 1478 } 1479 1480 return 0; 1481 } 1482 1483 static int udma_get_chan_pair(struct udma_chan *uc) 1484 { 1485 struct udma_dev *ud = uc->ud; 1486 int chan_id, end; 1487 1488 if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) { 1489 dev_info(ud->dev, "chan%d: already have %d pair allocated\n", 1490 uc->id, uc->tchan->id); 1491 return 0; 1492 } 1493 1494 if (uc->tchan) { 1495 dev_err(ud->dev, "chan%d: already have tchan%d allocated\n", 1496 uc->id, uc->tchan->id); 1497 return -EBUSY; 1498 } else if (uc->rchan) { 1499 dev_err(ud->dev, "chan%d: already have rchan%d allocated\n", 1500 uc->id, uc->rchan->id); 1501 return -EBUSY; 1502 } 1503 1504 /* Can be optimized, but let's have it like this for now */ 1505 end = min(ud->tchan_cnt, ud->rchan_cnt); 1506 /* 1507 * Try to use the highest TPL channel pair for MEM_TO_MEM channels 1508 * Note: in UDMAP the channel TPL is symmetric between tchan and rchan 1509 */ 1510 chan_id = ud->tchan_tpl.start_idx[ud->tchan_tpl.levels - 1]; 1511 for (; chan_id < end; chan_id++) { 1512 if (!test_bit(chan_id, ud->tchan_map) && 1513 !test_bit(chan_id, ud->rchan_map)) 1514 break; 1515 } 1516 1517 if (chan_id == end) 1518 return -ENOENT; 1519 1520 set_bit(chan_id, ud->tchan_map); 1521 set_bit(chan_id, ud->rchan_map); 1522 uc->tchan = &ud->tchans[chan_id]; 1523 uc->rchan = &ud->rchans[chan_id]; 1524 1525 /* UDMA does not use tx flows */ 1526 uc->tchan->tflow_id = -1; 1527 1528 return 0; 1529 } 1530 1531 static int udma_get_rflow(struct udma_chan *uc, int flow_id) 1532 { 1533 struct udma_dev *ud = uc->ud; 1534 int ret; 1535 1536 if (!uc->rchan) { 1537 dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id); 1538 return -EINVAL; 1539 } 1540 1541 if (uc->rflow) { 1542 dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n", 1543 uc->id, uc->rflow->id); 1544 return 0; 1545 } 1546 1547 uc->rflow = __udma_get_rflow(ud, flow_id); 1548 if (IS_ERR(uc->rflow)) { 1549 ret = PTR_ERR(uc->rflow); 1550 uc->rflow = NULL; 1551 return ret; 1552 } 1553 1554 return 0; 1555 } 1556 1557 static void 
bcdma_put_bchan(struct udma_chan *uc) 1558 { 1559 struct udma_dev *ud = uc->ud; 1560 1561 if (uc->bchan) { 1562 dev_dbg(ud->dev, "chan%d: put bchan%d\n", uc->id, 1563 uc->bchan->id); 1564 clear_bit(uc->bchan->id, ud->bchan_map); 1565 uc->bchan = NULL; 1566 uc->tchan = NULL; 1567 } 1568 } 1569 1570 static void udma_put_rchan(struct udma_chan *uc) 1571 { 1572 struct udma_dev *ud = uc->ud; 1573 1574 if (uc->rchan) { 1575 dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id, 1576 uc->rchan->id); 1577 clear_bit(uc->rchan->id, ud->rchan_map); 1578 uc->rchan = NULL; 1579 } 1580 } 1581 1582 static void udma_put_tchan(struct udma_chan *uc) 1583 { 1584 struct udma_dev *ud = uc->ud; 1585 1586 if (uc->tchan) { 1587 dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id, 1588 uc->tchan->id); 1589 clear_bit(uc->tchan->id, ud->tchan_map); 1590 1591 if (uc->tchan->tflow_id >= 0) 1592 clear_bit(uc->tchan->tflow_id, ud->tflow_map); 1593 1594 uc->tchan = NULL; 1595 } 1596 } 1597 1598 static void udma_put_rflow(struct udma_chan *uc) 1599 { 1600 struct udma_dev *ud = uc->ud; 1601 1602 if (uc->rflow) { 1603 dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id, 1604 uc->rflow->id); 1605 __udma_put_rflow(ud, uc->rflow); 1606 uc->rflow = NULL; 1607 } 1608 } 1609 1610 static void bcdma_free_bchan_resources(struct udma_chan *uc) 1611 { 1612 if (!uc->bchan) 1613 return; 1614 1615 k3_ringacc_ring_free(uc->bchan->tc_ring); 1616 k3_ringacc_ring_free(uc->bchan->t_ring); 1617 uc->bchan->tc_ring = NULL; 1618 uc->bchan->t_ring = NULL; 1619 k3_configure_chan_coherency(&uc->vc.chan, 0); 1620 1621 bcdma_put_bchan(uc); 1622 } 1623 1624 static int bcdma_alloc_bchan_resources(struct udma_chan *uc) 1625 { 1626 struct k3_ring_cfg ring_cfg; 1627 struct udma_dev *ud = uc->ud; 1628 int ret; 1629 1630 ret = bcdma_get_bchan(uc); 1631 if (ret) 1632 return ret; 1633 1634 ret = k3_ringacc_request_rings_pair(ud->ringacc, uc->bchan->id, -1, 1635 &uc->bchan->t_ring, 1636 &uc->bchan->tc_ring); 1637 if (ret) { 1638 ret = -EBUSY; 1639 goto err_ring; 1640 } 1641 1642 memset(&ring_cfg, 0, sizeof(ring_cfg)); 1643 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; 1644 ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8; 1645 ring_cfg.mode = K3_RINGACC_RING_MODE_RING; 1646 1647 k3_configure_chan_coherency(&uc->vc.chan, ud->asel); 1648 ring_cfg.asel = ud->asel; 1649 ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan); 1650 1651 ret = k3_ringacc_ring_cfg(uc->bchan->t_ring, &ring_cfg); 1652 if (ret) 1653 goto err_ringcfg; 1654 1655 return 0; 1656 1657 err_ringcfg: 1658 k3_ringacc_ring_free(uc->bchan->tc_ring); 1659 uc->bchan->tc_ring = NULL; 1660 k3_ringacc_ring_free(uc->bchan->t_ring); 1661 uc->bchan->t_ring = NULL; 1662 k3_configure_chan_coherency(&uc->vc.chan, 0); 1663 err_ring: 1664 bcdma_put_bchan(uc); 1665 1666 return ret; 1667 } 1668 1669 static void udma_free_tx_resources(struct udma_chan *uc) 1670 { 1671 if (!uc->tchan) 1672 return; 1673 1674 k3_ringacc_ring_free(uc->tchan->t_ring); 1675 k3_ringacc_ring_free(uc->tchan->tc_ring); 1676 uc->tchan->t_ring = NULL; 1677 uc->tchan->tc_ring = NULL; 1678 1679 udma_put_tchan(uc); 1680 } 1681 1682 static int udma_alloc_tx_resources(struct udma_chan *uc) 1683 { 1684 struct k3_ring_cfg ring_cfg; 1685 struct udma_dev *ud = uc->ud; 1686 struct udma_tchan *tchan; 1687 int ring_idx, ret; 1688 1689 ret = udma_get_tchan(uc); 1690 if (ret) 1691 return ret; 1692 1693 tchan = uc->tchan; 1694 if (tchan->tflow_id >= 0) 1695 ring_idx = tchan->tflow_id; 1696 else 1697 ring_idx = ud->bchan_cnt + tchan->id; 1698 1699 ret = 
k3_ringacc_request_rings_pair(ud->ringacc, ring_idx, -1, 1700 &tchan->t_ring, 1701 &tchan->tc_ring); 1702 if (ret) { 1703 ret = -EBUSY; 1704 goto err_ring; 1705 } 1706 1707 memset(&ring_cfg, 0, sizeof(ring_cfg)); 1708 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; 1709 ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8; 1710 if (ud->match_data->type == DMA_TYPE_UDMA) { 1711 ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE; 1712 } else { 1713 ring_cfg.mode = K3_RINGACC_RING_MODE_RING; 1714 1715 k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel); 1716 ring_cfg.asel = uc->config.asel; 1717 ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan); 1718 } 1719 1720 ret = k3_ringacc_ring_cfg(tchan->t_ring, &ring_cfg); 1721 ret |= k3_ringacc_ring_cfg(tchan->tc_ring, &ring_cfg); 1722 1723 if (ret) 1724 goto err_ringcfg; 1725 1726 return 0; 1727 1728 err_ringcfg: 1729 k3_ringacc_ring_free(uc->tchan->tc_ring); 1730 uc->tchan->tc_ring = NULL; 1731 k3_ringacc_ring_free(uc->tchan->t_ring); 1732 uc->tchan->t_ring = NULL; 1733 err_ring: 1734 udma_put_tchan(uc); 1735 1736 return ret; 1737 } 1738 1739 static void udma_free_rx_resources(struct udma_chan *uc) 1740 { 1741 if (!uc->rchan) 1742 return; 1743 1744 if (uc->rflow) { 1745 struct udma_rflow *rflow = uc->rflow; 1746 1747 k3_ringacc_ring_free(rflow->fd_ring); 1748 k3_ringacc_ring_free(rflow->r_ring); 1749 rflow->fd_ring = NULL; 1750 rflow->r_ring = NULL; 1751 1752 udma_put_rflow(uc); 1753 } 1754 1755 udma_put_rchan(uc); 1756 } 1757 1758 static int udma_alloc_rx_resources(struct udma_chan *uc) 1759 { 1760 struct udma_dev *ud = uc->ud; 1761 struct k3_ring_cfg ring_cfg; 1762 struct udma_rflow *rflow; 1763 int fd_ring_id; 1764 int ret; 1765 1766 ret = udma_get_rchan(uc); 1767 if (ret) 1768 return ret; 1769 1770 /* For MEM_TO_MEM we don't need rflow or rings */ 1771 if (uc->config.dir == DMA_MEM_TO_MEM) 1772 return 0; 1773 1774 if (uc->config.default_flow_id >= 0) 1775 ret = udma_get_rflow(uc, uc->config.default_flow_id); 1776 else 1777 ret = udma_get_rflow(uc, uc->rchan->id); 1778 1779 if (ret) { 1780 ret = -EBUSY; 1781 goto err_rflow; 1782 } 1783 1784 rflow = uc->rflow; 1785 if (ud->tflow_cnt) 1786 fd_ring_id = ud->tflow_cnt + rflow->id; 1787 else 1788 fd_ring_id = ud->bchan_cnt + ud->tchan_cnt + ud->echan_cnt + 1789 uc->rchan->id; 1790 1791 ret = k3_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1, 1792 &rflow->fd_ring, &rflow->r_ring); 1793 if (ret) { 1794 ret = -EBUSY; 1795 goto err_ring; 1796 } 1797 1798 memset(&ring_cfg, 0, sizeof(ring_cfg)); 1799 1800 ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8; 1801 if (ud->match_data->type == DMA_TYPE_UDMA) { 1802 if (uc->config.pkt_mode) 1803 ring_cfg.size = SG_MAX_SEGMENTS; 1804 else 1805 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; 1806 1807 ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE; 1808 } else { 1809 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; 1810 ring_cfg.mode = K3_RINGACC_RING_MODE_RING; 1811 1812 k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel); 1813 ring_cfg.asel = uc->config.asel; 1814 ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan); 1815 } 1816 1817 ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg); 1818 1819 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; 1820 ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg); 1821 1822 if (ret) 1823 goto err_ringcfg; 1824 1825 return 0; 1826 1827 err_ringcfg: 1828 k3_ringacc_ring_free(rflow->r_ring); 1829 rflow->r_ring = NULL; 1830 k3_ringacc_ring_free(rflow->fd_ring); 1831 rflow->fd_ring = NULL; 1832 err_ring: 1833 udma_put_rflow(uc); 1834 
err_rflow: 1835 udma_put_rchan(uc); 1836 1837 return ret; 1838 } 1839 1840 #define TISCI_BCDMA_BCHAN_VALID_PARAMS ( \ 1841 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \ 1842 TI_SCI_MSG_VALUE_RM_UDMAP_CH_EXTENDED_CH_TYPE_VALID) 1843 1844 #define TISCI_BCDMA_TCHAN_VALID_PARAMS ( \ 1845 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \ 1846 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID) 1847 1848 #define TISCI_BCDMA_RCHAN_VALID_PARAMS ( \ 1849 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID) 1850 1851 #define TISCI_UDMA_TCHAN_VALID_PARAMS ( \ 1852 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \ 1853 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | \ 1854 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | \ 1855 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \ 1856 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | \ 1857 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \ 1858 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \ 1859 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID) 1860 1861 #define TISCI_UDMA_RCHAN_VALID_PARAMS ( \ 1862 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \ 1863 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \ 1864 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \ 1865 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \ 1866 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID | \ 1867 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID | \ 1868 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | \ 1869 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID | \ 1870 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID) 1871 1872 static int udma_tisci_m2m_channel_config(struct udma_chan *uc) 1873 { 1874 struct udma_dev *ud = uc->ud; 1875 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 1876 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; 1877 struct udma_tchan *tchan = uc->tchan; 1878 struct udma_rchan *rchan = uc->rchan; 1879 u8 burst_size = 0; 1880 int ret; 1881 u8 tpl; 1882 1883 /* Non synchronized - mem to mem type of transfer */ 1884 int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring); 1885 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 }; 1886 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 }; 1887 1888 if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) { 1889 tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, tchan->id); 1890 1891 burst_size = ud->match_data->burst_size[tpl]; 1892 } 1893 1894 req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS; 1895 req_tx.nav_id = tisci_rm->tisci_dev_id; 1896 req_tx.index = tchan->id; 1897 req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR; 1898 req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2; 1899 req_tx.txcq_qnum = tc_ring; 1900 req_tx.tx_atype = ud->atype; 1901 if (burst_size) { 1902 req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID; 1903 req_tx.tx_burst_size = burst_size; 1904 } 1905 1906 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); 1907 if (ret) { 1908 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret); 1909 return ret; 1910 } 1911 1912 req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS; 1913 req_rx.nav_id = tisci_rm->tisci_dev_id; 1914 req_rx.index = rchan->id; 1915 req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2; 1916 req_rx.rxcq_qnum = tc_ring; 1917 req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR; 1918 req_rx.rx_atype = ud->atype; 1919 if (burst_size) { 1920 req_rx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID; 1921 req_rx.rx_burst_size = burst_size; 1922 } 1923 1924 ret = 
tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); 1925 if (ret) 1926 dev_err(ud->dev, "rchan%d alloc failed %d\n", rchan->id, ret); 1927 1928 return ret; 1929 } 1930 1931 static int bcdma_tisci_m2m_channel_config(struct udma_chan *uc) 1932 { 1933 struct udma_dev *ud = uc->ud; 1934 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 1935 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; 1936 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 }; 1937 struct udma_bchan *bchan = uc->bchan; 1938 u8 burst_size = 0; 1939 int ret; 1940 u8 tpl; 1941 1942 if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) { 1943 tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, bchan->id); 1944 1945 burst_size = ud->match_data->burst_size[tpl]; 1946 } 1947 1948 req_tx.valid_params = TISCI_BCDMA_BCHAN_VALID_PARAMS; 1949 req_tx.nav_id = tisci_rm->tisci_dev_id; 1950 req_tx.extended_ch_type = TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN; 1951 req_tx.index = bchan->id; 1952 if (burst_size) { 1953 req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID; 1954 req_tx.tx_burst_size = burst_size; 1955 } 1956 1957 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); 1958 if (ret) 1959 dev_err(ud->dev, "bchan%d cfg failed %d\n", bchan->id, ret); 1960 1961 return ret; 1962 } 1963 1964 static int udma_tisci_tx_channel_config(struct udma_chan *uc) 1965 { 1966 struct udma_dev *ud = uc->ud; 1967 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 1968 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; 1969 struct udma_tchan *tchan = uc->tchan; 1970 int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring); 1971 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 }; 1972 u32 mode, fetch_size; 1973 int ret; 1974 1975 if (uc->config.pkt_mode) { 1976 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR; 1977 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib, 1978 uc->config.psd_size, 0); 1979 } else { 1980 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR; 1981 fetch_size = sizeof(struct cppi5_desc_hdr_t); 1982 } 1983 1984 req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS; 1985 req_tx.nav_id = tisci_rm->tisci_dev_id; 1986 req_tx.index = tchan->id; 1987 req_tx.tx_chan_type = mode; 1988 req_tx.tx_supr_tdpkt = uc->config.notdpkt; 1989 req_tx.tx_fetch_size = fetch_size >> 2; 1990 req_tx.txcq_qnum = tc_ring; 1991 req_tx.tx_atype = uc->config.atype; 1992 if (uc->config.ep_type == PSIL_EP_PDMA_XY && 1993 ud->match_data->flags & UDMA_FLAG_TDTYPE) { 1994 /* wait for peer to complete the teardown for PDMAs */ 1995 req_tx.valid_params |= 1996 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID; 1997 req_tx.tx_tdtype = 1; 1998 } 1999 2000 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); 2001 if (ret) 2002 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret); 2003 2004 return ret; 2005 } 2006 2007 static int bcdma_tisci_tx_channel_config(struct udma_chan *uc) 2008 { 2009 struct udma_dev *ud = uc->ud; 2010 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 2011 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; 2012 struct udma_tchan *tchan = uc->tchan; 2013 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 }; 2014 int ret; 2015 2016 req_tx.valid_params = TISCI_BCDMA_TCHAN_VALID_PARAMS; 2017 req_tx.nav_id = tisci_rm->tisci_dev_id; 2018 req_tx.index = tchan->id; 2019 req_tx.tx_supr_tdpkt = uc->config.notdpkt; 2020 if (ud->match_data->flags & UDMA_FLAG_TDTYPE) { 2021 /* wait for peer to complete the teardown for PDMAs */ 2022 req_tx.valid_params |= 2023 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID; 2024 
req_tx.tx_tdtype = 1; 2025 } 2026 2027 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); 2028 if (ret) 2029 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret); 2030 2031 return ret; 2032 } 2033 2034 #define pktdma_tisci_tx_channel_config bcdma_tisci_tx_channel_config 2035 2036 static int udma_tisci_rx_channel_config(struct udma_chan *uc) 2037 { 2038 struct udma_dev *ud = uc->ud; 2039 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 2040 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; 2041 struct udma_rchan *rchan = uc->rchan; 2042 int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring); 2043 int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring); 2044 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 }; 2045 struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 }; 2046 u32 mode, fetch_size; 2047 int ret; 2048 2049 if (uc->config.pkt_mode) { 2050 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR; 2051 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib, 2052 uc->config.psd_size, 0); 2053 } else { 2054 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR; 2055 fetch_size = sizeof(struct cppi5_desc_hdr_t); 2056 } 2057 2058 req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS; 2059 req_rx.nav_id = tisci_rm->tisci_dev_id; 2060 req_rx.index = rchan->id; 2061 req_rx.rx_fetch_size = fetch_size >> 2; 2062 req_rx.rxcq_qnum = rx_ring; 2063 req_rx.rx_chan_type = mode; 2064 req_rx.rx_atype = uc->config.atype; 2065 2066 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); 2067 if (ret) { 2068 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret); 2069 return ret; 2070 } 2071 2072 flow_req.valid_params = 2073 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID | 2074 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID | 2075 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID | 2076 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID | 2077 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID | 2078 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID | 2079 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID | 2080 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID | 2081 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID | 2082 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID | 2083 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID | 2084 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID | 2085 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID; 2086 2087 flow_req.nav_id = tisci_rm->tisci_dev_id; 2088 flow_req.flow_index = rchan->id; 2089 2090 if (uc->config.needs_epib) 2091 flow_req.rx_einfo_present = 1; 2092 else 2093 flow_req.rx_einfo_present = 0; 2094 if (uc->config.psd_size) 2095 flow_req.rx_psinfo_present = 1; 2096 else 2097 flow_req.rx_psinfo_present = 0; 2098 flow_req.rx_error_handling = 1; 2099 flow_req.rx_dest_qnum = rx_ring; 2100 flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE; 2101 flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG; 2102 flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI; 2103 flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO; 2104 flow_req.rx_fdq0_sz0_qnum = fd_ring; 2105 flow_req.rx_fdq1_qnum = fd_ring; 2106 flow_req.rx_fdq2_qnum = fd_ring; 2107 flow_req.rx_fdq3_qnum = fd_ring; 2108 2109 ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req); 2110 2111 if (ret) 2112 dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret); 2113 2114 return 0; 2115 } 2116 2117 static int bcdma_tisci_rx_channel_config(struct udma_chan *uc) 2118 { 2119 struct udma_dev *ud = uc->ud; 2120 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 2121 const 
struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; 2122 struct udma_rchan *rchan = uc->rchan; 2123 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 }; 2124 int ret; 2125 2126 req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS; 2127 req_rx.nav_id = tisci_rm->tisci_dev_id; 2128 req_rx.index = rchan->id; 2129 2130 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); 2131 if (ret) 2132 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret); 2133 2134 return ret; 2135 } 2136 2137 static int pktdma_tisci_rx_channel_config(struct udma_chan *uc) 2138 { 2139 struct udma_dev *ud = uc->ud; 2140 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 2141 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; 2142 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 }; 2143 struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 }; 2144 int ret; 2145 2146 req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS; 2147 req_rx.nav_id = tisci_rm->tisci_dev_id; 2148 req_rx.index = uc->rchan->id; 2149 2150 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); 2151 if (ret) { 2152 dev_err(ud->dev, "rchan%d cfg failed %d\n", uc->rchan->id, ret); 2153 return ret; 2154 } 2155 2156 flow_req.valid_params = 2157 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID | 2158 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID | 2159 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID; 2160 2161 flow_req.nav_id = tisci_rm->tisci_dev_id; 2162 flow_req.flow_index = uc->rflow->id; 2163 2164 if (uc->config.needs_epib) 2165 flow_req.rx_einfo_present = 1; 2166 else 2167 flow_req.rx_einfo_present = 0; 2168 if (uc->config.psd_size) 2169 flow_req.rx_psinfo_present = 1; 2170 else 2171 flow_req.rx_psinfo_present = 0; 2172 flow_req.rx_error_handling = 1; 2173 2174 ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req); 2175 2176 if (ret) 2177 dev_err(ud->dev, "flow%d config failed: %d\n", uc->rflow->id, 2178 ret); 2179 2180 return ret; 2181 } 2182 2183 static int udma_alloc_chan_resources(struct dma_chan *chan) 2184 { 2185 struct udma_chan *uc = to_udma_chan(chan); 2186 struct udma_dev *ud = to_udma_dev(chan->device); 2187 const struct udma_soc_data *soc_data = ud->soc_data; 2188 struct k3_ring *irq_ring; 2189 u32 irq_udma_idx; 2190 int ret; 2191 2192 uc->dma_dev = ud->dev; 2193 2194 if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) { 2195 uc->use_dma_pool = true; 2196 /* in case of MEM_TO_MEM we have maximum of two TRs */ 2197 if (uc->config.dir == DMA_MEM_TO_MEM) { 2198 uc->config.hdesc_size = cppi5_trdesc_calc_size( 2199 sizeof(struct cppi5_tr_type15_t), 2); 2200 uc->config.pkt_mode = false; 2201 } 2202 } 2203 2204 if (uc->use_dma_pool) { 2205 uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev, 2206 uc->config.hdesc_size, 2207 ud->desc_align, 2208 0); 2209 if (!uc->hdesc_pool) { 2210 dev_err(ud->ddev.dev, 2211 "Descriptor pool allocation failed\n"); 2212 uc->use_dma_pool = false; 2213 ret = -ENOMEM; 2214 goto err_cleanup; 2215 } 2216 } 2217 2218 /* 2219 * Make sure that the completion is in a known state: 2220 * No teardown, the channel is idle 2221 */ 2222 reinit_completion(&uc->teardown_completed); 2223 complete_all(&uc->teardown_completed); 2224 uc->state = UDMA_CHAN_IS_IDLE; 2225 2226 switch (uc->config.dir) { 2227 case DMA_MEM_TO_MEM: 2228 /* Non synchronized - mem to mem type of transfer */ 2229 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__, 2230 uc->id); 2231 2232 ret = udma_get_chan_pair(uc); 2233 if (ret) 2234 goto err_cleanup; 2235 2236 ret = 
udma_alloc_tx_resources(uc); 2237 if (ret) { 2238 udma_put_rchan(uc); 2239 goto err_cleanup; 2240 } 2241 2242 ret = udma_alloc_rx_resources(uc); 2243 if (ret) { 2244 udma_free_tx_resources(uc); 2245 goto err_cleanup; 2246 } 2247 2248 uc->config.src_thread = ud->psil_base + uc->tchan->id; 2249 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 2250 K3_PSIL_DST_THREAD_ID_OFFSET; 2251 2252 irq_ring = uc->tchan->tc_ring; 2253 irq_udma_idx = uc->tchan->id; 2254 2255 ret = udma_tisci_m2m_channel_config(uc); 2256 break; 2257 case DMA_MEM_TO_DEV: 2258 /* Slave transfer synchronized - mem to dev (TX) transfer */ 2259 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__, 2260 uc->id); 2261 2262 ret = udma_alloc_tx_resources(uc); 2263 if (ret) 2264 goto err_cleanup; 2265 2266 uc->config.src_thread = ud->psil_base + uc->tchan->id; 2267 uc->config.dst_thread = uc->config.remote_thread_id; 2268 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; 2269 2270 irq_ring = uc->tchan->tc_ring; 2271 irq_udma_idx = uc->tchan->id; 2272 2273 ret = udma_tisci_tx_channel_config(uc); 2274 break; 2275 case DMA_DEV_TO_MEM: 2276 /* Slave transfer synchronized - dev to mem (RX) transfer */ 2277 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__, 2278 uc->id); 2279 2280 ret = udma_alloc_rx_resources(uc); 2281 if (ret) 2282 goto err_cleanup; 2283 2284 uc->config.src_thread = uc->config.remote_thread_id; 2285 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 2286 K3_PSIL_DST_THREAD_ID_OFFSET; 2287 2288 irq_ring = uc->rflow->r_ring; 2289 irq_udma_idx = soc_data->oes.udma_rchan + uc->rchan->id; 2290 2291 ret = udma_tisci_rx_channel_config(uc); 2292 break; 2293 default: 2294 /* Can not happen */ 2295 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n", 2296 __func__, uc->id, uc->config.dir); 2297 ret = -EINVAL; 2298 goto err_cleanup; 2299 2300 } 2301 2302 /* check if the channel configuration was successful */ 2303 if (ret) 2304 goto err_res_free; 2305 2306 if (udma_is_chan_running(uc)) { 2307 dev_warn(ud->dev, "chan%d: is running!\n", uc->id); 2308 udma_reset_chan(uc, false); 2309 if (udma_is_chan_running(uc)) { 2310 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id); 2311 ret = -EBUSY; 2312 goto err_res_free; 2313 } 2314 } 2315 2316 /* PSI-L pairing */ 2317 ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread); 2318 if (ret) { 2319 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n", 2320 uc->config.src_thread, uc->config.dst_thread); 2321 goto err_res_free; 2322 } 2323 2324 uc->psil_paired = true; 2325 2326 uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring); 2327 if (uc->irq_num_ring <= 0) { 2328 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n", 2329 k3_ringacc_get_ring_id(irq_ring)); 2330 ret = -EINVAL; 2331 goto err_psi_free; 2332 } 2333 2334 ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler, 2335 IRQF_TRIGGER_HIGH, uc->name, uc); 2336 if (ret) { 2337 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id); 2338 goto err_irq_free; 2339 } 2340 2341 /* Event from UDMA (TR events) only needed for slave TR mode channels */ 2342 if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) { 2343 uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx); 2344 if (uc->irq_num_udma <= 0) { 2345 dev_err(ud->dev, "Failed to get udma irq (index: %u)\n", 2346 irq_udma_idx); 2347 free_irq(uc->irq_num_ring, uc); 2348 ret = -EINVAL; 2349 goto err_irq_free; 2350 } 2351 2352 ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0, 2353
uc->name, uc); 2354 if (ret) { 2355 dev_err(ud->dev, "chan%d: UDMA irq request failed\n", 2356 uc->id); 2357 free_irq(uc->irq_num_ring, uc); 2358 goto err_irq_free; 2359 } 2360 } else { 2361 uc->irq_num_udma = 0; 2362 } 2363 2364 udma_reset_rings(uc); 2365 2366 return 0; 2367 2368 err_irq_free: 2369 uc->irq_num_ring = 0; 2370 uc->irq_num_udma = 0; 2371 err_psi_free: 2372 navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread); 2373 uc->psil_paired = false; 2374 err_res_free: 2375 udma_free_tx_resources(uc); 2376 udma_free_rx_resources(uc); 2377 err_cleanup: 2378 udma_reset_uchan(uc); 2379 2380 if (uc->use_dma_pool) { 2381 dma_pool_destroy(uc->hdesc_pool); 2382 uc->use_dma_pool = false; 2383 } 2384 2385 return ret; 2386 } 2387 2388 static int bcdma_alloc_chan_resources(struct dma_chan *chan) 2389 { 2390 struct udma_chan *uc = to_udma_chan(chan); 2391 struct udma_dev *ud = to_udma_dev(chan->device); 2392 const struct udma_oes_offsets *oes = &ud->soc_data->oes; 2393 u32 irq_udma_idx, irq_ring_idx; 2394 int ret; 2395 2396 /* Only TR mode is supported */ 2397 uc->config.pkt_mode = false; 2398 2399 /* 2400 * Make sure that the completion is in a known state: 2401 * No teardown, the channel is idle 2402 */ 2403 reinit_completion(&uc->teardown_completed); 2404 complete_all(&uc->teardown_completed); 2405 uc->state = UDMA_CHAN_IS_IDLE; 2406 2407 switch (uc->config.dir) { 2408 case DMA_MEM_TO_MEM: 2409 /* Non synchronized - mem to mem type of transfer */ 2410 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__, 2411 uc->id); 2412 2413 ret = bcdma_alloc_bchan_resources(uc); 2414 if (ret) 2415 return ret; 2416 2417 irq_ring_idx = uc->bchan->id + oes->bcdma_bchan_ring; 2418 irq_udma_idx = uc->bchan->id + oes->bcdma_bchan_data; 2419 2420 ret = bcdma_tisci_m2m_channel_config(uc); 2421 break; 2422 case DMA_MEM_TO_DEV: 2423 /* Slave transfer synchronized - mem to dev (TX) transfer */ 2424 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__, 2425 uc->id); 2426 2427 ret = udma_alloc_tx_resources(uc); 2428 if (ret) { 2429 uc->config.remote_thread_id = -1; 2430 return ret; 2431 } 2432 2433 uc->config.src_thread = ud->psil_base + uc->tchan->id; 2434 uc->config.dst_thread = uc->config.remote_thread_id; 2435 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; 2436 2437 irq_ring_idx = uc->tchan->id + oes->bcdma_tchan_ring; 2438 irq_udma_idx = uc->tchan->id + oes->bcdma_tchan_data; 2439 2440 ret = bcdma_tisci_tx_channel_config(uc); 2441 break; 2442 case DMA_DEV_TO_MEM: 2443 /* Slave transfer synchronized - dev to mem (RX) transfer */ 2444 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__, 2445 uc->id); 2446 2447 ret = udma_alloc_rx_resources(uc); 2448 if (ret) { 2449 uc->config.remote_thread_id = -1; 2450 return ret; 2451 } 2452 2453 uc->config.src_thread = uc->config.remote_thread_id; 2454 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 2455 K3_PSIL_DST_THREAD_ID_OFFSET; 2456 2457 irq_ring_idx = uc->rchan->id + oes->bcdma_rchan_ring; 2458 irq_udma_idx = uc->rchan->id + oes->bcdma_rchan_data; 2459 2460 ret = bcdma_tisci_rx_channel_config(uc); 2461 break; 2462 default: 2463 /* Can not happen */ 2464 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n", 2465 __func__, uc->id, uc->config.dir); 2466 return -EINVAL; 2467 } 2468 2469 /* check if the channel configuration was successful */ 2470 if (ret) 2471 goto err_res_free; 2472 2473 if (udma_is_chan_running(uc)) { 2474 dev_warn(ud->dev, "chan%d: is running!\n", uc->id); 2475 udma_reset_chan(uc, false); 2476 if
(udma_is_chan_running(uc)) { 2477 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id); 2478 ret = -EBUSY; 2479 goto err_res_free; 2480 } 2481 } 2482 2483 uc->dma_dev = dmaengine_get_dma_device(chan); 2484 if (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type) { 2485 uc->config.hdesc_size = cppi5_trdesc_calc_size( 2486 sizeof(struct cppi5_tr_type15_t), 2); 2487 2488 uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev, 2489 uc->config.hdesc_size, 2490 ud->desc_align, 2491 0); 2492 if (!uc->hdesc_pool) { 2493 dev_err(ud->ddev.dev, 2494 "Descriptor pool allocation failed\n"); 2495 uc->use_dma_pool = false; 2496 ret = -ENOMEM; 2497 goto err_res_free; 2498 } 2499 2500 uc->use_dma_pool = true; 2501 } else if (uc->config.dir != DMA_MEM_TO_MEM) { 2502 /* PSI-L pairing */ 2503 ret = navss_psil_pair(ud, uc->config.src_thread, 2504 uc->config.dst_thread); 2505 if (ret) { 2506 dev_err(ud->dev, 2507 "PSI-L pairing failed: 0x%04x -> 0x%04x\n", 2508 uc->config.src_thread, uc->config.dst_thread); 2509 goto err_res_free; 2510 } 2511 2512 uc->psil_paired = true; 2513 } 2514 2515 uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx); 2516 if (uc->irq_num_ring <= 0) { 2517 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n", 2518 irq_ring_idx); 2519 ret = -EINVAL; 2520 goto err_psi_free; 2521 } 2522 2523 ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler, 2524 IRQF_TRIGGER_HIGH, uc->name, uc); 2525 if (ret) { 2526 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id); 2527 goto err_irq_free; 2528 } 2529 2530 /* Event from BCDMA (TR events) only needed for slave channels */ 2531 if (is_slave_direction(uc->config.dir)) { 2532 uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx); 2533 if (uc->irq_num_udma <= 0) { 2534 dev_err(ud->dev, "Failed to get bcdma irq (index: %u)\n", 2535 irq_udma_idx); 2536 free_irq(uc->irq_num_ring, uc); 2537 ret = -EINVAL; 2538 goto err_irq_free; 2539 } 2540 2541 ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0, 2542 uc->name, uc); 2543 if (ret) { 2544 dev_err(ud->dev, "chan%d: BCDMA irq request failed\n", 2545 uc->id); 2546 free_irq(uc->irq_num_ring, uc); 2547 goto err_irq_free; 2548 } 2549 } else { 2550 uc->irq_num_udma = 0; 2551 } 2552 2553 udma_reset_rings(uc); 2554 2555 INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work, 2556 udma_check_tx_completion); 2557 return 0; 2558 2559 err_irq_free: 2560 uc->irq_num_ring = 0; 2561 uc->irq_num_udma = 0; 2562 err_psi_free: 2563 if (uc->psil_paired) 2564 navss_psil_unpair(ud, uc->config.src_thread, 2565 uc->config.dst_thread); 2566 uc->psil_paired = false; 2567 err_res_free: 2568 bcdma_free_bchan_resources(uc); 2569 udma_free_tx_resources(uc); 2570 udma_free_rx_resources(uc); 2571 2572 udma_reset_uchan(uc); 2573 2574 if (uc->use_dma_pool) { 2575 dma_pool_destroy(uc->hdesc_pool); 2576 uc->use_dma_pool = false; 2577 } 2578 2579 return ret; 2580 } 2581 2582 static int bcdma_router_config(struct dma_chan *chan) 2583 { 2584 struct k3_event_route_data *router_data = chan->route_data; 2585 struct udma_chan *uc = to_udma_chan(chan); 2586 u32 trigger_event; 2587 2588 if (!uc->bchan) 2589 return -EINVAL; 2590 2591 if (uc->config.tr_trigger_type != 1 && uc->config.tr_trigger_type != 2) 2592 return -EINVAL; 2593 2594 trigger_event = uc->ud->soc_data->bcdma_trigger_event_offset; 2595 trigger_event += (uc->bchan->id * 2) + uc->config.tr_trigger_type - 1; 2596 2597 return router_data->set_event(router_data->priv, trigger_event); 2598 } 2599 2600 static int pktdma_alloc_chan_resources(struct dma_chan *chan) 2601 { 
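	/*
	 * For PKTDMA, completion events come from the mapped tflow/rflow
	 * (pktdma_tchan_flow/pktdma_rchan_flow offsets), so only the ring
	 * interrupt is requested below; irq_num_udma stays 0 and descriptors
	 * are always taken from the per-channel dma_pool.
	 */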
2602 struct udma_chan *uc = to_udma_chan(chan); 2603 struct udma_dev *ud = to_udma_dev(chan->device); 2604 const struct udma_oes_offsets *oes = &ud->soc_data->oes; 2605 u32 irq_ring_idx; 2606 int ret; 2607 2608 /* 2609 * Make sure that the completion is in a known state: 2610 * No teardown, the channel is idle 2611 */ 2612 reinit_completion(&uc->teardown_completed); 2613 complete_all(&uc->teardown_completed); 2614 uc->state = UDMA_CHAN_IS_IDLE; 2615 2616 switch (uc->config.dir) { 2617 case DMA_MEM_TO_DEV: 2618 /* Slave transfer synchronized - mem to dev (TX) transfer */ 2619 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__, 2620 uc->id); 2621 2622 ret = udma_alloc_tx_resources(uc); 2623 if (ret) { 2624 uc->config.remote_thread_id = -1; 2625 return ret; 2626 } 2627 2628 uc->config.src_thread = ud->psil_base + uc->tchan->id; 2629 uc->config.dst_thread = uc->config.remote_thread_id; 2630 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; 2631 2632 irq_ring_idx = uc->tchan->tflow_id + oes->pktdma_tchan_flow; 2633 2634 ret = pktdma_tisci_tx_channel_config(uc); 2635 break; 2636 case DMA_DEV_TO_MEM: 2637 /* Slave transfer synchronized - dev to mem (RX) transfer */ 2638 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__, 2639 uc->id); 2640 2641 ret = udma_alloc_rx_resources(uc); 2642 if (ret) { 2643 uc->config.remote_thread_id = -1; 2644 return ret; 2645 } 2646 2647 uc->config.src_thread = uc->config.remote_thread_id; 2648 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 2649 K3_PSIL_DST_THREAD_ID_OFFSET; 2650 2651 irq_ring_idx = uc->rflow->id + oes->pktdma_rchan_flow; 2652 2653 ret = pktdma_tisci_rx_channel_config(uc); 2654 break; 2655 default: 2656 /* Can not happen */ 2657 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n", 2658 __func__, uc->id, uc->config.dir); 2659 return -EINVAL; 2660 } 2661 2662 /* check if the channel configuration was successful */ 2663 if (ret) 2664 goto err_res_free; 2665 2666 if (udma_is_chan_running(uc)) { 2667 dev_warn(ud->dev, "chan%d: is running!\n", uc->id); 2668 udma_reset_chan(uc, false); 2669 if (udma_is_chan_running(uc)) { 2670 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id); 2671 ret = -EBUSY; 2672 goto err_res_free; 2673 } 2674 } 2675 2676 uc->dma_dev = dmaengine_get_dma_device(chan); 2677 uc->hdesc_pool = dma_pool_create(uc->name, uc->dma_dev, 2678 uc->config.hdesc_size, ud->desc_align, 2679 0); 2680 if (!uc->hdesc_pool) { 2681 dev_err(ud->ddev.dev, 2682 "Descriptor pool allocation failed\n"); 2683 uc->use_dma_pool = false; 2684 ret = -ENOMEM; 2685 goto err_res_free; 2686 } 2687 2688 uc->use_dma_pool = true; 2689 2690 /* PSI-L pairing */ 2691 ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread); 2692 if (ret) { 2693 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n", 2694 uc->config.src_thread, uc->config.dst_thread); 2695 goto err_res_free; 2696 } 2697 2698 uc->psil_paired = true; 2699 2700 uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx); 2701 if (uc->irq_num_ring <= 0) { 2702 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n", 2703 irq_ring_idx); 2704 ret = -EINVAL; 2705 goto err_psi_free; 2706 } 2707 2708 ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler, 2709 IRQF_TRIGGER_HIGH, uc->name, uc); 2710 if (ret) { 2711 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id); 2712 goto err_irq_free; 2713 } 2714 2715 uc->irq_num_udma = 0; 2716 2717 udma_reset_rings(uc); 2718 2719 INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work, 2720 udma_check_tx_completion); 2721
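	/* Log the hardware channel, flow and PSI-L thread mapped to this channel */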
2722 if (uc->tchan) 2723 dev_dbg(ud->dev, 2724 "chan%d: tchan%d, tflow%d, Remote thread: 0x%04x\n", 2725 uc->id, uc->tchan->id, uc->tchan->tflow_id, 2726 uc->config.remote_thread_id); 2727 else if (uc->rchan) 2728 dev_dbg(ud->dev, 2729 "chan%d: rchan%d, rflow%d, Remote thread: 0x%04x\n", 2730 uc->id, uc->rchan->id, uc->rflow->id, 2731 uc->config.remote_thread_id); 2732 return 0; 2733 2734 err_irq_free: 2735 uc->irq_num_ring = 0; 2736 err_psi_free: 2737 navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread); 2738 uc->psil_paired = false; 2739 err_res_free: 2740 udma_free_tx_resources(uc); 2741 udma_free_rx_resources(uc); 2742 2743 udma_reset_uchan(uc); 2744 2745 dma_pool_destroy(uc->hdesc_pool); 2746 uc->use_dma_pool = false; 2747 2748 return ret; 2749 } 2750 2751 static int udma_slave_config(struct dma_chan *chan, 2752 struct dma_slave_config *cfg) 2753 { 2754 struct udma_chan *uc = to_udma_chan(chan); 2755 2756 memcpy(&uc->cfg, cfg, sizeof(uc->cfg)); 2757 2758 return 0; 2759 } 2760 2761 static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc, 2762 size_t tr_size, int tr_count, 2763 enum dma_transfer_direction dir) 2764 { 2765 struct udma_hwdesc *hwdesc; 2766 struct cppi5_desc_hdr_t *tr_desc; 2767 struct udma_desc *d; 2768 u32 reload_count = 0; 2769 u32 ring_id; 2770 2771 switch (tr_size) { 2772 case 16: 2773 case 32: 2774 case 64: 2775 case 128: 2776 break; 2777 default: 2778 dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size); 2779 return NULL; 2780 } 2781 2782 /* We have only one descriptor containing multiple TRs */ 2783 d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT); 2784 if (!d) 2785 return NULL; 2786 2787 d->sglen = tr_count; 2788 2789 d->hwdesc_count = 1; 2790 hwdesc = &d->hwdesc[0]; 2791 2792 /* Allocate memory for DMA ring descriptor */ 2793 if (uc->use_dma_pool) { 2794 hwdesc->cppi5_desc_size = uc->config.hdesc_size; 2795 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool, 2796 GFP_NOWAIT, 2797 &hwdesc->cppi5_desc_paddr); 2798 } else { 2799 hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 2800 tr_count); 2801 hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size, 2802 uc->ud->desc_align); 2803 hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev, 2804 hwdesc->cppi5_desc_size, 2805 &hwdesc->cppi5_desc_paddr, 2806 GFP_NOWAIT); 2807 } 2808 2809 if (!hwdesc->cppi5_desc_vaddr) { 2810 kfree(d); 2811 return NULL; 2812 } 2813 2814 /* Start of the TR req records */ 2815 hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size; 2816 /* Start address of the TR response array */ 2817 hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count; 2818 2819 tr_desc = hwdesc->cppi5_desc_vaddr; 2820 2821 if (uc->cyclic) 2822 reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE; 2823 2824 if (dir == DMA_DEV_TO_MEM) 2825 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring); 2826 else 2827 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring); 2828 2829 cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count); 2830 cppi5_desc_set_pktids(tr_desc, uc->id, 2831 CPPI5_INFO1_DESC_FLOWID_DEFAULT); 2832 cppi5_desc_set_retpolicy(tr_desc, 0, ring_id); 2833 2834 return d; 2835 } 2836 2837 /** 2838 * udma_get_tr_counters - calculate TR counters for a given length 2839 * @len: Length of the transfer 2840 * @align_to: Preferred alignment 2841 * @tr0_cnt0: First TR icnt0 2842 * @tr0_cnt1: First TR icnt1 2843 * @tr1_cnt0: Second (if used) TR icnt0 2844 * 2845 * For len < SZ_64K only one TR is enough, tr1_cnt0 is not updated 2846 * For len >=
SZ_64K two TRs are used in a simple way: 2847 * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1) 2848 * Second TR: the remaining length (tr1_cnt0) 2849 * 2850 * Returns the number of TRs the length needs (1 or 2) 2851 * -EINVAL if the length can not be supported 2852 */ 2853 static int udma_get_tr_counters(size_t len, unsigned long align_to, 2854 u16 *tr0_cnt0, u16 *tr0_cnt1, u16 *tr1_cnt0) 2855 { 2856 if (len < SZ_64K) { 2857 *tr0_cnt0 = len; 2858 *tr0_cnt1 = 1; 2859 2860 return 1; 2861 } 2862 2863 if (align_to > 3) 2864 align_to = 3; 2865 2866 realign: 2867 *tr0_cnt0 = SZ_64K - BIT(align_to); 2868 if (len / *tr0_cnt0 >= SZ_64K) { 2869 if (align_to) { 2870 align_to--; 2871 goto realign; 2872 } 2873 return -EINVAL; 2874 } 2875 2876 *tr0_cnt1 = len / *tr0_cnt0; 2877 *tr1_cnt0 = len % *tr0_cnt0; 2878 2879 return 2; 2880 } 2881 2882 static struct udma_desc * 2883 udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl, 2884 unsigned int sglen, enum dma_transfer_direction dir, 2885 unsigned long tx_flags, void *context) 2886 { 2887 struct scatterlist *sgent; 2888 struct udma_desc *d; 2889 struct cppi5_tr_type1_t *tr_req = NULL; 2890 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0; 2891 unsigned int i; 2892 size_t tr_size; 2893 int num_tr = 0; 2894 int tr_idx = 0; 2895 u64 asel; 2896 2897 /* estimate the number of TRs we will need */ 2898 for_each_sg(sgl, sgent, sglen, i) { 2899 if (sg_dma_len(sgent) < SZ_64K) 2900 num_tr++; 2901 else 2902 num_tr += 2; 2903 } 2904 2905 /* Now allocate and setup the descriptor. */ 2906 tr_size = sizeof(struct cppi5_tr_type1_t); 2907 d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir); 2908 if (!d) 2909 return NULL; 2910 2911 d->sglen = sglen; 2912 2913 if (uc->ud->match_data->type == DMA_TYPE_UDMA) 2914 asel = 0; 2915 else 2916 asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT; 2917 2918 tr_req = d->hwdesc[0].tr_req_base; 2919 for_each_sg(sgl, sgent, sglen, i) { 2920 dma_addr_t sg_addr = sg_dma_address(sgent); 2921 2922 num_tr = udma_get_tr_counters(sg_dma_len(sgent), __ffs(sg_addr), 2923 &tr0_cnt0, &tr0_cnt1, &tr1_cnt0); 2924 if (num_tr < 0) { 2925 dev_err(uc->ud->dev, "size %u is not supported\n", 2926 sg_dma_len(sgent)); 2927 udma_free_hwdesc(uc, d); 2928 kfree(d); 2929 return NULL; 2930 } 2931 2932 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false, 2933 false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 2934 cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT); 2935 2936 sg_addr |= asel; 2937 tr_req[tr_idx].addr = sg_addr; 2938 tr_req[tr_idx].icnt0 = tr0_cnt0; 2939 tr_req[tr_idx].icnt1 = tr0_cnt1; 2940 tr_req[tr_idx].dim1 = tr0_cnt0; 2941 tr_idx++; 2942 2943 if (num_tr == 2) { 2944 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, 2945 false, false, 2946 CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 2947 cppi5_tr_csf_set(&tr_req[tr_idx].flags, 2948 CPPI5_TR_CSF_SUPR_EVT); 2949 2950 tr_req[tr_idx].addr = sg_addr + tr0_cnt1 * tr0_cnt0; 2951 tr_req[tr_idx].icnt0 = tr1_cnt0; 2952 tr_req[tr_idx].icnt1 = 1; 2953 tr_req[tr_idx].dim1 = tr1_cnt0; 2954 tr_idx++; 2955 } 2956 2957 d->residue += sg_dma_len(sgent); 2958 } 2959 2960 cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags, 2961 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP); 2962 2963 return d; 2964 } 2965 2966 static struct udma_desc * 2967 udma_prep_slave_sg_triggered_tr(struct udma_chan *uc, struct scatterlist *sgl, 2968 unsigned int sglen, 2969 enum dma_transfer_direction dir, 2970 unsigned long tx_flags, void *context) 2971 { 2972 struct scatterlist *sgent; 2973 struct cppi5_tr_type15_t *tr_req = NULL; 2974 enum 
dma_slave_buswidth dev_width; 2975 u32 csf = CPPI5_TR_CSF_SUPR_EVT; 2976 u16 tr_cnt0, tr_cnt1; 2977 dma_addr_t dev_addr; 2978 struct udma_desc *d; 2979 unsigned int i; 2980 size_t tr_size, sg_len; 2981 int num_tr = 0; 2982 int tr_idx = 0; 2983 u32 burst, trigger_size, port_window; 2984 u64 asel; 2985 2986 if (dir == DMA_DEV_TO_MEM) { 2987 dev_addr = uc->cfg.src_addr; 2988 dev_width = uc->cfg.src_addr_width; 2989 burst = uc->cfg.src_maxburst; 2990 port_window = uc->cfg.src_port_window_size; 2991 } else if (dir == DMA_MEM_TO_DEV) { 2992 dev_addr = uc->cfg.dst_addr; 2993 dev_width = uc->cfg.dst_addr_width; 2994 burst = uc->cfg.dst_maxburst; 2995 port_window = uc->cfg.dst_port_window_size; 2996 } else { 2997 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__); 2998 return NULL; 2999 } 3000 3001 if (!burst) 3002 burst = 1; 3003 3004 if (port_window) { 3005 if (port_window != burst) { 3006 dev_err(uc->ud->dev, 3007 "The burst must be equal to port_window\n"); 3008 return NULL; 3009 } 3010 3011 tr_cnt0 = dev_width * port_window; 3012 tr_cnt1 = 1; 3013 } else { 3014 tr_cnt0 = dev_width; 3015 tr_cnt1 = burst; 3016 } 3017 trigger_size = tr_cnt0 * tr_cnt1; 3018 3019 /* estimate the number of TRs we will need */ 3020 for_each_sg(sgl, sgent, sglen, i) { 3021 sg_len = sg_dma_len(sgent); 3022 3023 if (sg_len % trigger_size) { 3024 dev_err(uc->ud->dev, 3025 "Not aligned SG entry (%zu for %u)\n", sg_len, 3026 trigger_size); 3027 return NULL; 3028 } 3029 3030 if (sg_len / trigger_size < SZ_64K) 3031 num_tr++; 3032 else 3033 num_tr += 2; 3034 } 3035 3036 /* Now allocate and setup the descriptor. */ 3037 tr_size = sizeof(struct cppi5_tr_type15_t); 3038 d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir); 3039 if (!d) 3040 return NULL; 3041 3042 d->sglen = sglen; 3043 3044 if (uc->ud->match_data->type == DMA_TYPE_UDMA) { 3045 asel = 0; 3046 csf |= CPPI5_TR_CSF_EOL_ICNT0; 3047 } else { 3048 asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT; 3049 dev_addr |= asel; 3050 } 3051 3052 tr_req = d->hwdesc[0].tr_req_base; 3053 for_each_sg(sgl, sgent, sglen, i) { 3054 u16 tr0_cnt2, tr0_cnt3, tr1_cnt2; 3055 dma_addr_t sg_addr = sg_dma_address(sgent); 3056 3057 sg_len = sg_dma_len(sgent); 3058 num_tr = udma_get_tr_counters(sg_len / trigger_size, 0, 3059 &tr0_cnt2, &tr0_cnt3, &tr1_cnt2); 3060 if (num_tr < 0) { 3061 dev_err(uc->ud->dev, "size %zu is not supported\n", 3062 sg_len); 3063 udma_free_hwdesc(uc, d); 3064 kfree(d); 3065 return NULL; 3066 } 3067 3068 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15, false, 3069 true, CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 3070 cppi5_tr_csf_set(&tr_req[tr_idx].flags, csf); 3071 cppi5_tr_set_trigger(&tr_req[tr_idx].flags, 3072 uc->config.tr_trigger_type, 3073 CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC, 0, 0); 3074 3075 sg_addr |= asel; 3076 if (dir == DMA_DEV_TO_MEM) { 3077 tr_req[tr_idx].addr = dev_addr; 3078 tr_req[tr_idx].icnt0 = tr_cnt0; 3079 tr_req[tr_idx].icnt1 = tr_cnt1; 3080 tr_req[tr_idx].icnt2 = tr0_cnt2; 3081 tr_req[tr_idx].icnt3 = tr0_cnt3; 3082 tr_req[tr_idx].dim1 = (-1) * tr_cnt0; 3083 3084 tr_req[tr_idx].daddr = sg_addr; 3085 tr_req[tr_idx].dicnt0 = tr_cnt0; 3086 tr_req[tr_idx].dicnt1 = tr_cnt1; 3087 tr_req[tr_idx].dicnt2 = tr0_cnt2; 3088 tr_req[tr_idx].dicnt3 = tr0_cnt3; 3089 tr_req[tr_idx].ddim1 = tr_cnt0; 3090 tr_req[tr_idx].ddim2 = trigger_size; 3091 tr_req[tr_idx].ddim3 = trigger_size * tr0_cnt2; 3092 } else { 3093 tr_req[tr_idx].addr = sg_addr; 3094 tr_req[tr_idx].icnt0 = tr_cnt0; 3095 tr_req[tr_idx].icnt1 = tr_cnt1; 3096 tr_req[tr_idx].icnt2 = tr0_cnt2; 3097 
tr_req[tr_idx].icnt3 = tr0_cnt3; 3098 tr_req[tr_idx].dim1 = tr_cnt0; 3099 tr_req[tr_idx].dim2 = trigger_size; 3100 tr_req[tr_idx].dim3 = trigger_size * tr0_cnt2; 3101 3102 tr_req[tr_idx].daddr = dev_addr; 3103 tr_req[tr_idx].dicnt0 = tr_cnt0; 3104 tr_req[tr_idx].dicnt1 = tr_cnt1; 3105 tr_req[tr_idx].dicnt2 = tr0_cnt2; 3106 tr_req[tr_idx].dicnt3 = tr0_cnt3; 3107 tr_req[tr_idx].ddim1 = (-1) * tr_cnt0; 3108 } 3109 3110 tr_idx++; 3111 3112 if (num_tr == 2) { 3113 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15, 3114 false, true, 3115 CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 3116 cppi5_tr_csf_set(&tr_req[tr_idx].flags, csf); 3117 cppi5_tr_set_trigger(&tr_req[tr_idx].flags, 3118 uc->config.tr_trigger_type, 3119 CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC, 3120 0, 0); 3121 3122 sg_addr += trigger_size * tr0_cnt2 * tr0_cnt3; 3123 if (dir == DMA_DEV_TO_MEM) { 3124 tr_req[tr_idx].addr = dev_addr; 3125 tr_req[tr_idx].icnt0 = tr_cnt0; 3126 tr_req[tr_idx].icnt1 = tr_cnt1; 3127 tr_req[tr_idx].icnt2 = tr1_cnt2; 3128 tr_req[tr_idx].icnt3 = 1; 3129 tr_req[tr_idx].dim1 = (-1) * tr_cnt0; 3130 3131 tr_req[tr_idx].daddr = sg_addr; 3132 tr_req[tr_idx].dicnt0 = tr_cnt0; 3133 tr_req[tr_idx].dicnt1 = tr_cnt1; 3134 tr_req[tr_idx].dicnt2 = tr1_cnt2; 3135 tr_req[tr_idx].dicnt3 = 1; 3136 tr_req[tr_idx].ddim1 = tr_cnt0; 3137 tr_req[tr_idx].ddim2 = trigger_size; 3138 } else { 3139 tr_req[tr_idx].addr = sg_addr; 3140 tr_req[tr_idx].icnt0 = tr_cnt0; 3141 tr_req[tr_idx].icnt1 = tr_cnt1; 3142 tr_req[tr_idx].icnt2 = tr1_cnt2; 3143 tr_req[tr_idx].icnt3 = 1; 3144 tr_req[tr_idx].dim1 = tr_cnt0; 3145 tr_req[tr_idx].dim2 = trigger_size; 3146 3147 tr_req[tr_idx].daddr = dev_addr; 3148 tr_req[tr_idx].dicnt0 = tr_cnt0; 3149 tr_req[tr_idx].dicnt1 = tr_cnt1; 3150 tr_req[tr_idx].dicnt2 = tr1_cnt2; 3151 tr_req[tr_idx].dicnt3 = 1; 3152 tr_req[tr_idx].ddim1 = (-1) * tr_cnt0; 3153 } 3154 tr_idx++; 3155 } 3156 3157 d->residue += sg_len; 3158 } 3159 3160 cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags, csf | CPPI5_TR_CSF_EOP); 3161 3162 return d; 3163 } 3164 3165 static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d, 3166 enum dma_slave_buswidth dev_width, 3167 u16 elcnt) 3168 { 3169 if (uc->config.ep_type != PSIL_EP_PDMA_XY) 3170 return 0; 3171 3172 /* Bus width translates to the element size (ES) */ 3173 switch (dev_width) { 3174 case DMA_SLAVE_BUSWIDTH_1_BYTE: 3175 d->static_tr.elsize = 0; 3176 break; 3177 case DMA_SLAVE_BUSWIDTH_2_BYTES: 3178 d->static_tr.elsize = 1; 3179 break; 3180 case DMA_SLAVE_BUSWIDTH_3_BYTES: 3181 d->static_tr.elsize = 2; 3182 break; 3183 case DMA_SLAVE_BUSWIDTH_4_BYTES: 3184 d->static_tr.elsize = 3; 3185 break; 3186 case DMA_SLAVE_BUSWIDTH_8_BYTES: 3187 d->static_tr.elsize = 4; 3188 break; 3189 default: /* not reached */ 3190 return -EINVAL; 3191 } 3192 3193 d->static_tr.elcnt = elcnt; 3194 3195 if (uc->config.pkt_mode || !uc->cyclic) { 3196 /* 3197 * PDMA must close the packet when the channel is in packet mode. 3198 * For TR mode when the channel is not cyclic we also need PDMA 3199 * to close the packet otherwise the transfer will stall because 3200 * PDMA holds on the data it has received from the peripheral. 
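 *
 * Illustrative example (hypothetical values): with a 4 byte bus width and
 * elcnt = 8 the divider below is div = 4 * 8 = 32; a non-cyclic transfer
 * with d->residue = 4096 gives bstcnt = 4096 / 32 = 128, while a cyclic
 * one with d->sglen = 4 periods gives bstcnt = 4096 / 4 / 32 = 32.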
3201 */ 3202 unsigned int div = dev_width * elcnt; 3203 3204 if (uc->cyclic) 3205 d->static_tr.bstcnt = d->residue / d->sglen / div; 3206 else 3207 d->static_tr.bstcnt = d->residue / div; 3208 } else if (uc->ud->match_data->type == DMA_TYPE_BCDMA && 3209 uc->config.dir == DMA_DEV_TO_MEM && 3210 uc->cyclic) { 3211 /* 3212 * For cyclic mode with BCDMA we have to set EOP in each TR to 3213 * prevent short packet errors seen on channel teardown. So the 3214 * PDMA must close the packet after every TR transfer by setting 3215 * burst count equal to the number of bytes transferred. 3216 */ 3217 struct cppi5_tr_type1_t *tr_req = d->hwdesc[0].tr_req_base; 3218 3219 d->static_tr.bstcnt = 3220 (tr_req->icnt0 * tr_req->icnt1) / dev_width; 3221 } else { 3222 d->static_tr.bstcnt = 0; 3223 } 3224 3225 if (uc->config.dir == DMA_DEV_TO_MEM && 3226 d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask) 3227 return -EINVAL; 3228 3229 return 0; 3230 } 3231 3232 static struct udma_desc * 3233 udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl, 3234 unsigned int sglen, enum dma_transfer_direction dir, 3235 unsigned long tx_flags, void *context) 3236 { 3237 struct scatterlist *sgent; 3238 struct cppi5_host_desc_t *h_desc = NULL; 3239 struct udma_desc *d; 3240 u32 ring_id; 3241 unsigned int i; 3242 u64 asel; 3243 3244 d = kzalloc(struct_size(d, hwdesc, sglen), GFP_NOWAIT); 3245 if (!d) 3246 return NULL; 3247 3248 d->sglen = sglen; 3249 d->hwdesc_count = sglen; 3250 3251 if (dir == DMA_DEV_TO_MEM) 3252 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring); 3253 else 3254 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring); 3255 3256 if (uc->ud->match_data->type == DMA_TYPE_UDMA) 3257 asel = 0; 3258 else 3259 asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT; 3260 3261 for_each_sg(sgl, sgent, sglen, i) { 3262 struct udma_hwdesc *hwdesc = &d->hwdesc[i]; 3263 dma_addr_t sg_addr = sg_dma_address(sgent); 3264 struct cppi5_host_desc_t *desc; 3265 size_t sg_len = sg_dma_len(sgent); 3266 3267 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool, 3268 GFP_NOWAIT, 3269 &hwdesc->cppi5_desc_paddr); 3270 if (!hwdesc->cppi5_desc_vaddr) { 3271 dev_err(uc->ud->dev, 3272 "descriptor%d allocation failed\n", i); 3273 3274 udma_free_hwdesc(uc, d); 3275 kfree(d); 3276 return NULL; 3277 } 3278 3279 d->residue += sg_len; 3280 hwdesc->cppi5_desc_size = uc->config.hdesc_size; 3281 desc = hwdesc->cppi5_desc_vaddr; 3282 3283 if (i == 0) { 3284 cppi5_hdesc_init(desc, 0, 0); 3285 /* Flow and Packed ID */ 3286 cppi5_desc_set_pktids(&desc->hdr, uc->id, 3287 CPPI5_INFO1_DESC_FLOWID_DEFAULT); 3288 cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id); 3289 } else { 3290 cppi5_hdesc_reset_hbdesc(desc); 3291 cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff); 3292 } 3293 3294 /* attach the sg buffer to the descriptor */ 3295 sg_addr |= asel; 3296 cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len); 3297 3298 /* Attach link as host buffer descriptor */ 3299 if (h_desc) 3300 cppi5_hdesc_link_hbdesc(h_desc, 3301 hwdesc->cppi5_desc_paddr | asel); 3302 3303 if (uc->ud->match_data->type == DMA_TYPE_PKTDMA || 3304 dir == DMA_MEM_TO_DEV) 3305 h_desc = desc; 3306 } 3307 3308 if (d->residue >= SZ_4M) { 3309 dev_err(uc->ud->dev, 3310 "%s: Transfer size %u is over the supported 4M range\n", 3311 __func__, d->residue); 3312 udma_free_hwdesc(uc, d); 3313 kfree(d); 3314 return NULL; 3315 } 3316 3317 h_desc = d->hwdesc[0].cppi5_desc_vaddr; 3318 cppi5_hdesc_set_pktlen(h_desc, d->residue); 3319 3320 return d; 3321 } 3322 3323 static 
int udma_attach_metadata(struct dma_async_tx_descriptor *desc, 3324 void *data, size_t len) 3325 { 3326 struct udma_desc *d = to_udma_desc(desc); 3327 struct udma_chan *uc = to_udma_chan(desc->chan); 3328 struct cppi5_host_desc_t *h_desc; 3329 u32 psd_size = len; 3330 u32 flags = 0; 3331 3332 if (!uc->config.pkt_mode || !uc->config.metadata_size) 3333 return -ENOTSUPP; 3334 3335 if (!data || len > uc->config.metadata_size) 3336 return -EINVAL; 3337 3338 if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE) 3339 return -EINVAL; 3340 3341 h_desc = d->hwdesc[0].cppi5_desc_vaddr; 3342 if (d->dir == DMA_MEM_TO_DEV) 3343 memcpy(h_desc->epib, data, len); 3344 3345 if (uc->config.needs_epib) 3346 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE; 3347 3348 d->metadata = data; 3349 d->metadata_size = len; 3350 if (uc->config.needs_epib) 3351 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT; 3352 3353 cppi5_hdesc_update_flags(h_desc, flags); 3354 cppi5_hdesc_update_psdata_size(h_desc, psd_size); 3355 3356 return 0; 3357 } 3358 3359 static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc, 3360 size_t *payload_len, size_t *max_len) 3361 { 3362 struct udma_desc *d = to_udma_desc(desc); 3363 struct udma_chan *uc = to_udma_chan(desc->chan); 3364 struct cppi5_host_desc_t *h_desc; 3365 3366 if (!uc->config.pkt_mode || !uc->config.metadata_size) 3367 return ERR_PTR(-ENOTSUPP); 3368 3369 h_desc = d->hwdesc[0].cppi5_desc_vaddr; 3370 3371 *max_len = uc->config.metadata_size; 3372 3373 *payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ? 3374 CPPI5_INFO0_HDESC_EPIB_SIZE : 0; 3375 *payload_len += cppi5_hdesc_get_psdata_size(h_desc); 3376 3377 return h_desc->epib; 3378 } 3379 3380 static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc, 3381 size_t payload_len) 3382 { 3383 struct udma_desc *d = to_udma_desc(desc); 3384 struct udma_chan *uc = to_udma_chan(desc->chan); 3385 struct cppi5_host_desc_t *h_desc; 3386 u32 psd_size = payload_len; 3387 u32 flags = 0; 3388 3389 if (!uc->config.pkt_mode || !uc->config.metadata_size) 3390 return -ENOTSUPP; 3391 3392 if (payload_len > uc->config.metadata_size) 3393 return -EINVAL; 3394 3395 if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE) 3396 return -EINVAL; 3397 3398 h_desc = d->hwdesc[0].cppi5_desc_vaddr; 3399 3400 if (uc->config.needs_epib) { 3401 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE; 3402 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT; 3403 } 3404 3405 cppi5_hdesc_update_flags(h_desc, flags); 3406 cppi5_hdesc_update_psdata_size(h_desc, psd_size); 3407 3408 return 0; 3409 } 3410 3411 static struct dma_descriptor_metadata_ops metadata_ops = { 3412 .attach = udma_attach_metadata, 3413 .get_ptr = udma_get_metadata_ptr, 3414 .set_len = udma_set_metadata_len, 3415 }; 3416 3417 static struct dma_async_tx_descriptor * 3418 udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 3419 unsigned int sglen, enum dma_transfer_direction dir, 3420 unsigned long tx_flags, void *context) 3421 { 3422 struct udma_chan *uc = to_udma_chan(chan); 3423 enum dma_slave_buswidth dev_width; 3424 struct udma_desc *d; 3425 u32 burst; 3426 3427 if (dir != uc->config.dir && 3428 (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type)) { 3429 dev_err(chan->device->dev, 3430 "%s: chan%d is for %s, not supporting %s\n", 3431 __func__, uc->id, 3432 dmaengine_get_direction_text(uc->config.dir), 3433 dmaengine_get_direction_text(dir)); 3434 return NULL; 3435 } 3436 3437 if (dir == DMA_DEV_TO_MEM) { 3438 dev_width = uc->cfg.src_addr_width; 3439 
burst = uc->cfg.src_maxburst; 3440 } else if (dir == DMA_MEM_TO_DEV) { 3441 dev_width = uc->cfg.dst_addr_width; 3442 burst = uc->cfg.dst_maxburst; 3443 } else { 3444 dev_err(chan->device->dev, "%s: bad direction?\n", __func__); 3445 return NULL; 3446 } 3447 3448 if (!burst) 3449 burst = 1; 3450 3451 uc->config.tx_flags = tx_flags; 3452 3453 if (uc->config.pkt_mode) 3454 d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags, 3455 context); 3456 else if (is_slave_direction(uc->config.dir)) 3457 d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags, 3458 context); 3459 else 3460 d = udma_prep_slave_sg_triggered_tr(uc, sgl, sglen, dir, 3461 tx_flags, context); 3462 3463 if (!d) 3464 return NULL; 3465 3466 d->dir = dir; 3467 d->desc_idx = 0; 3468 d->tr_idx = 0; 3469 3470 /* static TR for remote PDMA */ 3471 if (udma_configure_statictr(uc, d, dev_width, burst)) { 3472 dev_err(uc->ud->dev, 3473 "%s: StaticTR Z is limited to maximum %u (%u)\n", 3474 __func__, uc->ud->match_data->statictr_z_mask, 3475 d->static_tr.bstcnt); 3476 3477 udma_free_hwdesc(uc, d); 3478 kfree(d); 3479 return NULL; 3480 } 3481 3482 if (uc->config.metadata_size) 3483 d->vd.tx.metadata_ops = &metadata_ops; 3484 3485 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags); 3486 } 3487 3488 static struct udma_desc * 3489 udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr, 3490 size_t buf_len, size_t period_len, 3491 enum dma_transfer_direction dir, unsigned long flags) 3492 { 3493 struct udma_desc *d; 3494 size_t tr_size, period_addr; 3495 struct cppi5_tr_type1_t *tr_req; 3496 unsigned int periods = buf_len / period_len; 3497 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0; 3498 unsigned int i; 3499 int num_tr; 3500 u32 period_csf = 0; 3501 3502 num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0, 3503 &tr0_cnt1, &tr1_cnt0); 3504 if (num_tr < 0) { 3505 dev_err(uc->ud->dev, "size %zu is not supported\n", 3506 period_len); 3507 return NULL; 3508 } 3509 3510 /* Now allocate and setup the descriptor. */ 3511 tr_size = sizeof(struct cppi5_tr_type1_t); 3512 d = udma_alloc_tr_desc(uc, tr_size, periods * num_tr, dir); 3513 if (!d) 3514 return NULL; 3515 3516 tr_req = d->hwdesc[0].tr_req_base; 3517 if (uc->ud->match_data->type == DMA_TYPE_UDMA) 3518 period_addr = buf_addr; 3519 else 3520 period_addr = buf_addr | 3521 ((u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT); 3522 3523 /* 3524 * For BCDMA <-> PDMA transfers, the EOP flag needs to be set on the 3525 * last TR of a descriptor, to mark the packet as complete. 3526 * This is required for getting the teardown completion message in case 3527 * of TX, and to avoid short-packet error in case of RX. 3528 * 3529 * As we are in cyclic mode, we do not know which period might be the 3530 * last one, so set the flag for each period. 
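 *
 * Note that when the client did not request DMA_PREP_INTERRUPT,
 * CPPI5_TR_CSF_SUPR_EVT is also OR-ed into period_csf below so the
 * periods complete without raising per-period events.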
3531 */ 3532 if (uc->config.ep_type == PSIL_EP_PDMA_XY && 3533 uc->ud->match_data->type == DMA_TYPE_BCDMA) { 3534 period_csf = CPPI5_TR_CSF_EOP; 3535 } 3536 3537 for (i = 0; i < periods; i++) { 3538 int tr_idx = i * num_tr; 3539 3540 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false, 3541 false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 3542 3543 tr_req[tr_idx].addr = period_addr; 3544 tr_req[tr_idx].icnt0 = tr0_cnt0; 3545 tr_req[tr_idx].icnt1 = tr0_cnt1; 3546 tr_req[tr_idx].dim1 = tr0_cnt0; 3547 3548 if (num_tr == 2) { 3549 cppi5_tr_csf_set(&tr_req[tr_idx].flags, 3550 CPPI5_TR_CSF_SUPR_EVT); 3551 tr_idx++; 3552 3553 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, 3554 false, false, 3555 CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 3556 3557 tr_req[tr_idx].addr = period_addr + tr0_cnt1 * tr0_cnt0; 3558 tr_req[tr_idx].icnt0 = tr1_cnt0; 3559 tr_req[tr_idx].icnt1 = 1; 3560 tr_req[tr_idx].dim1 = tr1_cnt0; 3561 } 3562 3563 if (!(flags & DMA_PREP_INTERRUPT)) 3564 period_csf |= CPPI5_TR_CSF_SUPR_EVT; 3565 3566 if (period_csf) 3567 cppi5_tr_csf_set(&tr_req[tr_idx].flags, period_csf); 3568 3569 period_addr += period_len; 3570 } 3571 3572 return d; 3573 } 3574 3575 static struct udma_desc * 3576 udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr, 3577 size_t buf_len, size_t period_len, 3578 enum dma_transfer_direction dir, unsigned long flags) 3579 { 3580 struct udma_desc *d; 3581 u32 ring_id; 3582 int i; 3583 int periods = buf_len / period_len; 3584 3585 if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1)) 3586 return NULL; 3587 3588 if (period_len >= SZ_4M) 3589 return NULL; 3590 3591 d = kzalloc(struct_size(d, hwdesc, periods), GFP_NOWAIT); 3592 if (!d) 3593 return NULL; 3594 3595 d->hwdesc_count = periods; 3596 3597 /* TODO: re-check this... */ 3598 if (dir == DMA_DEV_TO_MEM) 3599 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring); 3600 else 3601 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring); 3602 3603 if (uc->ud->match_data->type != DMA_TYPE_UDMA) 3604 buf_addr |= (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT; 3605 3606 for (i = 0; i < periods; i++) { 3607 struct udma_hwdesc *hwdesc = &d->hwdesc[i]; 3608 dma_addr_t period_addr = buf_addr + (period_len * i); 3609 struct cppi5_host_desc_t *h_desc; 3610 3611 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool, 3612 GFP_NOWAIT, 3613 &hwdesc->cppi5_desc_paddr); 3614 if (!hwdesc->cppi5_desc_vaddr) { 3615 dev_err(uc->ud->dev, 3616 "descriptor%d allocation failed\n", i); 3617 3618 udma_free_hwdesc(uc, d); 3619 kfree(d); 3620 return NULL; 3621 } 3622 3623 hwdesc->cppi5_desc_size = uc->config.hdesc_size; 3624 h_desc = hwdesc->cppi5_desc_vaddr; 3625 3626 cppi5_hdesc_init(h_desc, 0, 0); 3627 cppi5_hdesc_set_pktlen(h_desc, period_len); 3628 3629 /* Flow and Packed ID */ 3630 cppi5_desc_set_pktids(&h_desc->hdr, uc->id, 3631 CPPI5_INFO1_DESC_FLOWID_DEFAULT); 3632 cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id); 3633 3634 /* attach each period to a new descriptor */ 3635 cppi5_hdesc_attach_buf(h_desc, 3636 period_addr, period_len, 3637 period_addr, period_len); 3638 } 3639 3640 return d; 3641 } 3642 3643 static struct dma_async_tx_descriptor * 3644 udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, 3645 size_t period_len, enum dma_transfer_direction dir, 3646 unsigned long flags) 3647 { 3648 struct udma_chan *uc = to_udma_chan(chan); 3649 enum dma_slave_buswidth dev_width; 3650 struct udma_desc *d; 3651 u32 burst; 3652 3653 if (dir != uc->config.dir) { 3654 dev_err(chan->device->dev, 3655 "%s: chan%d is 
for %s, not supporting %s\n", 3656 __func__, uc->id, 3657 dmaengine_get_direction_text(uc->config.dir), 3658 dmaengine_get_direction_text(dir)); 3659 return NULL; 3660 } 3661 3662 uc->cyclic = true; 3663 3664 if (dir == DMA_DEV_TO_MEM) { 3665 dev_width = uc->cfg.src_addr_width; 3666 burst = uc->cfg.src_maxburst; 3667 } else if (dir == DMA_MEM_TO_DEV) { 3668 dev_width = uc->cfg.dst_addr_width; 3669 burst = uc->cfg.dst_maxburst; 3670 } else { 3671 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__); 3672 return NULL; 3673 } 3674 3675 if (!burst) 3676 burst = 1; 3677 3678 if (uc->config.pkt_mode) 3679 d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len, 3680 dir, flags); 3681 else 3682 d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len, 3683 dir, flags); 3684 3685 if (!d) 3686 return NULL; 3687 3688 d->sglen = buf_len / period_len; 3689 3690 d->dir = dir; 3691 d->residue = buf_len; 3692 3693 /* static TR for remote PDMA */ 3694 if (udma_configure_statictr(uc, d, dev_width, burst)) { 3695 dev_err(uc->ud->dev, 3696 "%s: StaticTR Z is limited to maximum %u (%u)\n", 3697 __func__, uc->ud->match_data->statictr_z_mask, 3698 d->static_tr.bstcnt); 3699 3700 udma_free_hwdesc(uc, d); 3701 kfree(d); 3702 return NULL; 3703 } 3704 3705 if (uc->config.metadata_size) 3706 d->vd.tx.metadata_ops = &metadata_ops; 3707 3708 return vchan_tx_prep(&uc->vc, &d->vd, flags); 3709 } 3710 3711 static struct dma_async_tx_descriptor * 3712 udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 3713 size_t len, unsigned long tx_flags) 3714 { 3715 struct udma_chan *uc = to_udma_chan(chan); 3716 struct udma_desc *d; 3717 struct cppi5_tr_type15_t *tr_req; 3718 int num_tr; 3719 size_t tr_size = sizeof(struct cppi5_tr_type15_t); 3720 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0; 3721 u32 csf = CPPI5_TR_CSF_SUPR_EVT; 3722 3723 if (uc->config.dir != DMA_MEM_TO_MEM) { 3724 dev_err(chan->device->dev, 3725 "%s: chan%d is for %s, not supporting %s\n", 3726 __func__, uc->id, 3727 dmaengine_get_direction_text(uc->config.dir), 3728 dmaengine_get_direction_text(DMA_MEM_TO_MEM)); 3729 return NULL; 3730 } 3731 3732 num_tr = udma_get_tr_counters(len, __ffs(src | dest), &tr0_cnt0, 3733 &tr0_cnt1, &tr1_cnt0); 3734 if (num_tr < 0) { 3735 dev_err(uc->ud->dev, "size %zu is not supported\n", 3736 len); 3737 return NULL; 3738 } 3739 3740 d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM); 3741 if (!d) 3742 return NULL; 3743 3744 d->dir = DMA_MEM_TO_MEM; 3745 d->desc_idx = 0; 3746 d->tr_idx = 0; 3747 d->residue = len; 3748 3749 if (uc->ud->match_data->type != DMA_TYPE_UDMA) { 3750 src |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT; 3751 dest |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT; 3752 } else { 3753 csf |= CPPI5_TR_CSF_EOL_ICNT0; 3754 } 3755 3756 tr_req = d->hwdesc[0].tr_req_base; 3757 3758 cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true, 3759 CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 3760 cppi5_tr_csf_set(&tr_req[0].flags, csf); 3761 3762 tr_req[0].addr = src; 3763 tr_req[0].icnt0 = tr0_cnt0; 3764 tr_req[0].icnt1 = tr0_cnt1; 3765 tr_req[0].icnt2 = 1; 3766 tr_req[0].icnt3 = 1; 3767 tr_req[0].dim1 = tr0_cnt0; 3768 3769 tr_req[0].daddr = dest; 3770 tr_req[0].dicnt0 = tr0_cnt0; 3771 tr_req[0].dicnt1 = tr0_cnt1; 3772 tr_req[0].dicnt2 = 1; 3773 tr_req[0].dicnt3 = 1; 3774 tr_req[0].ddim1 = tr0_cnt0; 3775 3776 if (num_tr == 2) { 3777 cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true, 3778 CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 3779 cppi5_tr_csf_set(&tr_req[1].flags, csf); 3780 3781 
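		/*
		 * The second TR copies the tail that did not fit into the
		 * tr0_cnt0 * tr0_cnt1 bytes covered by the first TR.
		 */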
tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0; 3782 tr_req[1].icnt0 = tr1_cnt0; 3783 tr_req[1].icnt1 = 1; 3784 tr_req[1].icnt2 = 1; 3785 tr_req[1].icnt3 = 1; 3786 3787 tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0; 3788 tr_req[1].dicnt0 = tr1_cnt0; 3789 tr_req[1].dicnt1 = 1; 3790 tr_req[1].dicnt2 = 1; 3791 tr_req[1].dicnt3 = 1; 3792 } 3793 3794 cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, csf | CPPI5_TR_CSF_EOP); 3795 3796 if (uc->config.metadata_size) 3797 d->vd.tx.metadata_ops = &metadata_ops; 3798 3799 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags); 3800 } 3801 3802 static void udma_issue_pending(struct dma_chan *chan) 3803 { 3804 struct udma_chan *uc = to_udma_chan(chan); 3805 unsigned long flags; 3806 3807 spin_lock_irqsave(&uc->vc.lock, flags); 3808 3809 /* If we have something pending and no active descriptor, then */ 3810 if (vchan_issue_pending(&uc->vc) && !uc->desc) { 3811 /* 3812 * start a descriptor if the channel is NOT [marked as 3813 * terminating _and_ it is still running (teardown has not 3814 * completed yet)]. 3815 */ 3816 if (!(uc->state == UDMA_CHAN_IS_TERMINATING && 3817 udma_is_chan_running(uc))) 3818 udma_start(uc); 3819 } 3820 3821 spin_unlock_irqrestore(&uc->vc.lock, flags); 3822 } 3823 3824 static enum dma_status udma_tx_status(struct dma_chan *chan, 3825 dma_cookie_t cookie, 3826 struct dma_tx_state *txstate) 3827 { 3828 struct udma_chan *uc = to_udma_chan(chan); 3829 enum dma_status ret; 3830 unsigned long flags; 3831 3832 spin_lock_irqsave(&uc->vc.lock, flags); 3833 3834 ret = dma_cookie_status(chan, cookie, txstate); 3835 3836 if (!udma_is_chan_running(uc)) 3837 ret = DMA_COMPLETE; 3838 3839 if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc)) 3840 ret = DMA_PAUSED; 3841 3842 if (ret == DMA_COMPLETE || !txstate) 3843 goto out; 3844 3845 if (uc->desc && uc->desc->vd.tx.cookie == cookie) { 3846 u32 peer_bcnt = 0; 3847 u32 bcnt = 0; 3848 u32 residue = uc->desc->residue; 3849 u32 delay = 0; 3850 3851 if (uc->desc->dir == DMA_MEM_TO_DEV) { 3852 bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG); 3853 3854 if (uc->config.ep_type != PSIL_EP_NATIVE) { 3855 peer_bcnt = udma_tchanrt_read(uc, 3856 UDMA_CHAN_RT_PEER_BCNT_REG); 3857 3858 if (bcnt > peer_bcnt) 3859 delay = bcnt - peer_bcnt; 3860 } 3861 } else if (uc->desc->dir == DMA_DEV_TO_MEM) { 3862 bcnt = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG); 3863 3864 if (uc->config.ep_type != PSIL_EP_NATIVE) { 3865 peer_bcnt = udma_rchanrt_read(uc, 3866 UDMA_CHAN_RT_PEER_BCNT_REG); 3867 3868 if (peer_bcnt > bcnt) 3869 delay = peer_bcnt - bcnt; 3870 } 3871 } else { 3872 bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG); 3873 } 3874 3875 if (bcnt && !(bcnt % uc->desc->residue)) 3876 residue = 0; 3877 else 3878 residue -= bcnt % uc->desc->residue; 3879 3880 if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) { 3881 ret = DMA_COMPLETE; 3882 delay = 0; 3883 } 3884 3885 dma_set_residue(txstate, residue); 3886 dma_set_in_flight_bytes(txstate, delay); 3887 3888 } else { 3889 ret = DMA_COMPLETE; 3890 } 3891 3892 out: 3893 spin_unlock_irqrestore(&uc->vc.lock, flags); 3894 return ret; 3895 } 3896 3897 static int udma_pause(struct dma_chan *chan) 3898 { 3899 struct udma_chan *uc = to_udma_chan(chan); 3900 3901 /* pause the channel */ 3902 switch (uc->config.dir) { 3903 case DMA_DEV_TO_MEM: 3904 udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 3905 UDMA_PEER_RT_EN_PAUSE, 3906 UDMA_PEER_RT_EN_PAUSE); 3907 break; 3908 case DMA_MEM_TO_DEV: 3909 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 3910 
UDMA_PEER_RT_EN_PAUSE, 3911 UDMA_PEER_RT_EN_PAUSE); 3912 break; 3913 case DMA_MEM_TO_MEM: 3914 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG, 3915 UDMA_CHAN_RT_CTL_PAUSE, 3916 UDMA_CHAN_RT_CTL_PAUSE); 3917 break; 3918 default: 3919 return -EINVAL; 3920 } 3921 3922 return 0; 3923 } 3924 3925 static int udma_resume(struct dma_chan *chan) 3926 { 3927 struct udma_chan *uc = to_udma_chan(chan); 3928 3929 /* resume the channel */ 3930 switch (uc->config.dir) { 3931 case DMA_DEV_TO_MEM: 3932 udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 3933 UDMA_PEER_RT_EN_PAUSE, 0); 3934 3935 break; 3936 case DMA_MEM_TO_DEV: 3937 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 3938 UDMA_PEER_RT_EN_PAUSE, 0); 3939 break; 3940 case DMA_MEM_TO_MEM: 3941 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG, 3942 UDMA_CHAN_RT_CTL_PAUSE, 0); 3943 break; 3944 default: 3945 return -EINVAL; 3946 } 3947 3948 return 0; 3949 } 3950 3951 static int udma_terminate_all(struct dma_chan *chan) 3952 { 3953 struct udma_chan *uc = to_udma_chan(chan); 3954 unsigned long flags; 3955 LIST_HEAD(head); 3956 3957 spin_lock_irqsave(&uc->vc.lock, flags); 3958 3959 if (udma_is_chan_running(uc)) 3960 udma_stop(uc); 3961 3962 if (uc->desc) { 3963 uc->terminated_desc = uc->desc; 3964 uc->desc = NULL; 3965 uc->terminated_desc->terminated = true; 3966 cancel_delayed_work(&uc->tx_drain.work); 3967 } 3968 3969 uc->paused = false; 3970 3971 vchan_get_all_descriptors(&uc->vc, &head); 3972 spin_unlock_irqrestore(&uc->vc.lock, flags); 3973 vchan_dma_desc_free_list(&uc->vc, &head); 3974 3975 return 0; 3976 } 3977 3978 static void udma_synchronize(struct dma_chan *chan) 3979 { 3980 struct udma_chan *uc = to_udma_chan(chan); 3981 unsigned long timeout = msecs_to_jiffies(1000); 3982 3983 vchan_synchronize(&uc->vc); 3984 3985 if (uc->state == UDMA_CHAN_IS_TERMINATING) { 3986 timeout = wait_for_completion_timeout(&uc->teardown_completed, 3987 timeout); 3988 if (!timeout) { 3989 dev_warn(uc->ud->dev, "chan%d teardown timeout!\n", 3990 uc->id); 3991 udma_dump_chan_stdata(uc); 3992 udma_reset_chan(uc, true); 3993 } 3994 } 3995 3996 udma_reset_chan(uc, false); 3997 if (udma_is_chan_running(uc)) 3998 dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id); 3999 4000 cancel_delayed_work_sync(&uc->tx_drain.work); 4001 udma_reset_rings(uc); 4002 } 4003 4004 static void udma_desc_pre_callback(struct virt_dma_chan *vc, 4005 struct virt_dma_desc *vd, 4006 struct dmaengine_result *result) 4007 { 4008 struct udma_chan *uc = to_udma_chan(&vc->chan); 4009 struct udma_desc *d; 4010 u8 status; 4011 4012 if (!vd) 4013 return; 4014 4015 d = to_udma_desc(&vd->tx); 4016 4017 if (d->metadata_size) 4018 udma_fetch_epib(uc, d); 4019 4020 if (result) { 4021 void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx); 4022 4023 if (cppi5_desc_get_type(desc_vaddr) == 4024 CPPI5_INFO0_DESC_TYPE_VAL_HOST) { 4025 /* Provide residue information for the client */ 4026 result->residue = d->residue - 4027 cppi5_hdesc_get_pktlen(desc_vaddr); 4028 if (result->residue) 4029 result->result = DMA_TRANS_ABORTED; 4030 else 4031 result->result = DMA_TRANS_NOERROR; 4032 } else { 4033 result->residue = 0; 4034 /* Propagate TR Response errors to the client */ 4035 status = d->hwdesc[0].tr_resp_base->status; 4036 if (status) 4037 result->result = DMA_TRANS_ABORTED; 4038 else 4039 result->result = DMA_TRANS_NOERROR; 4040 } 4041 } 4042 } 4043 4044 /* 4045 * This tasklet handles the completion of a DMA descriptor by 4046 * calling its callback and freeing it. 
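 * A cyclic descriptor only has its callback invoked here (it is not
 * freed), while descriptors on the completed list are removed, given a
 * dmaengine_result via udma_desc_pre_callback() and then released with
 * vchan_vdesc_fini().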
4047 */ 4048 static void udma_vchan_complete(struct tasklet_struct *t) 4049 { 4050 struct virt_dma_chan *vc = from_tasklet(vc, t, task); 4051 struct virt_dma_desc *vd, *_vd; 4052 struct dmaengine_desc_callback cb; 4053 LIST_HEAD(head); 4054 4055 spin_lock_irq(&vc->lock); 4056 list_splice_tail_init(&vc->desc_completed, &head); 4057 vd = vc->cyclic; 4058 if (vd) { 4059 vc->cyclic = NULL; 4060 dmaengine_desc_get_callback(&vd->tx, &cb); 4061 } else { 4062 memset(&cb, 0, sizeof(cb)); 4063 } 4064 spin_unlock_irq(&vc->lock); 4065 4066 udma_desc_pre_callback(vc, vd, NULL); 4067 dmaengine_desc_callback_invoke(&cb, NULL); 4068 4069 list_for_each_entry_safe(vd, _vd, &head, node) { 4070 struct dmaengine_result result; 4071 4072 dmaengine_desc_get_callback(&vd->tx, &cb); 4073 4074 list_del(&vd->node); 4075 4076 udma_desc_pre_callback(vc, vd, &result); 4077 dmaengine_desc_callback_invoke(&cb, &result); 4078 4079 vchan_vdesc_fini(vd); 4080 } 4081 } 4082 4083 static void udma_free_chan_resources(struct dma_chan *chan) 4084 { 4085 struct udma_chan *uc = to_udma_chan(chan); 4086 struct udma_dev *ud = to_udma_dev(chan->device); 4087 4088 udma_terminate_all(chan); 4089 if (uc->terminated_desc) { 4090 udma_reset_chan(uc, false); 4091 udma_reset_rings(uc); 4092 } 4093 4094 cancel_delayed_work_sync(&uc->tx_drain.work); 4095 4096 if (uc->irq_num_ring > 0) { 4097 free_irq(uc->irq_num_ring, uc); 4098 4099 uc->irq_num_ring = 0; 4100 } 4101 if (uc->irq_num_udma > 0) { 4102 free_irq(uc->irq_num_udma, uc); 4103 4104 uc->irq_num_udma = 0; 4105 } 4106 4107 /* Release PSI-L pairing */ 4108 if (uc->psil_paired) { 4109 navss_psil_unpair(ud, uc->config.src_thread, 4110 uc->config.dst_thread); 4111 uc->psil_paired = false; 4112 } 4113 4114 vchan_free_chan_resources(&uc->vc); 4115 tasklet_kill(&uc->vc.task); 4116 4117 bcdma_free_bchan_resources(uc); 4118 udma_free_tx_resources(uc); 4119 udma_free_rx_resources(uc); 4120 udma_reset_uchan(uc); 4121 4122 if (uc->use_dma_pool) { 4123 dma_pool_destroy(uc->hdesc_pool); 4124 uc->use_dma_pool = false; 4125 } 4126 } 4127 4128 static struct platform_driver udma_driver; 4129 static struct platform_driver bcdma_driver; 4130 static struct platform_driver pktdma_driver; 4131 4132 struct udma_filter_param { 4133 int remote_thread_id; 4134 u32 atype; 4135 u32 asel; 4136 u32 tr_trigger_type; 4137 }; 4138 4139 static bool udma_dma_filter_fn(struct dma_chan *chan, void *param) 4140 { 4141 struct udma_chan_config *ucc; 4142 struct psil_endpoint_config *ep_config; 4143 struct udma_filter_param *filter_param; 4144 struct udma_chan *uc; 4145 struct udma_dev *ud; 4146 4147 if (chan->device->dev->driver != &udma_driver.driver && 4148 chan->device->dev->driver != &bcdma_driver.driver && 4149 chan->device->dev->driver != &pktdma_driver.driver) 4150 return false; 4151 4152 uc = to_udma_chan(chan); 4153 ucc = &uc->config; 4154 ud = uc->ud; 4155 filter_param = param; 4156 4157 if (filter_param->atype > 2) { 4158 dev_err(ud->dev, "Invalid channel atype: %u\n", 4159 filter_param->atype); 4160 return false; 4161 } 4162 4163 if (filter_param->asel > 15) { 4164 dev_err(ud->dev, "Invalid channel asel: %u\n", 4165 filter_param->asel); 4166 return false; 4167 } 4168 4169 ucc->remote_thread_id = filter_param->remote_thread_id; 4170 ucc->atype = filter_param->atype; 4171 ucc->asel = filter_param->asel; 4172 ucc->tr_trigger_type = filter_param->tr_trigger_type; 4173 4174 if (ucc->tr_trigger_type) { 4175 ucc->dir = DMA_MEM_TO_MEM; 4176 goto triggered_bchan; 4177 } else if (ucc->remote_thread_id & 
K3_PSIL_DST_THREAD_ID_OFFSET) { 4178 ucc->dir = DMA_MEM_TO_DEV; 4179 } else { 4180 ucc->dir = DMA_DEV_TO_MEM; 4181 } 4182 4183 ep_config = psil_get_ep_config(ucc->remote_thread_id); 4184 if (IS_ERR(ep_config)) { 4185 dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n", 4186 ucc->remote_thread_id); 4187 ucc->dir = DMA_MEM_TO_MEM; 4188 ucc->remote_thread_id = -1; 4189 ucc->atype = 0; 4190 ucc->asel = 0; 4191 return false; 4192 } 4193 4194 if (ud->match_data->type == DMA_TYPE_BCDMA && 4195 ep_config->pkt_mode) { 4196 dev_err(ud->dev, 4197 "Only TR mode is supported (psi-l thread 0x%04x)\n", 4198 ucc->remote_thread_id); 4199 ucc->dir = DMA_MEM_TO_MEM; 4200 ucc->remote_thread_id = -1; 4201 ucc->atype = 0; 4202 ucc->asel = 0; 4203 return false; 4204 } 4205 4206 ucc->pkt_mode = ep_config->pkt_mode; 4207 ucc->channel_tpl = ep_config->channel_tpl; 4208 ucc->notdpkt = ep_config->notdpkt; 4209 ucc->ep_type = ep_config->ep_type; 4210 4211 if (ud->match_data->type == DMA_TYPE_PKTDMA && 4212 ep_config->mapped_channel_id >= 0) { 4213 ucc->mapped_channel_id = ep_config->mapped_channel_id; 4214 ucc->default_flow_id = ep_config->default_flow_id; 4215 } else { 4216 ucc->mapped_channel_id = -1; 4217 ucc->default_flow_id = -1; 4218 } 4219 4220 if (ucc->ep_type != PSIL_EP_NATIVE) { 4221 const struct udma_match_data *match_data = ud->match_data; 4222 4223 if (match_data->flags & UDMA_FLAG_PDMA_ACC32) 4224 ucc->enable_acc32 = ep_config->pdma_acc32; 4225 if (match_data->flags & UDMA_FLAG_PDMA_BURST) 4226 ucc->enable_burst = ep_config->pdma_burst; 4227 } 4228 4229 ucc->needs_epib = ep_config->needs_epib; 4230 ucc->psd_size = ep_config->psd_size; 4231 ucc->metadata_size = 4232 (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) + 4233 ucc->psd_size; 4234 4235 if (ucc->pkt_mode) 4236 ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) + 4237 ucc->metadata_size, ud->desc_align); 4238 4239 dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id, 4240 ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir)); 4241 4242 return true; 4243 4244 triggered_bchan: 4245 dev_dbg(ud->dev, "chan%d: triggered channel (type: %u)\n", uc->id, 4246 ucc->tr_trigger_type); 4247 4248 return true; 4249 4250 } 4251 4252 static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec, 4253 struct of_dma *ofdma) 4254 { 4255 struct udma_dev *ud = ofdma->of_dma_data; 4256 struct udma_filter_param filter_param; 4257 struct dma_chan *chan; 4258 4259 if (ud->match_data->type == DMA_TYPE_BCDMA) { 4260 if (dma_spec->args_count != 3) 4261 return NULL; 4262 4263 filter_param.tr_trigger_type = dma_spec->args[0]; 4264 filter_param.remote_thread_id = dma_spec->args[1]; 4265 filter_param.asel = dma_spec->args[2]; 4266 filter_param.atype = 0; 4267 } else { 4268 if (dma_spec->args_count != 1 && dma_spec->args_count != 2) 4269 return NULL; 4270 4271 filter_param.remote_thread_id = dma_spec->args[0]; 4272 filter_param.tr_trigger_type = 0; 4273 if (dma_spec->args_count == 2) { 4274 if (ud->match_data->type == DMA_TYPE_UDMA) { 4275 filter_param.atype = dma_spec->args[1]; 4276 filter_param.asel = 0; 4277 } else { 4278 filter_param.atype = 0; 4279 filter_param.asel = dma_spec->args[1]; 4280 } 4281 } else { 4282 filter_param.atype = 0; 4283 filter_param.asel = 0; 4284 } 4285 } 4286 4287 chan = __dma_request_channel(&ud->ddev.cap_mask, udma_dma_filter_fn, &filter_param, 4288 ofdma->of_node); 4289 if (!chan) { 4290 dev_err(ud->dev, "get channel fail in %s.\n", __func__); 4291 return ERR_PTR(-EINVAL); 4292 } 4293 4294 return chan; 
4295 } 4296 4297 static struct udma_match_data am654_main_data = { 4298 .type = DMA_TYPE_UDMA, 4299 .psil_base = 0x1000, 4300 .enable_memcpy_support = true, 4301 .statictr_z_mask = GENMASK(11, 0), 4302 .burst_size = { 4303 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ 4304 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */ 4305 0, /* No UH Channels */ 4306 }, 4307 }; 4308 4309 static struct udma_match_data am654_mcu_data = { 4310 .type = DMA_TYPE_UDMA, 4311 .psil_base = 0x6000, 4312 .enable_memcpy_support = false, 4313 .statictr_z_mask = GENMASK(11, 0), 4314 .burst_size = { 4315 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ 4316 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */ 4317 0, /* No UH Channels */ 4318 }, 4319 }; 4320 4321 static struct udma_match_data j721e_main_data = { 4322 .type = DMA_TYPE_UDMA, 4323 .psil_base = 0x1000, 4324 .enable_memcpy_support = true, 4325 .flags = UDMA_FLAGS_J7_CLASS, 4326 .statictr_z_mask = GENMASK(23, 0), 4327 .burst_size = { 4328 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ 4329 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* H Channels */ 4330 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* UH Channels */ 4331 }, 4332 }; 4333 4334 static struct udma_match_data j721e_mcu_data = { 4335 .type = DMA_TYPE_UDMA, 4336 .psil_base = 0x6000, 4337 .enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */ 4338 .flags = UDMA_FLAGS_J7_CLASS, 4339 .statictr_z_mask = GENMASK(23, 0), 4340 .burst_size = { 4341 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ 4342 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES, /* H Channels */ 4343 0, /* No UH Channels */ 4344 }, 4345 }; 4346 4347 static struct udma_soc_data am62a_dmss_csi_soc_data = { 4348 .oes = { 4349 .bcdma_rchan_data = 0xe00, 4350 .bcdma_rchan_ring = 0x1000, 4351 }, 4352 }; 4353 4354 static struct udma_soc_data j721s2_bcdma_csi_soc_data = { 4355 .oes = { 4356 .bcdma_tchan_data = 0x800, 4357 .bcdma_tchan_ring = 0xa00, 4358 .bcdma_rchan_data = 0xe00, 4359 .bcdma_rchan_ring = 0x1000, 4360 }, 4361 }; 4362 4363 static struct udma_match_data am62a_bcdma_csirx_data = { 4364 .type = DMA_TYPE_BCDMA, 4365 .psil_base = 0x3100, 4366 .enable_memcpy_support = false, 4367 .burst_size = { 4368 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ 4369 0, /* No H Channels */ 4370 0, /* No UH Channels */ 4371 }, 4372 .soc_data = &am62a_dmss_csi_soc_data, 4373 }; 4374 4375 static struct udma_match_data am64_bcdma_data = { 4376 .type = DMA_TYPE_BCDMA, 4377 .psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */ 4378 .enable_memcpy_support = true, /* Supported via bchan */ 4379 .flags = UDMA_FLAGS_J7_CLASS, 4380 .statictr_z_mask = GENMASK(23, 0), 4381 .burst_size = { 4382 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ 4383 0, /* No H Channels */ 4384 0, /* No UH Channels */ 4385 }, 4386 }; 4387 4388 static struct udma_match_data am64_pktdma_data = { 4389 .type = DMA_TYPE_PKTDMA, 4390 .psil_base = 0x1000, 4391 .enable_memcpy_support = false, /* PKTDMA does not support MEM_TO_MEM */ 4392 .flags = UDMA_FLAGS_J7_CLASS, 4393 .statictr_z_mask = GENMASK(23, 0), 4394 .burst_size = { 4395 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ 4396 0, /* No H Channels */ 4397 0, /* No UH Channels */ 4398 }, 4399 }; 4400 4401 static struct udma_match_data j721s2_bcdma_csi_data = { 4402 .type = DMA_TYPE_BCDMA, 4403 .psil_base = 0x2000, 4404 .enable_memcpy_support = false, 4405 .burst_size = { 4406 
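		/*
		 * Indexed by channel throughput level: [0] normal, [1] high,
		 * [2] ultra-high channels (consumed by udma_get_copy_align()).
		 */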
TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ 4407 0, /* No H Channels */ 4408 0, /* No UH Channels */ 4409 }, 4410 .soc_data = &j721s2_bcdma_csi_soc_data, 4411 }; 4412 4413 static struct udma_match_data j722s_bcdma_csi_data = { 4414 .type = DMA_TYPE_BCDMA, 4415 .psil_base = 0x3100, 4416 .enable_memcpy_support = false, 4417 .burst_size = { 4418 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ 4419 0, /* No H Channels */ 4420 0, /* No UH Channels */ 4421 }, 4422 .soc_data = &j721s2_bcdma_csi_soc_data, 4423 }; 4424 4425 static const struct of_device_id udma_of_match[] = { 4426 { 4427 .compatible = "ti,am654-navss-main-udmap", 4428 .data = &am654_main_data, 4429 }, 4430 { 4431 .compatible = "ti,am654-navss-mcu-udmap", 4432 .data = &am654_mcu_data, 4433 }, { 4434 .compatible = "ti,j721e-navss-main-udmap", 4435 .data = &j721e_main_data, 4436 }, { 4437 .compatible = "ti,j721e-navss-mcu-udmap", 4438 .data = &j721e_mcu_data, 4439 }, 4440 { 4441 .compatible = "ti,am64-dmss-bcdma", 4442 .data = &am64_bcdma_data, 4443 }, 4444 { 4445 .compatible = "ti,am64-dmss-pktdma", 4446 .data = &am64_pktdma_data, 4447 }, 4448 { 4449 .compatible = "ti,am62a-dmss-bcdma-csirx", 4450 .data = &am62a_bcdma_csirx_data, 4451 }, 4452 { 4453 .compatible = "ti,j721s2-dmss-bcdma-csi", 4454 .data = &j721s2_bcdma_csi_data, 4455 }, 4456 { 4457 .compatible = "ti,j722s-dmss-bcdma-csi", 4458 .data = &j722s_bcdma_csi_data, 4459 }, 4460 { /* Sentinel */ }, 4461 }; 4462 MODULE_DEVICE_TABLE(of, udma_of_match); 4463 4464 static struct udma_soc_data am654_soc_data = { 4465 .oes = { 4466 .udma_rchan = 0x200, 4467 }, 4468 }; 4469 4470 static struct udma_soc_data j721e_soc_data = { 4471 .oes = { 4472 .udma_rchan = 0x400, 4473 }, 4474 }; 4475 4476 static struct udma_soc_data j7200_soc_data = { 4477 .oes = { 4478 .udma_rchan = 0x80, 4479 }, 4480 }; 4481 4482 static struct udma_soc_data am64_soc_data = { 4483 .oes = { 4484 .bcdma_bchan_data = 0x2200, 4485 .bcdma_bchan_ring = 0x2400, 4486 .bcdma_tchan_data = 0x2800, 4487 .bcdma_tchan_ring = 0x2a00, 4488 .bcdma_rchan_data = 0x2e00, 4489 .bcdma_rchan_ring = 0x3000, 4490 .pktdma_tchan_flow = 0x1200, 4491 .pktdma_rchan_flow = 0x1600, 4492 }, 4493 .bcdma_trigger_event_offset = 0xc400, 4494 }; 4495 4496 static const struct soc_device_attribute k3_soc_devices[] = { 4497 { .family = "AM65X", .data = &am654_soc_data }, 4498 { .family = "J721E", .data = &j721e_soc_data }, 4499 { .family = "J7200", .data = &j7200_soc_data }, 4500 { .family = "AM64X", .data = &am64_soc_data }, 4501 { .family = "J721S2", .data = &j721e_soc_data}, 4502 { .family = "AM62X", .data = &am64_soc_data }, 4503 { .family = "AM62AX", .data = &am64_soc_data }, 4504 { .family = "J784S4", .data = &j721e_soc_data }, 4505 { .family = "AM62PX", .data = &am64_soc_data }, 4506 { .family = "J722S", .data = &am64_soc_data }, 4507 { /* sentinel */ } 4508 }; 4509 4510 static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud) 4511 { 4512 u32 cap2, cap3, cap4; 4513 int i; 4514 4515 ud->mmrs[MMR_GCFG] = devm_platform_ioremap_resource_byname(pdev, mmr_names[MMR_GCFG]); 4516 if (IS_ERR(ud->mmrs[MMR_GCFG])) 4517 return PTR_ERR(ud->mmrs[MMR_GCFG]); 4518 4519 cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28); 4520 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c); 4521 4522 switch (ud->match_data->type) { 4523 case DMA_TYPE_UDMA: 4524 ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3); 4525 ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2); 4526 ud->echan_cnt = UDMA_CAP2_ECHAN_CNT(cap2); 4527 ud->rchan_cnt = 
UDMA_CAP2_RCHAN_CNT(cap2); 4528 break; 4529 case DMA_TYPE_BCDMA: 4530 ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2) + 4531 BCDMA_CAP3_HBCHAN_CNT(cap3) + 4532 BCDMA_CAP3_UBCHAN_CNT(cap3); 4533 ud->tchan_cnt = BCDMA_CAP2_TCHAN_CNT(cap2); 4534 ud->rchan_cnt = BCDMA_CAP2_RCHAN_CNT(cap2); 4535 ud->rflow_cnt = ud->rchan_cnt; 4536 break; 4537 case DMA_TYPE_PKTDMA: 4538 cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30); 4539 ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2); 4540 ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2); 4541 ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3); 4542 ud->tflow_cnt = PKTDMA_CAP4_TFLOW_CNT(cap4); 4543 break; 4544 default: 4545 return -EINVAL; 4546 } 4547 4548 for (i = 1; i < MMR_LAST; i++) { 4549 if (i == MMR_BCHANRT && ud->bchan_cnt == 0) 4550 continue; 4551 if (i == MMR_TCHANRT && ud->tchan_cnt == 0) 4552 continue; 4553 if (i == MMR_RCHANRT && ud->rchan_cnt == 0) 4554 continue; 4555 4556 ud->mmrs[i] = devm_platform_ioremap_resource_byname(pdev, mmr_names[i]); 4557 if (IS_ERR(ud->mmrs[i])) 4558 return PTR_ERR(ud->mmrs[i]); 4559 } 4560 4561 return 0; 4562 } 4563 4564 static void udma_mark_resource_ranges(struct udma_dev *ud, unsigned long *map, 4565 struct ti_sci_resource_desc *rm_desc, 4566 char *name) 4567 { 4568 bitmap_clear(map, rm_desc->start, rm_desc->num); 4569 bitmap_clear(map, rm_desc->start_sec, rm_desc->num_sec); 4570 dev_dbg(ud->dev, "ti_sci resource range for %s: %d:%d | %d:%d\n", name, 4571 rm_desc->start, rm_desc->num, rm_desc->start_sec, 4572 rm_desc->num_sec); 4573 } 4574 4575 static const char * const range_names[] = { 4576 [RM_RANGE_BCHAN] = "ti,sci-rm-range-bchan", 4577 [RM_RANGE_TCHAN] = "ti,sci-rm-range-tchan", 4578 [RM_RANGE_RCHAN] = "ti,sci-rm-range-rchan", 4579 [RM_RANGE_RFLOW] = "ti,sci-rm-range-rflow", 4580 [RM_RANGE_TFLOW] = "ti,sci-rm-range-tflow", 4581 }; 4582 4583 static int udma_setup_resources(struct udma_dev *ud) 4584 { 4585 int ret, i, j; 4586 struct device *dev = ud->dev; 4587 struct ti_sci_resource *rm_res, irq_res; 4588 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 4589 u32 cap3; 4590 4591 /* Set up the throughput level start indexes */ 4592 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c); 4593 if (of_device_is_compatible(dev->of_node, 4594 "ti,am654-navss-main-udmap")) { 4595 ud->tchan_tpl.levels = 2; 4596 ud->tchan_tpl.start_idx[0] = 8; 4597 } else if (of_device_is_compatible(dev->of_node, 4598 "ti,am654-navss-mcu-udmap")) { 4599 ud->tchan_tpl.levels = 2; 4600 ud->tchan_tpl.start_idx[0] = 2; 4601 } else if (UDMA_CAP3_UCHAN_CNT(cap3)) { 4602 ud->tchan_tpl.levels = 3; 4603 ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3); 4604 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3); 4605 } else if (UDMA_CAP3_HCHAN_CNT(cap3)) { 4606 ud->tchan_tpl.levels = 2; 4607 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3); 4608 } else { 4609 ud->tchan_tpl.levels = 1; 4610 } 4611 4612 ud->rchan_tpl.levels = ud->tchan_tpl.levels; 4613 ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0]; 4614 ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1]; 4615 4616 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt), 4617 sizeof(unsigned long), GFP_KERNEL); 4618 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans), 4619 GFP_KERNEL); 4620 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt), 4621 sizeof(unsigned long), GFP_KERNEL); 4622 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans), 4623 GFP_KERNEL); 4624 ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt), 4625 sizeof(unsigned 
long), 4626 GFP_KERNEL); 4627 ud->rflow_gp_map_allocated = devm_kcalloc(dev, 4628 BITS_TO_LONGS(ud->rflow_cnt), 4629 sizeof(unsigned long), 4630 GFP_KERNEL); 4631 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt), 4632 sizeof(unsigned long), 4633 GFP_KERNEL); 4634 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows), 4635 GFP_KERNEL); 4636 4637 if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map || 4638 !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans || 4639 !ud->rflows || !ud->rflow_in_use) 4640 return -ENOMEM; 4641 4642 /* 4643 * RX flows with the same Ids as RX channels are reserved to be used 4644 * as default flows if remote HW can't generate flow_ids. Those 4645 * RX flows can be requested only explicitly by id. 4646 */ 4647 bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt); 4648 4649 /* by default no GP rflows are assigned to Linux */ 4650 bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt); 4651 4652 /* Get resource ranges from tisci */ 4653 for (i = 0; i < RM_RANGE_LAST; i++) { 4654 if (i == RM_RANGE_BCHAN || i == RM_RANGE_TFLOW) 4655 continue; 4656 4657 tisci_rm->rm_ranges[i] = 4658 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev, 4659 tisci_rm->tisci_dev_id, 4660 (char *)range_names[i]); 4661 } 4662 4663 /* tchan ranges */ 4664 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; 4665 if (IS_ERR(rm_res)) { 4666 bitmap_zero(ud->tchan_map, ud->tchan_cnt); 4667 irq_res.sets = 1; 4668 } else { 4669 bitmap_fill(ud->tchan_map, ud->tchan_cnt); 4670 for (i = 0; i < rm_res->sets; i++) 4671 udma_mark_resource_ranges(ud, ud->tchan_map, 4672 &rm_res->desc[i], "tchan"); 4673 irq_res.sets = rm_res->sets; 4674 } 4675 4676 /* rchan and matching default flow ranges */ 4677 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; 4678 if (IS_ERR(rm_res)) { 4679 bitmap_zero(ud->rchan_map, ud->rchan_cnt); 4680 irq_res.sets++; 4681 } else { 4682 bitmap_fill(ud->rchan_map, ud->rchan_cnt); 4683 for (i = 0; i < rm_res->sets; i++) 4684 udma_mark_resource_ranges(ud, ud->rchan_map, 4685 &rm_res->desc[i], "rchan"); 4686 irq_res.sets += rm_res->sets; 4687 } 4688 4689 irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL); 4690 if (!irq_res.desc) 4691 return -ENOMEM; 4692 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; 4693 if (IS_ERR(rm_res)) { 4694 irq_res.desc[0].start = 0; 4695 irq_res.desc[0].num = ud->tchan_cnt; 4696 i = 1; 4697 } else { 4698 for (i = 0; i < rm_res->sets; i++) { 4699 irq_res.desc[i].start = rm_res->desc[i].start; 4700 irq_res.desc[i].num = rm_res->desc[i].num; 4701 irq_res.desc[i].start_sec = rm_res->desc[i].start_sec; 4702 irq_res.desc[i].num_sec = rm_res->desc[i].num_sec; 4703 } 4704 } 4705 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; 4706 if (IS_ERR(rm_res)) { 4707 irq_res.desc[i].start = 0; 4708 irq_res.desc[i].num = ud->rchan_cnt; 4709 } else { 4710 for (j = 0; j < rm_res->sets; j++, i++) { 4711 if (rm_res->desc[j].num) { 4712 irq_res.desc[i].start = rm_res->desc[j].start + 4713 ud->soc_data->oes.udma_rchan; 4714 irq_res.desc[i].num = rm_res->desc[j].num; 4715 } 4716 if (rm_res->desc[j].num_sec) { 4717 irq_res.desc[i].start_sec = rm_res->desc[j].start_sec + 4718 ud->soc_data->oes.udma_rchan; 4719 irq_res.desc[i].num_sec = rm_res->desc[j].num_sec; 4720 } 4721 } 4722 } 4723 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res); 4724 kfree(irq_res.desc); 4725 if (ret) { 4726 dev_err(ud->dev, "Failed to allocate MSI interrupts\n"); 4727 return ret; 4728 } 4729 4730 /* GP rflow ranges */ 4731 rm_res = 
tisci_rm->rm_ranges[RM_RANGE_RFLOW]; 4732 if (IS_ERR(rm_res)) { 4733 /* all gp flows are assigned exclusively to Linux */ 4734 bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt, 4735 ud->rflow_cnt - ud->rchan_cnt); 4736 } else { 4737 for (i = 0; i < rm_res->sets; i++) 4738 udma_mark_resource_ranges(ud, ud->rflow_gp_map, 4739 &rm_res->desc[i], "gp-rflow"); 4740 } 4741 4742 return 0; 4743 } 4744 4745 static int bcdma_setup_resources(struct udma_dev *ud) 4746 { 4747 int ret, i, j; 4748 struct device *dev = ud->dev; 4749 struct ti_sci_resource *rm_res, irq_res; 4750 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 4751 const struct udma_oes_offsets *oes = &ud->soc_data->oes; 4752 u32 cap; 4753 4754 /* Set up the throughput level start indexes */ 4755 cap = udma_read(ud->mmrs[MMR_GCFG], 0x2c); 4756 if (BCDMA_CAP3_UBCHAN_CNT(cap)) { 4757 ud->bchan_tpl.levels = 3; 4758 ud->bchan_tpl.start_idx[1] = BCDMA_CAP3_UBCHAN_CNT(cap); 4759 ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap); 4760 } else if (BCDMA_CAP3_HBCHAN_CNT(cap)) { 4761 ud->bchan_tpl.levels = 2; 4762 ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap); 4763 } else { 4764 ud->bchan_tpl.levels = 1; 4765 } 4766 4767 cap = udma_read(ud->mmrs[MMR_GCFG], 0x30); 4768 if (BCDMA_CAP4_URCHAN_CNT(cap)) { 4769 ud->rchan_tpl.levels = 3; 4770 ud->rchan_tpl.start_idx[1] = BCDMA_CAP4_URCHAN_CNT(cap); 4771 ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap); 4772 } else if (BCDMA_CAP4_HRCHAN_CNT(cap)) { 4773 ud->rchan_tpl.levels = 2; 4774 ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap); 4775 } else { 4776 ud->rchan_tpl.levels = 1; 4777 } 4778 4779 if (BCDMA_CAP4_UTCHAN_CNT(cap)) { 4780 ud->tchan_tpl.levels = 3; 4781 ud->tchan_tpl.start_idx[1] = BCDMA_CAP4_UTCHAN_CNT(cap); 4782 ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap); 4783 } else if (BCDMA_CAP4_HTCHAN_CNT(cap)) { 4784 ud->tchan_tpl.levels = 2; 4785 ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap); 4786 } else { 4787 ud->tchan_tpl.levels = 1; 4788 } 4789 4790 ud->bchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->bchan_cnt), 4791 sizeof(unsigned long), GFP_KERNEL); 4792 ud->bchans = devm_kcalloc(dev, ud->bchan_cnt, sizeof(*ud->bchans), 4793 GFP_KERNEL); 4794 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt), 4795 sizeof(unsigned long), GFP_KERNEL); 4796 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans), 4797 GFP_KERNEL); 4798 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt), 4799 sizeof(unsigned long), GFP_KERNEL); 4800 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans), 4801 GFP_KERNEL); 4802 /* BCDMA do not really have flows, but the driver expect it */ 4803 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rchan_cnt), 4804 sizeof(unsigned long), 4805 GFP_KERNEL); 4806 ud->rflows = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rflows), 4807 GFP_KERNEL); 4808 4809 if (!ud->bchan_map || !ud->tchan_map || !ud->rchan_map || 4810 !ud->rflow_in_use || !ud->bchans || !ud->tchans || !ud->rchans || 4811 !ud->rflows) 4812 return -ENOMEM; 4813 4814 /* Get resource ranges from tisci */ 4815 for (i = 0; i < RM_RANGE_LAST; i++) { 4816 if (i == RM_RANGE_RFLOW || i == RM_RANGE_TFLOW) 4817 continue; 4818 if (i == RM_RANGE_BCHAN && ud->bchan_cnt == 0) 4819 continue; 4820 if (i == RM_RANGE_TCHAN && ud->tchan_cnt == 0) 4821 continue; 4822 if (i == RM_RANGE_RCHAN && ud->rchan_cnt == 0) 4823 continue; 4824 4825 tisci_rm->rm_ranges[i] = 4826 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev, 4827 
tisci_rm->tisci_dev_id, 4828 (char *)range_names[i]); 4829 } 4830 4831 irq_res.sets = 0; 4832 4833 /* bchan ranges */ 4834 if (ud->bchan_cnt) { 4835 rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN]; 4836 if (IS_ERR(rm_res)) { 4837 bitmap_zero(ud->bchan_map, ud->bchan_cnt); 4838 irq_res.sets++; 4839 } else { 4840 bitmap_fill(ud->bchan_map, ud->bchan_cnt); 4841 for (i = 0; i < rm_res->sets; i++) 4842 udma_mark_resource_ranges(ud, ud->bchan_map, 4843 &rm_res->desc[i], 4844 "bchan"); 4845 irq_res.sets += rm_res->sets; 4846 } 4847 } 4848 4849 /* tchan ranges */ 4850 if (ud->tchan_cnt) { 4851 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; 4852 if (IS_ERR(rm_res)) { 4853 bitmap_zero(ud->tchan_map, ud->tchan_cnt); 4854 irq_res.sets += 2; 4855 } else { 4856 bitmap_fill(ud->tchan_map, ud->tchan_cnt); 4857 for (i = 0; i < rm_res->sets; i++) 4858 udma_mark_resource_ranges(ud, ud->tchan_map, 4859 &rm_res->desc[i], 4860 "tchan"); 4861 irq_res.sets += rm_res->sets * 2; 4862 } 4863 } 4864 4865 /* rchan ranges */ 4866 if (ud->rchan_cnt) { 4867 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; 4868 if (IS_ERR(rm_res)) { 4869 bitmap_zero(ud->rchan_map, ud->rchan_cnt); 4870 irq_res.sets += 2; 4871 } else { 4872 bitmap_fill(ud->rchan_map, ud->rchan_cnt); 4873 for (i = 0; i < rm_res->sets; i++) 4874 udma_mark_resource_ranges(ud, ud->rchan_map, 4875 &rm_res->desc[i], 4876 "rchan"); 4877 irq_res.sets += rm_res->sets * 2; 4878 } 4879 } 4880 4881 irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL); 4882 if (!irq_res.desc) 4883 return -ENOMEM; 4884 if (ud->bchan_cnt) { 4885 rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN]; 4886 if (IS_ERR(rm_res)) { 4887 irq_res.desc[0].start = oes->bcdma_bchan_ring; 4888 irq_res.desc[0].num = ud->bchan_cnt; 4889 i = 1; 4890 } else { 4891 for (i = 0; i < rm_res->sets; i++) { 4892 irq_res.desc[i].start = rm_res->desc[i].start + 4893 oes->bcdma_bchan_ring; 4894 irq_res.desc[i].num = rm_res->desc[i].num; 4895 4896 if (rm_res->desc[i].num_sec) { 4897 irq_res.desc[i].start_sec = rm_res->desc[i].start_sec + 4898 oes->bcdma_bchan_ring; 4899 irq_res.desc[i].num_sec = rm_res->desc[i].num_sec; 4900 } 4901 } 4902 } 4903 } else { 4904 i = 0; 4905 } 4906 4907 if (ud->tchan_cnt) { 4908 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; 4909 if (IS_ERR(rm_res)) { 4910 irq_res.desc[i].start = oes->bcdma_tchan_data; 4911 irq_res.desc[i].num = ud->tchan_cnt; 4912 irq_res.desc[i + 1].start = oes->bcdma_tchan_ring; 4913 irq_res.desc[i + 1].num = ud->tchan_cnt; 4914 i += 2; 4915 } else { 4916 for (j = 0; j < rm_res->sets; j++, i += 2) { 4917 irq_res.desc[i].start = rm_res->desc[j].start + 4918 oes->bcdma_tchan_data; 4919 irq_res.desc[i].num = rm_res->desc[j].num; 4920 4921 irq_res.desc[i + 1].start = rm_res->desc[j].start + 4922 oes->bcdma_tchan_ring; 4923 irq_res.desc[i + 1].num = rm_res->desc[j].num; 4924 4925 if (rm_res->desc[j].num_sec) { 4926 irq_res.desc[i].start_sec = rm_res->desc[j].start_sec + 4927 oes->bcdma_tchan_data; 4928 irq_res.desc[i].num_sec = rm_res->desc[j].num_sec; 4929 irq_res.desc[i + 1].start_sec = rm_res->desc[j].start_sec + 4930 oes->bcdma_tchan_ring; 4931 irq_res.desc[i + 1].num_sec = rm_res->desc[j].num_sec; 4932 } 4933 } 4934 } 4935 } 4936 if (ud->rchan_cnt) { 4937 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; 4938 if (IS_ERR(rm_res)) { 4939 irq_res.desc[i].start = oes->bcdma_rchan_data; 4940 irq_res.desc[i].num = ud->rchan_cnt; 4941 irq_res.desc[i + 1].start = oes->bcdma_rchan_ring; 4942 irq_res.desc[i + 1].num = ud->rchan_cnt; 4943 i += 2; 4944 } else { 4945 for (j = 0; j 
< rm_res->sets; j++, i += 2) { 4946 irq_res.desc[i].start = rm_res->desc[j].start + 4947 oes->bcdma_rchan_data; 4948 irq_res.desc[i].num = rm_res->desc[j].num; 4949 4950 irq_res.desc[i + 1].start = rm_res->desc[j].start + 4951 oes->bcdma_rchan_ring; 4952 irq_res.desc[i + 1].num = rm_res->desc[j].num; 4953 4954 if (rm_res->desc[j].num_sec) { 4955 irq_res.desc[i].start_sec = rm_res->desc[j].start_sec + 4956 oes->bcdma_rchan_data; 4957 irq_res.desc[i].num_sec = rm_res->desc[j].num_sec; 4958 irq_res.desc[i + 1].start_sec = rm_res->desc[j].start_sec + 4959 oes->bcdma_rchan_ring; 4960 irq_res.desc[i + 1].num_sec = rm_res->desc[j].num_sec; 4961 } 4962 } 4963 } 4964 } 4965 4966 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res); 4967 kfree(irq_res.desc); 4968 if (ret) { 4969 dev_err(ud->dev, "Failed to allocate MSI interrupts\n"); 4970 return ret; 4971 } 4972 4973 return 0; 4974 } 4975 4976 static int pktdma_setup_resources(struct udma_dev *ud) 4977 { 4978 int ret, i, j; 4979 struct device *dev = ud->dev; 4980 struct ti_sci_resource *rm_res, irq_res; 4981 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 4982 const struct udma_oes_offsets *oes = &ud->soc_data->oes; 4983 u32 cap3; 4984 4985 /* Set up the throughput level start indexes */ 4986 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c); 4987 if (UDMA_CAP3_UCHAN_CNT(cap3)) { 4988 ud->tchan_tpl.levels = 3; 4989 ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3); 4990 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3); 4991 } else if (UDMA_CAP3_HCHAN_CNT(cap3)) { 4992 ud->tchan_tpl.levels = 2; 4993 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3); 4994 } else { 4995 ud->tchan_tpl.levels = 1; 4996 } 4997 4998 ud->rchan_tpl.levels = ud->tchan_tpl.levels; 4999 ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0]; 5000 ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1]; 5001 5002 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt), 5003 sizeof(unsigned long), GFP_KERNEL); 5004 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans), 5005 GFP_KERNEL); 5006 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt), 5007 sizeof(unsigned long), GFP_KERNEL); 5008 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans), 5009 GFP_KERNEL); 5010 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt), 5011 sizeof(unsigned long), 5012 GFP_KERNEL); 5013 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows), 5014 GFP_KERNEL); 5015 ud->tflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tflow_cnt), 5016 sizeof(unsigned long), GFP_KERNEL); 5017 5018 if (!ud->tchan_map || !ud->rchan_map || !ud->tflow_map || !ud->tchans || 5019 !ud->rchans || !ud->rflows || !ud->rflow_in_use) 5020 return -ENOMEM; 5021 5022 /* Get resource ranges from tisci */ 5023 for (i = 0; i < RM_RANGE_LAST; i++) { 5024 if (i == RM_RANGE_BCHAN) 5025 continue; 5026 5027 tisci_rm->rm_ranges[i] = 5028 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev, 5029 tisci_rm->tisci_dev_id, 5030 (char *)range_names[i]); 5031 } 5032 5033 /* tchan ranges */ 5034 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; 5035 if (IS_ERR(rm_res)) { 5036 bitmap_zero(ud->tchan_map, ud->tchan_cnt); 5037 } else { 5038 bitmap_fill(ud->tchan_map, ud->tchan_cnt); 5039 for (i = 0; i < rm_res->sets; i++) 5040 udma_mark_resource_ranges(ud, ud->tchan_map, 5041 &rm_res->desc[i], "tchan"); 5042 } 5043 5044 /* rchan ranges */ 5045 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; 5046 if (IS_ERR(rm_res)) { 5047 bitmap_zero(ud->rchan_map, ud->rchan_cnt); 
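		/* all rchans are assigned exclusively to Linux */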
5048 } else { 5049 bitmap_fill(ud->rchan_map, ud->rchan_cnt); 5050 for (i = 0; i < rm_res->sets; i++) 5051 udma_mark_resource_ranges(ud, ud->rchan_map, 5052 &rm_res->desc[i], "rchan"); 5053 } 5054 5055 /* rflow ranges */ 5056 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW]; 5057 if (IS_ERR(rm_res)) { 5058 /* all rflows are assigned exclusively to Linux */ 5059 bitmap_zero(ud->rflow_in_use, ud->rflow_cnt); 5060 irq_res.sets = 1; 5061 } else { 5062 bitmap_fill(ud->rflow_in_use, ud->rflow_cnt); 5063 for (i = 0; i < rm_res->sets; i++) 5064 udma_mark_resource_ranges(ud, ud->rflow_in_use, 5065 &rm_res->desc[i], "rflow"); 5066 irq_res.sets = rm_res->sets; 5067 } 5068 5069 /* tflow ranges */ 5070 rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW]; 5071 if (IS_ERR(rm_res)) { 5072 /* all tflows are assigned exclusively to Linux */ 5073 bitmap_zero(ud->tflow_map, ud->tflow_cnt); 5074 irq_res.sets++; 5075 } else { 5076 bitmap_fill(ud->tflow_map, ud->tflow_cnt); 5077 for (i = 0; i < rm_res->sets; i++) 5078 udma_mark_resource_ranges(ud, ud->tflow_map, 5079 &rm_res->desc[i], "tflow"); 5080 irq_res.sets += rm_res->sets; 5081 } 5082 5083 irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL); 5084 if (!irq_res.desc) 5085 return -ENOMEM; 5086 rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW]; 5087 if (IS_ERR(rm_res)) { 5088 irq_res.desc[0].start = oes->pktdma_tchan_flow; 5089 irq_res.desc[0].num = ud->tflow_cnt; 5090 i = 1; 5091 } else { 5092 for (i = 0; i < rm_res->sets; i++) { 5093 irq_res.desc[i].start = rm_res->desc[i].start + 5094 oes->pktdma_tchan_flow; 5095 irq_res.desc[i].num = rm_res->desc[i].num; 5096 5097 if (rm_res->desc[i].num_sec) { 5098 irq_res.desc[i].start_sec = rm_res->desc[i].start_sec + 5099 oes->pktdma_tchan_flow; 5100 irq_res.desc[i].num_sec = rm_res->desc[i].num_sec; 5101 } 5102 } 5103 } 5104 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW]; 5105 if (IS_ERR(rm_res)) { 5106 irq_res.desc[i].start = oes->pktdma_rchan_flow; 5107 irq_res.desc[i].num = ud->rflow_cnt; 5108 } else { 5109 for (j = 0; j < rm_res->sets; j++, i++) { 5110 irq_res.desc[i].start = rm_res->desc[j].start + 5111 oes->pktdma_rchan_flow; 5112 irq_res.desc[i].num = rm_res->desc[j].num; 5113 5114 if (rm_res->desc[j].num_sec) { 5115 irq_res.desc[i].start_sec = rm_res->desc[j].start_sec + 5116 oes->pktdma_rchan_flow; 5117 irq_res.desc[i].num_sec = rm_res->desc[j].num_sec; 5118 } 5119 } 5120 } 5121 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res); 5122 kfree(irq_res.desc); 5123 if (ret) { 5124 dev_err(ud->dev, "Failed to allocate MSI interrupts\n"); 5125 return ret; 5126 } 5127 5128 return 0; 5129 } 5130 5131 static int setup_resources(struct udma_dev *ud) 5132 { 5133 struct device *dev = ud->dev; 5134 int ch_count, ret; 5135 5136 switch (ud->match_data->type) { 5137 case DMA_TYPE_UDMA: 5138 ret = udma_setup_resources(ud); 5139 break; 5140 case DMA_TYPE_BCDMA: 5141 ret = bcdma_setup_resources(ud); 5142 break; 5143 case DMA_TYPE_PKTDMA: 5144 ret = pktdma_setup_resources(ud); 5145 break; 5146 default: 5147 return -EINVAL; 5148 } 5149 5150 if (ret) 5151 return ret; 5152 5153 ch_count = ud->bchan_cnt + ud->tchan_cnt + ud->rchan_cnt; 5154 if (ud->bchan_cnt) 5155 ch_count -= bitmap_weight(ud->bchan_map, ud->bchan_cnt); 5156 ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt); 5157 ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt); 5158 if (!ch_count) 5159 return -ENODEV; 5160 5161 ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels), 5162 GFP_KERNEL); 5163 if (!ud->channels) 5164 return -ENOMEM; 5165 
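	/*
	 * Log the per-type resource split, i.e. how many channels/flows remain
	 * available to Linux after the TISCI resource ranges were applied
	 * (e.g. "Channels: 24 (tchan: 12, rchan: 12, gp-rflow: 32)" for a UDMA
	 * instance; numbers are illustrative only).
	 */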
5166 switch (ud->match_data->type) { 5167 case DMA_TYPE_UDMA: 5168 dev_info(dev, 5169 "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n", 5170 ch_count, 5171 ud->tchan_cnt - bitmap_weight(ud->tchan_map, 5172 ud->tchan_cnt), 5173 ud->rchan_cnt - bitmap_weight(ud->rchan_map, 5174 ud->rchan_cnt), 5175 ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map, 5176 ud->rflow_cnt)); 5177 break; 5178 case DMA_TYPE_BCDMA: 5179 dev_info(dev, 5180 "Channels: %d (bchan: %u, tchan: %u, rchan: %u)\n", 5181 ch_count, 5182 ud->bchan_cnt - bitmap_weight(ud->bchan_map, 5183 ud->bchan_cnt), 5184 ud->tchan_cnt - bitmap_weight(ud->tchan_map, 5185 ud->tchan_cnt), 5186 ud->rchan_cnt - bitmap_weight(ud->rchan_map, 5187 ud->rchan_cnt)); 5188 break; 5189 case DMA_TYPE_PKTDMA: 5190 dev_info(dev, 5191 "Channels: %d (tchan: %u, rchan: %u)\n", 5192 ch_count, 5193 ud->tchan_cnt - bitmap_weight(ud->tchan_map, 5194 ud->tchan_cnt), 5195 ud->rchan_cnt - bitmap_weight(ud->rchan_map, 5196 ud->rchan_cnt)); 5197 break; 5198 default: 5199 break; 5200 } 5201 5202 return ch_count; 5203 } 5204 5205 static int udma_setup_rx_flush(struct udma_dev *ud) 5206 { 5207 struct udma_rx_flush *rx_flush = &ud->rx_flush; 5208 struct cppi5_desc_hdr_t *tr_desc; 5209 struct cppi5_tr_type1_t *tr_req; 5210 struct cppi5_host_desc_t *desc; 5211 struct device *dev = ud->dev; 5212 struct udma_hwdesc *hwdesc; 5213 size_t tr_size; 5214 5215 /* Allocate 1K buffer for discarded data on RX channel teardown */ 5216 rx_flush->buffer_size = SZ_1K; 5217 rx_flush->buffer_vaddr = devm_kzalloc(dev, rx_flush->buffer_size, 5218 GFP_KERNEL); 5219 if (!rx_flush->buffer_vaddr) 5220 return -ENOMEM; 5221 5222 rx_flush->buffer_paddr = dma_map_single(dev, rx_flush->buffer_vaddr, 5223 rx_flush->buffer_size, 5224 DMA_TO_DEVICE); 5225 if (dma_mapping_error(dev, rx_flush->buffer_paddr)) 5226 return -ENOMEM; 5227 5228 /* Set up descriptor to be used for TR mode */ 5229 hwdesc = &rx_flush->hwdescs[0]; 5230 tr_size = sizeof(struct cppi5_tr_type1_t); 5231 hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 1); 5232 hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size, 5233 ud->desc_align); 5234 5235 hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size, 5236 GFP_KERNEL); 5237 if (!hwdesc->cppi5_desc_vaddr) 5238 return -ENOMEM; 5239 5240 hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr, 5241 hwdesc->cppi5_desc_size, 5242 DMA_TO_DEVICE); 5243 if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr)) 5244 return -ENOMEM; 5245 5246 /* Start of the TR req records */ 5247 hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size; 5248 /* Start address of the TR response array */ 5249 hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size; 5250 5251 tr_desc = hwdesc->cppi5_desc_vaddr; 5252 cppi5_trdesc_init(tr_desc, 1, tr_size, 0, 0); 5253 cppi5_desc_set_pktids(tr_desc, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT); 5254 cppi5_desc_set_retpolicy(tr_desc, 0, 0); 5255 5256 tr_req = hwdesc->tr_req_base; 5257 cppi5_tr_init(&tr_req->flags, CPPI5_TR_TYPE1, false, false, 5258 CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 5259 cppi5_tr_csf_set(&tr_req->flags, CPPI5_TR_CSF_SUPR_EVT); 5260 5261 tr_req->addr = rx_flush->buffer_paddr; 5262 tr_req->icnt0 = rx_flush->buffer_size; 5263 tr_req->icnt1 = 1; 5264 5265 dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr, 5266 hwdesc->cppi5_desc_size, DMA_TO_DEVICE); 5267 5268 /* Set up descriptor to be used for packet mode */ 5269 hwdesc = &rx_flush->hwdescs[1]; 5270 hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) + 5271 
CPPI5_INFO0_HDESC_EPIB_SIZE + 5272 CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE, 5273 ud->desc_align); 5274 5275 hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size, 5276 GFP_KERNEL); 5277 if (!hwdesc->cppi5_desc_vaddr) 5278 return -ENOMEM; 5279 5280 hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr, 5281 hwdesc->cppi5_desc_size, 5282 DMA_TO_DEVICE); 5283 if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr)) 5284 return -ENOMEM; 5285 5286 desc = hwdesc->cppi5_desc_vaddr; 5287 cppi5_hdesc_init(desc, 0, 0); 5288 cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT); 5289 cppi5_desc_set_retpolicy(&desc->hdr, 0, 0); 5290 5291 cppi5_hdesc_attach_buf(desc, 5292 rx_flush->buffer_paddr, rx_flush->buffer_size, 5293 rx_flush->buffer_paddr, rx_flush->buffer_size); 5294 5295 dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr, 5296 hwdesc->cppi5_desc_size, DMA_TO_DEVICE); 5297 return 0; 5298 } 5299 5300 #ifdef CONFIG_DEBUG_FS 5301 static void udma_dbg_summary_show_chan(struct seq_file *s, 5302 struct dma_chan *chan) 5303 { 5304 struct udma_chan *uc = to_udma_chan(chan); 5305 struct udma_chan_config *ucc = &uc->config; 5306 5307 seq_printf(s, " %-13s| %s", dma_chan_name(chan), 5308 chan->dbg_client_name ?: "in-use"); 5309 if (ucc->tr_trigger_type) 5310 seq_puts(s, " (triggered, "); 5311 else 5312 seq_printf(s, " (%s, ", 5313 dmaengine_get_direction_text(uc->config.dir)); 5314 5315 switch (uc->config.dir) { 5316 case DMA_MEM_TO_MEM: 5317 if (uc->ud->match_data->type == DMA_TYPE_BCDMA) { 5318 seq_printf(s, "bchan%d)\n", uc->bchan->id); 5319 return; 5320 } 5321 5322 seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id, 5323 ucc->src_thread, ucc->dst_thread); 5324 break; 5325 case DMA_DEV_TO_MEM: 5326 seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id, 5327 ucc->src_thread, ucc->dst_thread); 5328 if (uc->ud->match_data->type == DMA_TYPE_PKTDMA) 5329 seq_printf(s, "rflow%d, ", uc->rflow->id); 5330 break; 5331 case DMA_MEM_TO_DEV: 5332 seq_printf(s, "tchan%d [0x%04x -> 0x%04x], ", uc->tchan->id, 5333 ucc->src_thread, ucc->dst_thread); 5334 if (uc->ud->match_data->type == DMA_TYPE_PKTDMA) 5335 seq_printf(s, "tflow%d, ", uc->tchan->tflow_id); 5336 break; 5337 default: 5338 seq_printf(s, ")\n"); 5339 return; 5340 } 5341 5342 if (ucc->ep_type == PSIL_EP_NATIVE) { 5343 seq_printf(s, "PSI-L Native"); 5344 if (ucc->metadata_size) { 5345 seq_printf(s, "[%s", ucc->needs_epib ? " EPIB" : ""); 5346 if (ucc->psd_size) 5347 seq_printf(s, " PSDsize:%u", ucc->psd_size); 5348 seq_printf(s, " ]"); 5349 } 5350 } else { 5351 seq_printf(s, "PDMA"); 5352 if (ucc->enable_acc32 || ucc->enable_burst) 5353 seq_printf(s, "[%s%s ]", 5354 ucc->enable_acc32 ? " ACC32" : "", 5355 ucc->enable_burst ? " BURST" : ""); 5356 } 5357 5358 seq_printf(s, ", %s)\n", ucc->pkt_mode ? 
"Packet mode" : "TR mode"); 5359 } 5360 5361 static void udma_dbg_summary_show(struct seq_file *s, 5362 struct dma_device *dma_dev) 5363 { 5364 struct dma_chan *chan; 5365 5366 list_for_each_entry(chan, &dma_dev->channels, device_node) { 5367 if (chan->client_count) 5368 udma_dbg_summary_show_chan(s, chan); 5369 } 5370 } 5371 #endif /* CONFIG_DEBUG_FS */ 5372 5373 static enum dmaengine_alignment udma_get_copy_align(struct udma_dev *ud) 5374 { 5375 const struct udma_match_data *match_data = ud->match_data; 5376 u8 tpl; 5377 5378 if (!match_data->enable_memcpy_support) 5379 return DMAENGINE_ALIGN_8_BYTES; 5380 5381 /* Get the highest TPL level the device supports for memcpy */ 5382 if (ud->bchan_cnt) 5383 tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, 0); 5384 else if (ud->tchan_cnt) 5385 tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, 0); 5386 else 5387 return DMAENGINE_ALIGN_8_BYTES; 5388 5389 switch (match_data->burst_size[tpl]) { 5390 case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES: 5391 return DMAENGINE_ALIGN_256_BYTES; 5392 case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES: 5393 return DMAENGINE_ALIGN_128_BYTES; 5394 case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES: 5395 fallthrough; 5396 default: 5397 return DMAENGINE_ALIGN_64_BYTES; 5398 } 5399 } 5400 5401 #define TI_UDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ 5402 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ 5403 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ 5404 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ 5405 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)) 5406 5407 static int udma_probe(struct platform_device *pdev) 5408 { 5409 struct device_node *navss_node = pdev->dev.parent->of_node; 5410 const struct soc_device_attribute *soc; 5411 struct device *dev = &pdev->dev; 5412 struct udma_dev *ud; 5413 const struct of_device_id *match; 5414 int i, ret; 5415 int ch_count; 5416 5417 ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48)); 5418 if (ret) 5419 dev_err(dev, "failed to set dma mask stuff\n"); 5420 5421 ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL); 5422 if (!ud) 5423 return -ENOMEM; 5424 5425 match = of_match_node(udma_of_match, dev->of_node); 5426 if (!match) { 5427 dev_err(dev, "No compatible match found\n"); 5428 return -ENODEV; 5429 } 5430 ud->match_data = match->data; 5431 5432 ud->soc_data = ud->match_data->soc_data; 5433 if (!ud->soc_data) { 5434 soc = soc_device_match(k3_soc_devices); 5435 if (!soc) { 5436 dev_err(dev, "No compatible SoC found\n"); 5437 return -ENODEV; 5438 } 5439 ud->soc_data = soc->data; 5440 } 5441 5442 ret = udma_get_mmrs(pdev, ud); 5443 if (ret) 5444 return ret; 5445 5446 ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci"); 5447 if (IS_ERR(ud->tisci_rm.tisci)) 5448 return PTR_ERR(ud->tisci_rm.tisci); 5449 5450 ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id", 5451 &ud->tisci_rm.tisci_dev_id); 5452 if (ret) { 5453 dev_err(dev, "ti,sci-dev-id read failure %d\n", ret); 5454 return ret; 5455 } 5456 pdev->id = ud->tisci_rm.tisci_dev_id; 5457 5458 ret = of_property_read_u32(navss_node, "ti,sci-dev-id", 5459 &ud->tisci_rm.tisci_navss_dev_id); 5460 if (ret) { 5461 dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret); 5462 return ret; 5463 } 5464 5465 if (ud->match_data->type == DMA_TYPE_UDMA) { 5466 ret = of_property_read_u32(dev->of_node, "ti,udma-atype", 5467 &ud->atype); 5468 if (!ret && ud->atype > 2) { 5469 dev_err(dev, "Invalid atype: %u\n", ud->atype); 5470 return -EINVAL; 5471 } 5472 } else { 5473 ret = of_property_read_u32(dev->of_node, "ti,asel", 5474 &ud->asel); 5475 if (!ret && ud->asel > 15) { 5476 
dev_err(dev, "Invalid asel: %u\n", ud->asel); 5477 return -EINVAL; 5478 } 5479 } 5480 5481 ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops; 5482 ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops; 5483 5484 if (ud->match_data->type == DMA_TYPE_UDMA) { 5485 ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc"); 5486 } else { 5487 struct k3_ringacc_init_data ring_init_data; 5488 5489 ring_init_data.tisci = ud->tisci_rm.tisci; 5490 ring_init_data.tisci_dev_id = ud->tisci_rm.tisci_dev_id; 5491 if (ud->match_data->type == DMA_TYPE_BCDMA) { 5492 ring_init_data.num_rings = ud->bchan_cnt + 5493 ud->tchan_cnt + 5494 ud->rchan_cnt; 5495 } else { 5496 ring_init_data.num_rings = ud->rflow_cnt + 5497 ud->tflow_cnt; 5498 } 5499 5500 ud->ringacc = k3_ringacc_dmarings_init(pdev, &ring_init_data); 5501 } 5502 5503 if (IS_ERR(ud->ringacc)) 5504 return PTR_ERR(ud->ringacc); 5505 5506 dev->msi.domain = of_msi_get_domain(dev, dev->of_node, 5507 DOMAIN_BUS_TI_SCI_INTA_MSI); 5508 if (!dev->msi.domain) { 5509 return -EPROBE_DEFER; 5510 } 5511 5512 dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask); 5513 /* cyclic operation is not supported via PKTDMA */ 5514 if (ud->match_data->type != DMA_TYPE_PKTDMA) { 5515 dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask); 5516 ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic; 5517 } 5518 5519 ud->ddev.device_config = udma_slave_config; 5520 ud->ddev.device_prep_slave_sg = udma_prep_slave_sg; 5521 ud->ddev.device_issue_pending = udma_issue_pending; 5522 ud->ddev.device_tx_status = udma_tx_status; 5523 ud->ddev.device_pause = udma_pause; 5524 ud->ddev.device_resume = udma_resume; 5525 ud->ddev.device_terminate_all = udma_terminate_all; 5526 ud->ddev.device_synchronize = udma_synchronize; 5527 #ifdef CONFIG_DEBUG_FS 5528 ud->ddev.dbg_summary_show = udma_dbg_summary_show; 5529 #endif 5530 5531 switch (ud->match_data->type) { 5532 case DMA_TYPE_UDMA: 5533 ud->ddev.device_alloc_chan_resources = 5534 udma_alloc_chan_resources; 5535 break; 5536 case DMA_TYPE_BCDMA: 5537 ud->ddev.device_alloc_chan_resources = 5538 bcdma_alloc_chan_resources; 5539 ud->ddev.device_router_config = bcdma_router_config; 5540 break; 5541 case DMA_TYPE_PKTDMA: 5542 ud->ddev.device_alloc_chan_resources = 5543 pktdma_alloc_chan_resources; 5544 break; 5545 default: 5546 return -EINVAL; 5547 } 5548 ud->ddev.device_free_chan_resources = udma_free_chan_resources; 5549 5550 ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS; 5551 ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS; 5552 ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); 5553 ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; 5554 ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT | 5555 DESC_METADATA_ENGINE; 5556 if (ud->match_data->enable_memcpy_support && 5557 !(ud->match_data->type == DMA_TYPE_BCDMA && ud->bchan_cnt == 0)) { 5558 dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask); 5559 ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy; 5560 ud->ddev.directions |= BIT(DMA_MEM_TO_MEM); 5561 } 5562 5563 ud->ddev.dev = dev; 5564 ud->dev = dev; 5565 ud->psil_base = ud->match_data->psil_base; 5566 5567 INIT_LIST_HEAD(&ud->ddev.channels); 5568 INIT_LIST_HEAD(&ud->desc_to_purge); 5569 5570 ch_count = setup_resources(ud); 5571 if (ch_count <= 0) 5572 return ch_count; 5573 5574 spin_lock_init(&ud->lock); 5575 INIT_WORK(&ud->purge_work, udma_purge_desc_work); 5576 5577 ud->desc_align = 64; 5578 if (ud->desc_align < dma_get_cache_alignment()) 5579 ud->desc_align = dma_get_cache_alignment(); 5580 5581 
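	/*
	 * Pre-allocate the descriptors and bounce buffer used to flush stale
	 * data out of an RX channel during teardown (see udma_setup_rx_flush()).
	 */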
ret = udma_setup_rx_flush(ud); 5582 if (ret) 5583 return ret; 5584 5585 for (i = 0; i < ud->bchan_cnt; i++) { 5586 struct udma_bchan *bchan = &ud->bchans[i]; 5587 5588 bchan->id = i; 5589 bchan->reg_rt = ud->mmrs[MMR_BCHANRT] + i * 0x1000; 5590 } 5591 5592 for (i = 0; i < ud->tchan_cnt; i++) { 5593 struct udma_tchan *tchan = &ud->tchans[i]; 5594 5595 tchan->id = i; 5596 tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000; 5597 } 5598 5599 for (i = 0; i < ud->rchan_cnt; i++) { 5600 struct udma_rchan *rchan = &ud->rchans[i]; 5601 5602 rchan->id = i; 5603 rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000; 5604 } 5605 5606 for (i = 0; i < ud->rflow_cnt; i++) { 5607 struct udma_rflow *rflow = &ud->rflows[i]; 5608 5609 rflow->id = i; 5610 } 5611 5612 for (i = 0; i < ch_count; i++) { 5613 struct udma_chan *uc = &ud->channels[i]; 5614 5615 uc->ud = ud; 5616 uc->vc.desc_free = udma_desc_free; 5617 uc->id = i; 5618 uc->bchan = NULL; 5619 uc->tchan = NULL; 5620 uc->rchan = NULL; 5621 uc->config.remote_thread_id = -1; 5622 uc->config.mapped_channel_id = -1; 5623 uc->config.default_flow_id = -1; 5624 uc->config.dir = DMA_MEM_TO_MEM; 5625 uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d", 5626 dev_name(dev), i); 5627 if (!uc->name) 5628 return -ENOMEM; 5629 vchan_init(&uc->vc, &ud->ddev); 5630 /* Use custom vchan completion handling */ 5631 tasklet_setup(&uc->vc.task, udma_vchan_complete); 5632 init_completion(&uc->teardown_completed); 5633 INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion); 5634 } 5635 5636 /* Configure the copy_align to the maximum burst size the device supports */ 5637 ud->ddev.copy_align = udma_get_copy_align(ud); 5638 5639 ret = dma_async_device_register(&ud->ddev); 5640 if (ret) { 5641 dev_err(dev, "failed to register slave DMA engine: %d\n", ret); 5642 return ret; 5643 } 5644 5645 platform_set_drvdata(pdev, ud); 5646 5647 ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud); 5648 if (ret) { 5649 dev_err(dev, "failed to register of_dma controller\n"); 5650 dma_async_device_unregister(&ud->ddev); 5651 } 5652 5653 return ret; 5654 } 5655 5656 static int __maybe_unused udma_pm_suspend(struct device *dev) 5657 { 5658 struct udma_dev *ud = dev_get_drvdata(dev); 5659 struct dma_device *dma_dev = &ud->ddev; 5660 struct dma_chan *chan; 5661 struct udma_chan *uc; 5662 5663 list_for_each_entry(chan, &dma_dev->channels, device_node) { 5664 if (chan->client_count) { 5665 uc = to_udma_chan(chan); 5666 /* backup the channel configuration */ 5667 memcpy(&uc->backup_config, &uc->config, 5668 sizeof(struct udma_chan_config)); 5669 dev_dbg(dev, "Suspending channel %s\n", 5670 dma_chan_name(chan)); 5671 ud->ddev.device_free_chan_resources(chan); 5672 } 5673 } 5674 5675 return 0; 5676 } 5677 5678 static int __maybe_unused udma_pm_resume(struct device *dev) 5679 { 5680 struct udma_dev *ud = dev_get_drvdata(dev); 5681 struct dma_device *dma_dev = &ud->ddev; 5682 struct dma_chan *chan; 5683 struct udma_chan *uc; 5684 int ret; 5685 5686 list_for_each_entry(chan, &dma_dev->channels, device_node) { 5687 if (chan->client_count) { 5688 uc = to_udma_chan(chan); 5689 /* restore the channel configuration */ 5690 memcpy(&uc->config, &uc->backup_config, 5691 sizeof(struct udma_chan_config)); 5692 dev_dbg(dev, "Resuming channel %s\n", 5693 dma_chan_name(chan)); 5694 ret = ud->ddev.device_alloc_chan_resources(chan); 5695 if (ret) 5696 return ret; 5697 } 5698 } 5699 5700 return 0; 5701 } 5702 5703 static const struct dev_pm_ops udma_pm_ops = { 5704 
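	/*
	 * Use the late suspend / early resume phases so that DMA client
	 * drivers, which suspend in the normal phase, run before the remaining
	 * in-use channels are released here.
	 */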
	SET_LATE_SYSTEM_SLEEP_PM_OPS(udma_pm_suspend, udma_pm_resume)
};

static struct platform_driver udma_driver = {
	.driver = {
		.name = "ti-udma",
		.of_match_table = udma_of_match,
		.suppress_bind_attrs = true,
		.pm = &udma_pm_ops,
	},
	.probe = udma_probe,
};

module_platform_driver(udma_driver);
MODULE_DESCRIPTION("Texas Instruments UDMA support");
MODULE_LICENSE("GPL v2");

/* Private interfaces to UDMA */
#include "k3-udma-private.c"