// SPDX-License-Identifier: GPL-2.0
/*
 * TI K3 NAVSS Ring Accelerator subsystem driver
 *
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 */

#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sys_soc.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <linux/soc/ti/ti_sci_inta_msi.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>

static LIST_HEAD(k3_ringacc_list);
static DEFINE_MUTEX(k3_ringacc_list_lock);

#define K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK	GENMASK(19, 0)
#define K3_DMARING_CFG_RING_SIZE_ELCNT_MASK	GENMASK(15, 0)

/**
 * struct k3_ring_rt_regs - The RA realtime Control/Status Registers region
 *
 * @resv_16: Reserved
 * @db: Ring Doorbell Register
 * @resv_4: Reserved
 * @occ: Ring Occupancy Register
 * @indx: Ring Current Index Register
 * @hwocc: Ring Hardware Occupancy Register
 * @hwindx: Ring Hardware Current Index Register
 */
struct k3_ring_rt_regs {
	u32	resv_16[4];
	u32	db;
	u32	resv_4[1];
	u32	occ;
	u32	indx;
	u32	hwocc;
	u32	hwindx;
};

#define K3_RINGACC_RT_REGS_STEP			0x1000
#define K3_DMARING_RT_REGS_STEP			0x2000
#define K3_DMARING_RT_REGS_REVERSE_OFS		0x1000
#define K3_RINGACC_RT_OCC_MASK			GENMASK(20, 0)
#define K3_DMARING_RT_OCC_TDOWN_COMPLETE	BIT(31)
#define K3_DMARING_RT_DB_ENTRY_MASK		GENMASK(7, 0)
#define K3_DMARING_RT_DB_TDOWN_ACK		BIT(31)

/**
 * struct k3_ring_fifo_regs - The Ring Accelerator Queues Registers region
 *
 * @head_data: Ring Head Entry Data Registers
 * @tail_data: Ring Tail Entry Data Registers
 * @peek_head_data: Ring Peek Head Entry Data Regs
 * @peek_tail_data: Ring Peek Tail Entry Data Regs
 */
struct k3_ring_fifo_regs {
	u32	head_data[128];
	u32	tail_data[128];
	u32	peek_head_data[128];
	u32	peek_tail_data[128];
};

/**
 * struct k3_ringacc_proxy_gcfg_regs - RA Proxy Global Config MMIO Region
 *
 * @revision: Revision Register
 * @config: Config Register
 */
struct k3_ringacc_proxy_gcfg_regs {
	u32	revision;
	u32	config;
};

#define K3_RINGACC_PROXY_CFG_THREADS_MASK	GENMASK(15, 0)

/**
 * struct k3_ringacc_proxy_target_regs - Proxy Datapath MMIO Region
 *
 * @control: Proxy Control Register
 * @status: Proxy Status Register
 * @resv_512: Reserved
 * @data: Proxy Data Register
 */
struct k3_ringacc_proxy_target_regs {
	u32	control;
	u32	status;
	u8	resv_512[504];
	u32	data[128];
};

#define K3_RINGACC_PROXY_TARGET_STEP	0x1000
#define K3_RINGACC_PROXY_NOT_USED	(-1)

enum k3_ringacc_proxy_access_mode {
	PROXY_ACCESS_MODE_HEAD = 0,
	PROXY_ACCESS_MODE_TAIL = 1,
	PROXY_ACCESS_MODE_PEEK_HEAD = 2,
	PROXY_ACCESS_MODE_PEEK_TAIL = 3,
};

#define K3_RINGACC_FIFO_WINDOW_SIZE_BYTES	(512U)
#define K3_RINGACC_FIFO_REGS_STEP		0x1000
#define K3_RINGACC_MAX_DB_RING_CNT		(127U)

struct k3_ring_ops {
	int (*push_tail)(struct k3_ring *ring, void *elm);
	int (*push_head)(struct k3_ring *ring, void *elm);
	int (*pop_tail)(struct k3_ring *ring, void *elm);
	int (*pop_head)(struct k3_ring *ring, void *elm);
};
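
/*
 * Dispatch sketch (illustrative, not compiled): one of the ops tables
 * defined below is bound to a ring at configuration time, and the
 * exported push/pop API indirects through it:
 *
 *	ring->ops = &k3_ring_mode_ring_ops;	// chosen from cfg->mode
 *	...
 *	ret = ring->ops->push_tail(ring, elem);	// k3_ringacc_ring_push()
 */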

/**
 * struct k3_ring_state - Internal state tracking structure
 *
 * @free: Number of free entries
 * @occ: Occupancy
 * @windex: Write index
 * @rindex: Read index
 * @tdown_complete: Tear down complete state
 */
struct k3_ring_state {
	u32 free;
	u32 occ;
	u32 windex;
	u32 rindex;
	u32 tdown_complete:1;
};

/**
 * struct k3_ring - RA Ring descriptor
 *
 * @rt: Ring control/status registers
 * @fifos: Ring queues registers
 * @proxy: Ring Proxy Datapath registers
 * @ring_mem_dma: Ring buffer dma address
 * @ring_mem_virt: Ring buffer virt address
 * @ops: Ring operations
 * @size: Ring size in elements
 * @elm_size: Size of the ring element
 * @mode: Ring mode
 * @flags: flags
 * @state: Ring state
 * @ring_id: Ring Id
 * @parent: Pointer to the parent struct k3_ringacc
 * @use_count: Use count for shared rings
 * @proxy_id: RA Ring Proxy Id (only if @K3_RINGACC_RING_USE_PROXY)
 * @dma_dev: device to be used for DMA API (allocation, mapping)
 * @asel: Address Space Select value for physical addresses
 */
struct k3_ring {
	struct k3_ring_rt_regs __iomem *rt;
	struct k3_ring_fifo_regs __iomem *fifos;
	struct k3_ringacc_proxy_target_regs __iomem *proxy;
	dma_addr_t	ring_mem_dma;
	void		*ring_mem_virt;
	const struct k3_ring_ops *ops;
	u32		size;
	enum k3_ring_size elm_size;
	enum k3_ring_mode mode;
	u32		flags;
#define K3_RING_FLAG_BUSY	BIT(1)
#define K3_RING_FLAG_SHARED	BIT(2)
#define K3_RING_FLAG_REVERSE	BIT(3)
	struct k3_ring_state state;
	u32		ring_id;
	struct k3_ringacc	*parent;
	u32		use_count;
	int		proxy_id;
	struct device	*dma_dev;
	u32		asel;
#define K3_ADDRESS_ASEL_SHIFT	48
};

struct k3_ringacc_ops {
	int (*init)(struct platform_device *pdev, struct k3_ringacc *ringacc);
};

/**
 * struct k3_ringacc - Rings accelerator descriptor
 *
 * @dev: pointer to the RA device
 * @proxy_gcfg: RA proxy global config registers
 * @proxy_target_base: RA proxy datapath region
 * @num_rings: number of rings in RA
 * @rings_inuse: bitfield for ring usage tracking
 * @rm_gp_range: general purpose rings range from tisci
 * @dma_ring_reset_quirk: DMA reset workaround enable
 * @num_proxies: number of RA proxies
 * @proxy_inuse: bitfield for proxy usage tracking
 * @rings: array of ring descriptors (struct @k3_ring)
 * @list: list of RAs in the system
 * @req_lock: protect rings allocation
 * @tisci: pointer to the ti-sci handle
 * @tisci_ring_ops: ti-sci rings ops
 * @tisci_dev_id: ti-sci device id
 * @ops: SoC specific ringacc operations
 * @dma_rings: indicate DMA ring (dual ring within BCDMA/PKTDMA)
 */
struct k3_ringacc {
	struct device *dev;
	struct k3_ringacc_proxy_gcfg_regs __iomem *proxy_gcfg;
	void __iomem *proxy_target_base;
	u32 num_rings; /* number of rings in Ringacc module */
	unsigned long *rings_inuse;
	struct ti_sci_resource *rm_gp_range;

	bool dma_ring_reset_quirk;
	u32 num_proxies;
	unsigned long *proxy_inuse;

	struct k3_ring *rings;
	struct list_head list;
	struct mutex req_lock; /* protect rings allocation */

	const struct ti_sci_handle *tisci;
	const struct ti_sci_rm_ringacc_ops *tisci_ring_ops;
	u32 tisci_dev_id;

	const struct k3_ringacc_ops *ops;
	bool dma_rings;
};

/**
 * struct k3_ringacc_soc_data - Rings accelerator SoC data
 *
 * @dma_ring_reset_quirk: DMA reset workaround enable
 */
struct k3_ringacc_soc_data {
	unsigned dma_ring_reset_quirk:1;
};

static int k3_ringacc_ring_read_occ(struct k3_ring *ring)
{
	return readl(&ring->rt->occ) & K3_RINGACC_RT_OCC_MASK;
}

static void k3_ringacc_ring_update_occ(struct k3_ring *ring)
{
	u32 val;

	val = readl(&ring->rt->occ);

	ring->state.occ = val & K3_RINGACC_RT_OCC_MASK;
	ring->state.tdown_complete = !!(val & K3_DMARING_RT_OCC_TDOWN_COMPLETE);
}

static long k3_ringacc_ring_get_fifo_pos(struct k3_ring *ring)
{
	return K3_RINGACC_FIFO_WINDOW_SIZE_BYTES -
	       (4 << ring->elm_size);
}

static void *k3_ringacc_get_elm_addr(struct k3_ring *ring, u32 idx)
{
	return (ring->ring_mem_virt + idx * (4 << ring->elm_size));
}

static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem);
static int k3_dmaring_fwd_pop(struct k3_ring *ring, void *elem);
static int k3_dmaring_reverse_pop(struct k3_ring *ring, void *elem);

static const struct k3_ring_ops k3_ring_mode_ring_ops = {
	.push_tail = k3_ringacc_ring_push_mem,
	.pop_head = k3_ringacc_ring_pop_mem,
};

static const struct k3_ring_ops k3_dmaring_fwd_ops = {
	.push_tail = k3_ringacc_ring_push_mem,
	.pop_head = k3_dmaring_fwd_pop,
};

static const struct k3_ring_ops k3_dmaring_reverse_ops = {
	/* Reverse side of the DMA ring can only be popped by SW */
	.pop_head = k3_dmaring_reverse_pop,
};

static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem);

static const struct k3_ring_ops k3_ring_mode_msg_ops = {
	.push_tail = k3_ringacc_ring_push_io,
	.push_head = k3_ringacc_ring_push_head_io,
	.pop_tail = k3_ringacc_ring_pop_tail_io,
	.pop_head = k3_ringacc_ring_pop_io,
};

static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem);

static const struct k3_ring_ops k3_ring_mode_proxy_ops = {
	.push_tail = k3_ringacc_ring_push_tail_proxy,
	.push_head = k3_ringacc_ring_push_head_proxy,
	.pop_tail = k3_ringacc_ring_pop_tail_proxy,
	.pop_head = k3_ringacc_ring_pop_head_proxy,
};

static void k3_ringacc_ring_dump(struct k3_ring *ring)
{
	struct device *dev = ring->parent->dev;

	dev_dbg(dev, "dump ring: %d\n", ring->ring_id);
	dev_dbg(dev, "dump mem virt %p, dma %pad\n", ring->ring_mem_virt,
		&ring->ring_mem_dma);
	dev_dbg(dev, "dump elmsize %d, size %d, mode %d, proxy_id %d\n",
		ring->elm_size, ring->size, ring->mode, ring->proxy_id);
	dev_dbg(dev, "dump flags %08X\n", ring->flags);

	dev_dbg(dev, "dump ring_rt_regs: db%08x\n", readl(&ring->rt->db));
	dev_dbg(dev, "dump occ%08x\n", readl(&ring->rt->occ));
	dev_dbg(dev, "dump indx%08x\n", readl(&ring->rt->indx));
	dev_dbg(dev, "dump hwocc%08x\n", readl(&ring->rt->hwocc));
	dev_dbg(dev, "dump hwindx%08x\n", readl(&ring->rt->hwindx));

	if (ring->ring_mem_virt)
		print_hex_dump_debug("dump ring_mem_virt ", DUMP_PREFIX_NONE,
				     16, 1, ring->ring_mem_virt, 16 * 8, false);
}
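
/*
 * Worked example for the helpers above (illustrative numbers): with
 * elm_size = K3_RINGACC_RING_ELSIZE_8, one element takes 4 << 1 = 8
 * bytes, element idx sits at ring_mem_virt + idx * 8, and the I/O FIFO
 * position is 512 - 8 = 504 bytes into the access window.
 */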

struct k3_ring *k3_ringacc_request_ring(struct k3_ringacc *ringacc,
					int id, u32 flags)
{
	int proxy_id = K3_RINGACC_PROXY_NOT_USED;

	mutex_lock(&ringacc->req_lock);

	if (!try_module_get(ringacc->dev->driver->owner))
		goto err_module_get;

	if (id == K3_RINGACC_RING_ID_ANY) {
		/* Request for any general purpose ring */
		struct ti_sci_resource_desc *gp_rings =
						&ringacc->rm_gp_range->desc[0];
		unsigned long size;

		size = gp_rings->start + gp_rings->num;
		id = find_next_zero_bit(ringacc->rings_inuse, size,
					gp_rings->start);
		if (id == size)
			goto error;
	} else if (id < 0) {
		goto error;
	}

	if (test_bit(id, ringacc->rings_inuse) &&
	    !(ringacc->rings[id].flags & K3_RING_FLAG_SHARED))
		goto error;
	else if (ringacc->rings[id].flags & K3_RING_FLAG_SHARED)
		goto out;

	if (flags & K3_RINGACC_RING_USE_PROXY) {
		proxy_id = find_first_zero_bit(ringacc->proxy_inuse,
					       ringacc->num_proxies);
		if (proxy_id == ringacc->num_proxies)
			goto error;
	}

	if (proxy_id != K3_RINGACC_PROXY_NOT_USED) {
		set_bit(proxy_id, ringacc->proxy_inuse);
		ringacc->rings[id].proxy_id = proxy_id;
		dev_dbg(ringacc->dev, "Giving ring#%d proxy#%d\n", id,
			proxy_id);
	} else {
		dev_dbg(ringacc->dev, "Giving ring#%d\n", id);
	}

	set_bit(id, ringacc->rings_inuse);
out:
	ringacc->rings[id].use_count++;
	mutex_unlock(&ringacc->req_lock);
	return &ringacc->rings[id];

error:
	module_put(ringacc->dev->driver->owner);

err_module_get:
	mutex_unlock(&ringacc->req_lock);
	return NULL;
}
EXPORT_SYMBOL_GPL(k3_ringacc_request_ring);

static int k3_dmaring_request_dual_ring(struct k3_ringacc *ringacc, int fwd_id,
					struct k3_ring **fwd_ring,
					struct k3_ring **compl_ring)
{
	int ret = 0;

	/*
	 * DMA rings must be requested by ID, the completion ring is the
	 * reverse side of the forward ring.
	 */
	if (fwd_id < 0)
		return -EINVAL;

	mutex_lock(&ringacc->req_lock);

	if (!try_module_get(ringacc->dev->driver->owner)) {
		ret = -EINVAL;
		goto err_module_get;
	}

	if (test_bit(fwd_id, ringacc->rings_inuse)) {
		ret = -EBUSY;
		goto error;
	}

	*fwd_ring = &ringacc->rings[fwd_id];
	*compl_ring = &ringacc->rings[fwd_id + ringacc->num_rings];
	set_bit(fwd_id, ringacc->rings_inuse);
	ringacc->rings[fwd_id].use_count++;
	dev_dbg(ringacc->dev, "Giving ring#%d\n", fwd_id);

	mutex_unlock(&ringacc->req_lock);
	return 0;

error:
	module_put(ringacc->dev->driver->owner);
err_module_get:
	mutex_unlock(&ringacc->req_lock);
	return ret;
}

int k3_ringacc_request_rings_pair(struct k3_ringacc *ringacc,
				  int fwd_id, int compl_id,
				  struct k3_ring **fwd_ring,
				  struct k3_ring **compl_ring)
{
	int ret = 0;

	if (!fwd_ring || !compl_ring)
		return -EINVAL;

	if (ringacc->dma_rings)
		return k3_dmaring_request_dual_ring(ringacc, fwd_id,
						    fwd_ring, compl_ring);

	*fwd_ring = k3_ringacc_request_ring(ringacc, fwd_id, 0);
	if (!(*fwd_ring))
		return -ENODEV;

	*compl_ring = k3_ringacc_request_ring(ringacc, compl_id, 0);
	if (!(*compl_ring)) {
		k3_ringacc_ring_free(*fwd_ring);
		ret = -ENODEV;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(k3_ringacc_request_rings_pair);
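
/*
 * Illustrative pairing request from a client driver (a sketch; error
 * handling trimmed, "my_fwd_id"/"my_compl_id" are hypothetical):
 *
 *	struct k3_ring *fwd_ring, *compl_ring;
 *	int ret;
 *
 *	ret = k3_ringacc_request_rings_pair(ringacc, my_fwd_id, my_compl_id,
 *					    &fwd_ring, &compl_ring);
 *	if (ret)
 *		return ret;
 */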

static void k3_ringacc_ring_reset_sci(struct k3_ring *ring)
{
	struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
	struct k3_ringacc *ringacc = ring->parent;
	int ret;

	ring_cfg.nav_id = ringacc->tisci_dev_id;
	ring_cfg.index = ring->ring_id;
	ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID;
	ring_cfg.count = ring->size;

	ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
	if (ret)
		dev_err(ringacc->dev, "TISCI reset ring fail (%d) ring_idx %d\n",
			ret, ring->ring_id);
}

void k3_ringacc_ring_reset(struct k3_ring *ring)
{
	if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
		return;

	memset(&ring->state, 0, sizeof(ring->state));

	k3_ringacc_ring_reset_sci(ring);
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset);

static void k3_ringacc_ring_reconfig_qmode_sci(struct k3_ring *ring,
					       enum k3_ring_mode mode)
{
	struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
	struct k3_ringacc *ringacc = ring->parent;
	int ret;

	ring_cfg.nav_id = ringacc->tisci_dev_id;
	ring_cfg.index = ring->ring_id;
	ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_RING_MODE_VALID;
	ring_cfg.mode = mode;

	ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
	if (ret)
		dev_err(ringacc->dev, "TISCI reconf qmode fail (%d) ring_idx %d\n",
			ret, ring->ring_id);
}

void k3_ringacc_ring_reset_dma(struct k3_ring *ring, u32 occ)
{
	if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
		return;

	if (!ring->parent->dma_ring_reset_quirk)
		goto reset;

	if (!occ)
		occ = k3_ringacc_ring_read_occ(ring);

	if (occ) {
		u32 db_ring_cnt, db_ring_cnt_cur;

		dev_dbg(ring->parent->dev, "%s %u occ: %u\n", __func__,
			ring->ring_id, occ);
		/* TI-SCI ring reset */
		k3_ringacc_ring_reset_sci(ring);

		/*
		 * Setup the ring in ring/doorbell mode (if not already in this
		 * mode)
		 */
		if (ring->mode != K3_RINGACC_RING_MODE_RING)
			k3_ringacc_ring_reconfig_qmode_sci(
					ring, K3_RINGACC_RING_MODE_RING);
		/*
		 * Ring the doorbell 2**22 - ringOcc times.
		 * This will wrap the internal UDMAP ring state occupancy
		 * counter (which is 21 bits wide) to 0.
		 */
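		/*
		 * Worked example with illustrative numbers (not from the
		 * source): for occ = 5, db_ring_cnt = 2**22 - 5 = 4194299,
		 * which the loop below issues as 33025 doorbell writes of
		 * 127 entries plus one final write of 124.
		 */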
		db_ring_cnt = (1U << 22) - occ;

		while (db_ring_cnt != 0) {
			/*
			 * Ring the doorbell with the maximum count each
			 * iteration if possible to minimize the total
			 * number of writes
			 */
			if (db_ring_cnt > K3_RINGACC_MAX_DB_RING_CNT)
				db_ring_cnt_cur = K3_RINGACC_MAX_DB_RING_CNT;
			else
				db_ring_cnt_cur = db_ring_cnt;

			writel(db_ring_cnt_cur, &ring->rt->db);
			db_ring_cnt -= db_ring_cnt_cur;
		}

		/* Restore the original ring mode (if not ring mode) */
		if (ring->mode != K3_RINGACC_RING_MODE_RING)
			k3_ringacc_ring_reconfig_qmode_sci(ring, ring->mode);
	}

reset:
	/* Reset the ring */
	k3_ringacc_ring_reset(ring);
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset_dma);

static void k3_ringacc_ring_free_sci(struct k3_ring *ring)
{
	struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
	struct k3_ringacc *ringacc = ring->parent;
	int ret;

	ring_cfg.nav_id = ringacc->tisci_dev_id;
	ring_cfg.index = ring->ring_id;
	ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER;

	ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
	if (ret)
		dev_err(ringacc->dev, "TISCI ring free fail (%d) ring_idx %d\n",
			ret, ring->ring_id);
}

int k3_ringacc_ring_free(struct k3_ring *ring)
{
	struct k3_ringacc *ringacc;

	if (!ring)
		return -EINVAL;

	ringacc = ring->parent;

	/*
	 * DMA rings share memory and configuration: only the forward ring
	 * is configured and the reverse ring is treated as its slave.
	 */
	if (ringacc->dma_rings && (ring->flags & K3_RING_FLAG_REVERSE))
		return 0;

	dev_dbg(ring->parent->dev, "flags: 0x%08x\n", ring->flags);

	if (!test_bit(ring->ring_id, ringacc->rings_inuse))
		return -EINVAL;

	mutex_lock(&ringacc->req_lock);

	if (--ring->use_count)
		goto out;

	if (!(ring->flags & K3_RING_FLAG_BUSY))
		goto no_init;

	k3_ringacc_ring_free_sci(ring);

	dma_free_coherent(ring->dma_dev,
			  ring->size * (4 << ring->elm_size),
			  ring->ring_mem_virt, ring->ring_mem_dma);
	ring->flags = 0;
	ring->ops = NULL;
	ring->dma_dev = NULL;
	ring->asel = 0;

	if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED) {
		clear_bit(ring->proxy_id, ringacc->proxy_inuse);
		ring->proxy = NULL;
		ring->proxy_id = K3_RINGACC_PROXY_NOT_USED;
	}

no_init:
	clear_bit(ring->ring_id, ringacc->rings_inuse);

	module_put(ringacc->dev->driver->owner);

out:
	mutex_unlock(&ringacc->req_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_free);

u32 k3_ringacc_get_ring_id(struct k3_ring *ring)
{
	if (!ring)
		return -EINVAL;

	return ring->ring_id;
}
EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_id);

u32 k3_ringacc_get_tisci_dev_id(struct k3_ring *ring)
{
	if (!ring)
		return -EINVAL;

	return ring->parent->tisci_dev_id;
}
EXPORT_SYMBOL_GPL(k3_ringacc_get_tisci_dev_id);

int k3_ringacc_get_ring_irq_num(struct k3_ring *ring)
{
	int irq_num;

	if (!ring)
		return -EINVAL;

	irq_num = msi_get_virq(ring->parent->dev, ring->ring_id);
	if (irq_num <= 0)
		irq_num = -EINVAL;
	return irq_num;
}
EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_irq_num);
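
/*
 * Illustrative interrupt hookup by a client (a sketch; "my_compl_isr"
 * and "my_data" are hypothetical):
 *
 *	int irq = k3_ringacc_get_ring_irq_num(compl_ring);
 *
 *	if (irq < 0)
 *		return irq;
 *	ret = request_irq(irq, my_compl_isr, 0, "my-compl-ring", my_data);
 */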

static int k3_ringacc_ring_cfg_sci(struct k3_ring *ring)
{
	struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
	struct k3_ringacc *ringacc = ring->parent;
	int ret;

	if (!ringacc->tisci)
		return -EINVAL;

	ring_cfg.nav_id = ringacc->tisci_dev_id;
	ring_cfg.index = ring->ring_id;
	ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER;
	ring_cfg.addr_lo = lower_32_bits(ring->ring_mem_dma);
	ring_cfg.addr_hi = upper_32_bits(ring->ring_mem_dma);
	ring_cfg.count = ring->size;
	ring_cfg.mode = ring->mode;
	ring_cfg.size = ring->elm_size;
	ring_cfg.asel = ring->asel;

	ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
	if (ret)
		dev_err(ringacc->dev, "TISCI config ring fail (%d) ring_idx %d\n",
			ret, ring->ring_id);

	return ret;
}

static int k3_dmaring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
{
	struct k3_ringacc *ringacc;
	struct k3_ring *reverse_ring;
	int ret = 0;

	if (cfg->elm_size != K3_RINGACC_RING_ELSIZE_8 ||
	    cfg->mode != K3_RINGACC_RING_MODE_RING ||
	    cfg->size & ~K3_DMARING_CFG_RING_SIZE_ELCNT_MASK)
		return -EINVAL;

	ringacc = ring->parent;

	/*
	 * DMA rings share memory and configuration: only the forward ring
	 * is configured and the reverse ring is treated as its slave.
	 */
	if (ringacc->dma_rings && (ring->flags & K3_RING_FLAG_REVERSE))
		return 0;

	if (!test_bit(ring->ring_id, ringacc->rings_inuse))
		return -EINVAL;

	ring->size = cfg->size;
	ring->elm_size = cfg->elm_size;
	ring->mode = cfg->mode;
	ring->asel = cfg->asel;
	ring->dma_dev = cfg->dma_dev;
	if (!ring->dma_dev) {
		dev_warn(ringacc->dev, "dma_dev is not provided for ring%d\n",
			 ring->ring_id);
		ring->dma_dev = ringacc->dev;
	}

	memset(&ring->state, 0, sizeof(ring->state));

	ring->ops = &k3_dmaring_fwd_ops;

	ring->ring_mem_virt = dma_alloc_coherent(ring->dma_dev,
						 ring->size * (4 << ring->elm_size),
						 &ring->ring_mem_dma, GFP_KERNEL);
	if (!ring->ring_mem_virt) {
		dev_err(ringacc->dev, "Failed to alloc ring mem\n");
		ret = -ENOMEM;
		goto err_free_ops;
	}

	ret = k3_ringacc_ring_cfg_sci(ring);
	if (ret)
		goto err_free_mem;

	ring->flags |= K3_RING_FLAG_BUSY;

	k3_ringacc_ring_dump(ring);

	/* DMA rings: configure the reverse ring */
	reverse_ring = &ringacc->rings[ring->ring_id + ringacc->num_rings];
	reverse_ring->size = cfg->size;
	reverse_ring->elm_size = cfg->elm_size;
	reverse_ring->mode = cfg->mode;
	reverse_ring->asel = cfg->asel;
	memset(&reverse_ring->state, 0, sizeof(reverse_ring->state));
	reverse_ring->ops = &k3_dmaring_reverse_ops;

	reverse_ring->ring_mem_virt = ring->ring_mem_virt;
	reverse_ring->ring_mem_dma = ring->ring_mem_dma;
	reverse_ring->flags |= K3_RING_FLAG_BUSY;
	k3_ringacc_ring_dump(reverse_ring);

	return 0;

err_free_mem:
	dma_free_coherent(ring->dma_dev,
			  ring->size * (4 << ring->elm_size),
			  ring->ring_mem_virt,
			  ring->ring_mem_dma);
err_free_ops:
	ring->ops = NULL;
	ring->proxy = NULL;
	ring->dma_dev = NULL;
	ring->asel = 0;
	return ret;
}
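
/*
 * Illustrative DMA (dual) ring configuration by a client (a sketch;
 * error handling trimmed, "my_dma_dev" is hypothetical). Only 8-byte
 * elements in RING mode pass the checks above, and configuring the
 * forward ring also sets up its reverse counterpart:
 *
 *	struct k3_ring_cfg cfg = {
 *		.size = 128,
 *		.elm_size = K3_RINGACC_RING_ELSIZE_8,
 *		.mode = K3_RINGACC_RING_MODE_RING,
 *		.dma_dev = my_dma_dev,
 *	};
 *
 *	ret = k3_ringacc_ring_cfg(fwd_ring, &cfg);
 */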

int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
{
	struct k3_ringacc *ringacc;
	int ret = 0;

	if (!ring || !cfg)
		return -EINVAL;

	ringacc = ring->parent;

	if (ringacc->dma_rings)
		return k3_dmaring_cfg(ring, cfg);

	if (cfg->elm_size > K3_RINGACC_RING_ELSIZE_256 ||
	    cfg->mode >= K3_RINGACC_RING_MODE_INVALID ||
	    cfg->size & ~K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK ||
	    !test_bit(ring->ring_id, ringacc->rings_inuse))
		return -EINVAL;

	if (cfg->mode == K3_RINGACC_RING_MODE_MESSAGE &&
	    ring->proxy_id == K3_RINGACC_PROXY_NOT_USED &&
	    cfg->elm_size > K3_RINGACC_RING_ELSIZE_8) {
		dev_err(ringacc->dev,
			"Message mode must use proxy for %u element size\n",
			4 << ring->elm_size);
		return -EINVAL;
	}

	/*
	 * In case of a shared ring only the first user (master user) can
	 * configure the ring. The expected sequence on the client side is:
	 * ring = k3_ringacc_request_ring(ringacc, ring_id, 0); # master user
	 * k3_ringacc_ring_cfg(ring, cfg); # master configuration
	 * k3_ringacc_request_ring(ringacc, ring_id, K3_RING_FLAG_SHARED);
	 * k3_ringacc_request_ring(ringacc, ring_id, K3_RING_FLAG_SHARED);
	 */
	if (ring->use_count != 1)
		return 0;

	ring->size = cfg->size;
	ring->elm_size = cfg->elm_size;
	ring->mode = cfg->mode;
	memset(&ring->state, 0, sizeof(ring->state));

	if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED)
		ring->proxy = ringacc->proxy_target_base +
			      ring->proxy_id * K3_RINGACC_PROXY_TARGET_STEP;

	switch (ring->mode) {
	case K3_RINGACC_RING_MODE_RING:
		ring->ops = &k3_ring_mode_ring_ops;
		ring->dma_dev = cfg->dma_dev;
		if (!ring->dma_dev)
			ring->dma_dev = ringacc->dev;
		break;
	case K3_RINGACC_RING_MODE_MESSAGE:
		ring->dma_dev = ringacc->dev;
		if (ring->proxy)
			ring->ops = &k3_ring_mode_proxy_ops;
		else
			ring->ops = &k3_ring_mode_msg_ops;
		break;
	default:
		ring->ops = NULL;
		ret = -EINVAL;
		goto err_free_proxy;
	}

	ring->ring_mem_virt = dma_alloc_coherent(ring->dma_dev,
						 ring->size * (4 << ring->elm_size),
						 &ring->ring_mem_dma, GFP_KERNEL);
	if (!ring->ring_mem_virt) {
		dev_err(ringacc->dev, "Failed to alloc ring mem\n");
		ret = -ENOMEM;
		goto err_free_ops;
	}

	ret = k3_ringacc_ring_cfg_sci(ring);
	if (ret)
		goto err_free_mem;

	ring->flags |= K3_RING_FLAG_BUSY;
	ring->flags |= (cfg->flags & K3_RINGACC_RING_SHARED) ?
			K3_RING_FLAG_SHARED : 0;

	k3_ringacc_ring_dump(ring);

	return 0;

err_free_mem:
	dma_free_coherent(ring->dma_dev,
			  ring->size * (4 << ring->elm_size),
			  ring->ring_mem_virt,
			  ring->ring_mem_dma);
err_free_ops:
	ring->ops = NULL;
	ring->dma_dev = NULL;
err_free_proxy:
	ring->proxy = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_cfg);
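
/*
 * Illustrative end-to-end use of a general purpose ring (a sketch;
 * error handling trimmed):
 *
 *	struct k3_ring_cfg cfg = {
 *		.size = 64,
 *		.elm_size = K3_RINGACC_RING_ELSIZE_8,
 *		.mode = K3_RINGACC_RING_MODE_RING,
 *	};
 *	struct k3_ring *ring;
 *	u64 elem = 0;
 *
 *	ring = k3_ringacc_request_ring(ringacc, K3_RINGACC_RING_ID_ANY, 0);
 *	if (!ring)
 *		return -ENODEV;
 *	ret = k3_ringacc_ring_cfg(ring, &cfg);
 *	...
 *	ret = k3_ringacc_ring_push(ring, &elem);
 */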

u32 k3_ringacc_ring_get_size(struct k3_ring *ring)
{
	if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
		return -EINVAL;

	return ring->size;
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_size);

u32 k3_ringacc_ring_get_free(struct k3_ring *ring)
{
	if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
		return -EINVAL;

	if (!ring->state.free)
		ring->state.free = ring->size - k3_ringacc_ring_read_occ(ring);

	return ring->state.free;
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_free);

u32 k3_ringacc_ring_get_occ(struct k3_ring *ring)
{
	if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
		return -EINVAL;

	return k3_ringacc_ring_read_occ(ring);
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_occ);

u32 k3_ringacc_ring_is_full(struct k3_ring *ring)
{
	return !k3_ringacc_ring_get_free(ring);
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_is_full);

enum k3_ringacc_access_mode {
	K3_RINGACC_ACCESS_MODE_PUSH_HEAD,
	K3_RINGACC_ACCESS_MODE_POP_HEAD,
	K3_RINGACC_ACCESS_MODE_PUSH_TAIL,
	K3_RINGACC_ACCESS_MODE_POP_TAIL,
	K3_RINGACC_ACCESS_MODE_PEEK_HEAD,
	K3_RINGACC_ACCESS_MODE_PEEK_TAIL,
};

#define K3_RINGACC_PROXY_MODE(x)	(((x) & 0x3) << 16)
#define K3_RINGACC_PROXY_ELSIZE(x)	(((x) & 0x7) << 24)
static int k3_ringacc_ring_cfg_proxy(struct k3_ring *ring,
				     enum k3_ringacc_proxy_access_mode mode)
{
	u32 val;

	val = ring->ring_id;
	val |= K3_RINGACC_PROXY_MODE(mode);
	val |= K3_RINGACC_PROXY_ELSIZE(ring->elm_size);
	writel(val, &ring->proxy->control);
	return 0;
}

static int k3_ringacc_ring_access_proxy(struct k3_ring *ring, void *elem,
					enum k3_ringacc_access_mode access_mode)
{
	void __iomem *ptr;

	ptr = (void __iomem *)&ring->proxy->data;

	switch (access_mode) {
	case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
	case K3_RINGACC_ACCESS_MODE_POP_HEAD:
		k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_HEAD);
		break;
	case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
	case K3_RINGACC_ACCESS_MODE_POP_TAIL:
		k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_TAIL);
		break;
	default:
		return -EINVAL;
	}

	ptr += k3_ringacc_ring_get_fifo_pos(ring);

	switch (access_mode) {
	case K3_RINGACC_ACCESS_MODE_POP_HEAD:
	case K3_RINGACC_ACCESS_MODE_POP_TAIL:
		dev_dbg(ring->parent->dev,
			"proxy:memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr,
			access_mode);
		memcpy_fromio(elem, ptr, (4 << ring->elm_size));
		ring->state.occ--;
		break;
	case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
	case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
		dev_dbg(ring->parent->dev,
			"proxy:memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr,
			access_mode);
		memcpy_toio(ptr, elem, (4 << ring->elm_size));
		ring->state.free--;
		break;
	default:
		return -EINVAL;
	}

	dev_dbg(ring->parent->dev, "proxy: free%d occ%d\n", ring->state.free,
		ring->state.occ);
	return 0;
}

static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem)
{
	return k3_ringacc_ring_access_proxy(ring, elem,
					    K3_RINGACC_ACCESS_MODE_PUSH_HEAD);
}

static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem)
{
	return k3_ringacc_ring_access_proxy(ring, elem,
					    K3_RINGACC_ACCESS_MODE_PUSH_TAIL);
}

static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem)
{
	return k3_ringacc_ring_access_proxy(ring, elem,
					    K3_RINGACC_ACCESS_MODE_POP_HEAD);
}

static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem)
{
	return k3_ringacc_ring_access_proxy(ring, elem,
					    K3_RINGACC_ACCESS_MODE_POP_TAIL);
}

static int k3_ringacc_ring_access_io(struct k3_ring *ring, void *elem,
				     enum k3_ringacc_access_mode access_mode)
{
	void __iomem *ptr;

	switch (access_mode) {
	case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
	case K3_RINGACC_ACCESS_MODE_POP_HEAD:
		ptr = (void __iomem *)&ring->fifos->head_data;
		break;
	case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
	case K3_RINGACC_ACCESS_MODE_POP_TAIL:
		ptr = (void __iomem *)&ring->fifos->tail_data;
		break;
	default:
		return -EINVAL;
	}

	ptr += k3_ringacc_ring_get_fifo_pos(ring);

	switch (access_mode) {
	case K3_RINGACC_ACCESS_MODE_POP_HEAD:
	case K3_RINGACC_ACCESS_MODE_POP_TAIL:
		dev_dbg(ring->parent->dev,
			"memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr,
			access_mode);
		memcpy_fromio(elem, ptr, (4 << ring->elm_size));
		ring->state.occ--;
		break;
	case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
	case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
		dev_dbg(ring->parent->dev,
			"memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr,
			access_mode);
		memcpy_toio(ptr, elem, (4 << ring->elm_size));
		ring->state.free--;
		break;
	default:
		return -EINVAL;
	}

	dev_dbg(ring->parent->dev, "free%d index%d occ%d index%d\n",
		ring->state.free, ring->state.windex, ring->state.occ,
		ring->state.rindex);
	return 0;
}

static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem)
{
	return k3_ringacc_ring_access_io(ring, elem,
					 K3_RINGACC_ACCESS_MODE_PUSH_HEAD);
}

static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem)
{
	return k3_ringacc_ring_access_io(ring, elem,
					 K3_RINGACC_ACCESS_MODE_PUSH_TAIL);
}

static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem)
{
	return k3_ringacc_ring_access_io(ring, elem,
					 K3_RINGACC_ACCESS_MODE_POP_HEAD);
}

static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem)
{
	return k3_ringacc_ring_access_io(ring, elem,
					 K3_RINGACC_ACCESS_MODE_POP_TAIL);
}

/*
 * The element is 48 bits of address + ASEL bits in the ring.
 * ASEL is used by the DMAs and should be removed for the kernel as it is not
 * part of the physical memory address.
 */
static void k3_dmaring_remove_asel_from_elem(u64 *elem)
{
	*elem &= GENMASK_ULL(K3_ADDRESS_ASEL_SHIFT - 1, 0);
}
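
/*
 * Worked example (illustrative values): for an element holding
 * 0x000A00009ABCD000, where 0xA in bits 51:48 is the ASEL qualifier,
 * masking with GENMASK_ULL(47, 0) leaves the plain address
 * 0x00009ABCD000 for the kernel.
 */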

static int k3_dmaring_fwd_pop(struct k3_ring *ring, void *elem)
{
	void *elem_ptr;
	u32 elem_idx;

	/*
	 * DMA rings: the forward ring is always tied to a DMA channel and HW
	 * does not maintain any state data required for POP operation, nor is
	 * it known how many elements were consumed by HW. So, to actually
	 * do POP, the read pointer has to be recalculated every time.
	 */
	ring->state.occ = k3_ringacc_ring_read_occ(ring);
	if (ring->state.windex >= ring->state.occ)
		elem_idx = ring->state.windex - ring->state.occ;
	else
		elem_idx = ring->size - (ring->state.occ - ring->state.windex);

	elem_ptr = k3_ringacc_get_elm_addr(ring, elem_idx);
	memcpy(elem, elem_ptr, (4 << ring->elm_size));
	k3_dmaring_remove_asel_from_elem(elem);

	ring->state.occ--;
	writel(-1, &ring->rt->db);

	dev_dbg(ring->parent->dev, "%s: occ%d Windex%d Rindex%d pos_ptr%px\n",
		__func__, ring->state.occ, ring->state.windex, elem_idx,
		elem_ptr);
	return 0;
}

static int k3_dmaring_reverse_pop(struct k3_ring *ring, void *elem)
{
	void *elem_ptr;

	elem_ptr = k3_ringacc_get_elm_addr(ring, ring->state.rindex);

	if (ring->state.occ) {
		memcpy(elem, elem_ptr, (4 << ring->elm_size));
		k3_dmaring_remove_asel_from_elem(elem);

		ring->state.rindex = (ring->state.rindex + 1) % ring->size;
		ring->state.occ--;
		writel(-1 & K3_DMARING_RT_DB_ENTRY_MASK, &ring->rt->db);
	} else if (ring->state.tdown_complete) {
		dma_addr_t *value = elem;

		*value = CPPI5_TDCM_MARKER;
		writel(K3_DMARING_RT_DB_TDOWN_ACK, &ring->rt->db);
		ring->state.tdown_complete = false;
	}

	dev_dbg(ring->parent->dev, "%s: occ%d index%d pos_ptr%px\n",
		__func__, ring->state.occ, ring->state.rindex, elem_ptr);
	return 0;
}

static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem)
{
	void *elem_ptr;

	elem_ptr = k3_ringacc_get_elm_addr(ring, ring->state.windex);

	memcpy(elem_ptr, elem, (4 << ring->elm_size));
	if (ring->parent->dma_rings) {
		u64 *addr = elem_ptr;

		*addr |= ((u64)ring->asel << K3_ADDRESS_ASEL_SHIFT);
	}

	ring->state.windex = (ring->state.windex + 1) % ring->size;
	ring->state.free--;
	writel(1, &ring->rt->db);

	dev_dbg(ring->parent->dev, "ring_push_mem: free%d index%d\n",
		ring->state.free, ring->state.windex);

	return 0;
}

static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem)
{
	void *elem_ptr;

	elem_ptr = k3_ringacc_get_elm_addr(ring, ring->state.rindex);

	memcpy(elem, elem_ptr, (4 << ring->elm_size));

	ring->state.rindex = (ring->state.rindex + 1) % ring->size;
	ring->state.occ--;
	writel(-1, &ring->rt->db);

	dev_dbg(ring->parent->dev, "ring_pop_mem: occ%d index%d pos_ptr%p\n",
		ring->state.occ, ring->state.rindex, elem_ptr);
	return 0;
}

int k3_ringacc_ring_push(struct k3_ring *ring, void *elem)
{
	int ret = -EOPNOTSUPP;

	if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
		return -EINVAL;

	dev_dbg(ring->parent->dev, "ring_push: free%d index%d\n",
		ring->state.free, ring->state.windex);

	if (k3_ringacc_ring_is_full(ring))
		return -ENOMEM;

	if (ring->ops && ring->ops->push_tail)
		ret = ring->ops->push_tail(ring, elem);

	return ret;
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_push);
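
/*
 * Worked example for the memory-mode push above (illustrative numbers):
 * with size = 64, elm_size = K3_RINGACC_RING_ELSIZE_8 and windex = 63,
 * the element is copied to ring_mem_virt + 63 * 8, windex wraps to
 * (63 + 1) % 64 = 0, and the doorbell write of 1 commits one element.
 */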

int k3_ringacc_ring_push_head(struct k3_ring *ring, void *elem)
{
	int ret = -EOPNOTSUPP;

	if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
		return -EINVAL;

	dev_dbg(ring->parent->dev, "ring_push_head: free%d index%d\n",
		ring->state.free, ring->state.windex);

	if (k3_ringacc_ring_is_full(ring))
		return -ENOMEM;

	if (ring->ops && ring->ops->push_head)
		ret = ring->ops->push_head(ring, elem);

	return ret;
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_push_head);

int k3_ringacc_ring_pop(struct k3_ring *ring, void *elem)
{
	int ret = -EOPNOTSUPP;

	if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
		return -EINVAL;

	if (!ring->state.occ)
		k3_ringacc_ring_update_occ(ring);

	dev_dbg(ring->parent->dev, "ring_pop: occ%d index%d\n", ring->state.occ,
		ring->state.rindex);

	if (!ring->state.occ && !ring->state.tdown_complete)
		return -ENODATA;

	if (ring->ops && ring->ops->pop_head)
		ret = ring->ops->pop_head(ring, elem);

	return ret;
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_pop);

int k3_ringacc_ring_pop_tail(struct k3_ring *ring, void *elem)
{
	int ret = -EOPNOTSUPP;

	if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
		return -EINVAL;

	if (!ring->state.occ)
		k3_ringacc_ring_update_occ(ring);

	dev_dbg(ring->parent->dev, "ring_pop_tail: occ%d index%d\n",
		ring->state.occ, ring->state.rindex);

	if (!ring->state.occ)
		return -ENODATA;

	if (ring->ops && ring->ops->pop_tail)
		ret = ring->ops->pop_tail(ring, elem);

	return ret;
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_pop_tail);
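
/*
 * Illustrative completion-handling loop in a client (a sketch; error
 * handling trimmed, "my_handle_desc" is hypothetical):
 *
 *	u64 desc_dma;
 *
 *	while (!k3_ringacc_ring_pop(compl_ring, &desc_dma)) {
 *		if (desc_dma == CPPI5_TDCM_MARKER)	// teardown marker
 *			break;
 *		my_handle_desc(desc_dma);
 *	}
 */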

struct k3_ringacc *of_k3_ringacc_get_by_phandle(struct device_node *np,
						const char *property)
{
	struct device_node *ringacc_np;
	struct k3_ringacc *ringacc = ERR_PTR(-EPROBE_DEFER);
	struct k3_ringacc *entry;

	ringacc_np = of_parse_phandle(np, property, 0);
	if (!ringacc_np)
		return ERR_PTR(-ENODEV);

	mutex_lock(&k3_ringacc_list_lock);
	list_for_each_entry(entry, &k3_ringacc_list, list)
		if (entry->dev->of_node == ringacc_np) {
			ringacc = entry;
			break;
		}
	mutex_unlock(&k3_ringacc_list_lock);
	of_node_put(ringacc_np);

	return ringacc;
}
EXPORT_SYMBOL_GPL(of_k3_ringacc_get_by_phandle);
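
/*
 * Illustrative lookup from a client driver probe (a sketch; the
 * "ti,ringacc" property name is an assumption of the client binding):
 *
 *	ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
 *	if (IS_ERR(ringacc))
 *		return PTR_ERR(ringacc);	// may be -EPROBE_DEFER
 */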

static int k3_ringacc_probe_dt(struct k3_ringacc *ringacc)
{
	struct device_node *node = ringacc->dev->of_node;
	struct device *dev = ringacc->dev;
	struct platform_device *pdev = to_platform_device(dev);
	int ret;

	if (!node) {
		dev_err(dev, "device tree info unavailable\n");
		return -ENODEV;
	}

	ret = of_property_read_u32(node, "ti,num-rings", &ringacc->num_rings);
	if (ret) {
		dev_err(dev, "ti,num-rings read failure %d\n", ret);
		return ret;
	}

	ringacc->tisci = ti_sci_get_by_phandle(node, "ti,sci");
	if (IS_ERR(ringacc->tisci)) {
		ret = PTR_ERR(ringacc->tisci);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "ti,sci read fail %d\n", ret);
		ringacc->tisci = NULL;
		return ret;
	}

	ret = of_property_read_u32(node, "ti,sci-dev-id",
				   &ringacc->tisci_dev_id);
	if (ret) {
		dev_err(dev, "ti,sci-dev-id read fail %d\n", ret);
		return ret;
	}

	pdev->id = ringacc->tisci_dev_id;

	ringacc->rm_gp_range = devm_ti_sci_get_of_resource(ringacc->tisci, dev,
							   ringacc->tisci_dev_id,
							   "ti,sci-rm-range-gp-rings");
	if (IS_ERR(ringacc->rm_gp_range)) {
		dev_err(dev, "Failed to get GP ring range\n");
		return PTR_ERR(ringacc->rm_gp_range);
	}

	return ti_sci_inta_msi_domain_alloc_irqs(ringacc->dev,
						 ringacc->rm_gp_range);
}

static const struct k3_ringacc_soc_data k3_ringacc_soc_data_sr1 = {
	.dma_ring_reset_quirk = 1,
};

static const struct soc_device_attribute k3_ringacc_socinfo[] = {
	{ .family = "AM65X",
	  .revision = "SR1.0",
	  .data = &k3_ringacc_soc_data_sr1
	},
	{/* sentinel */}
};

static int k3_ringacc_init(struct platform_device *pdev,
			   struct k3_ringacc *ringacc)
{
	const struct soc_device_attribute *soc;
	void __iomem *base_fifo, *base_rt;
	struct device *dev = &pdev->dev;
	int ret, i;

	dev->msi.domain = of_msi_get_domain(dev, dev->of_node,
					    DOMAIN_BUS_TI_SCI_INTA_MSI);
	if (!dev->msi.domain)
		return -EPROBE_DEFER;

	ret = k3_ringacc_probe_dt(ringacc);
	if (ret)
		return ret;

	soc = soc_device_match(k3_ringacc_socinfo);
	if (soc && soc->data) {
		const struct k3_ringacc_soc_data *soc_data = soc->data;

		ringacc->dma_ring_reset_quirk = soc_data->dma_ring_reset_quirk;
	}

	base_rt = devm_platform_ioremap_resource_byname(pdev, "rt");
	if (IS_ERR(base_rt))
		return PTR_ERR(base_rt);

	base_fifo = devm_platform_ioremap_resource_byname(pdev, "fifos");
	if (IS_ERR(base_fifo))
		return PTR_ERR(base_fifo);

	ringacc->proxy_gcfg = devm_platform_ioremap_resource_byname(pdev, "proxy_gcfg");
	if (IS_ERR(ringacc->proxy_gcfg))
		return PTR_ERR(ringacc->proxy_gcfg);

	ringacc->proxy_target_base = devm_platform_ioremap_resource_byname(pdev,
									   "proxy_target");
	if (IS_ERR(ringacc->proxy_target_base))
		return PTR_ERR(ringacc->proxy_target_base);

	ringacc->num_proxies = readl(&ringacc->proxy_gcfg->config) &
			       K3_RINGACC_PROXY_CFG_THREADS_MASK;

	ringacc->rings = devm_kzalloc(dev,
				      sizeof(*ringacc->rings) *
				      ringacc->num_rings,
				      GFP_KERNEL);
	ringacc->rings_inuse = devm_bitmap_zalloc(dev, ringacc->num_rings,
						  GFP_KERNEL);
	ringacc->proxy_inuse = devm_bitmap_zalloc(dev, ringacc->num_proxies,
						  GFP_KERNEL);

	if (!ringacc->rings || !ringacc->rings_inuse || !ringacc->proxy_inuse)
		return -ENOMEM;

	for (i = 0; i < ringacc->num_rings; i++) {
		ringacc->rings[i].rt = base_rt +
				       K3_RINGACC_RT_REGS_STEP * i;
		ringacc->rings[i].fifos = base_fifo +
					  K3_RINGACC_FIFO_REGS_STEP * i;
		ringacc->rings[i].parent = ringacc;
		ringacc->rings[i].ring_id = i;
		ringacc->rings[i].proxy_id = K3_RINGACC_PROXY_NOT_USED;
	}

	ringacc->tisci_ring_ops = &ringacc->tisci->ops.rm_ring_ops;

	dev_info(dev, "Ring Accelerator probed rings:%u, gp-rings[%u,%u] sci-dev-id:%u\n",
		 ringacc->num_rings,
		 ringacc->rm_gp_range->desc[0].start,
		 ringacc->rm_gp_range->desc[0].num,
		 ringacc->tisci_dev_id);
	dev_info(dev, "dma-ring-reset-quirk: %s\n",
		 ringacc->dma_ring_reset_quirk ? "enabled" : "disabled");
	dev_info(dev, "RA Proxy rev. %08x, num_proxies:%u\n",
		 readl(&ringacc->proxy_gcfg->revision), ringacc->num_proxies);

	return 0;
}

struct ringacc_match_data {
	struct k3_ringacc_ops ops;
};

static struct ringacc_match_data k3_ringacc_data = {
	.ops = {
		.init = k3_ringacc_init,
	},
};

/* Match table for of_platform binding */
static const struct of_device_id k3_ringacc_of_match[] = {
	{ .compatible = "ti,am654-navss-ringacc", .data = &k3_ringacc_data, },
	{},
};
MODULE_DEVICE_TABLE(of, k3_ringacc_of_match);

struct k3_ringacc *k3_ringacc_dmarings_init(struct platform_device *pdev,
					    struct k3_ringacc_init_data *data)
{
	struct device *dev = &pdev->dev;
	struct k3_ringacc *ringacc;
	void __iomem *base_rt;
	int i;

	ringacc = devm_kzalloc(dev, sizeof(*ringacc), GFP_KERNEL);
	if (!ringacc)
		return ERR_PTR(-ENOMEM);

	ringacc->dev = dev;
	ringacc->dma_rings = true;
	ringacc->num_rings = data->num_rings;
	ringacc->tisci = data->tisci;
	ringacc->tisci_dev_id = data->tisci_dev_id;

	mutex_init(&ringacc->req_lock);

	base_rt = devm_platform_ioremap_resource_byname(pdev, "ringrt");
	if (IS_ERR(base_rt))
		return ERR_CAST(base_rt);

	ringacc->rings = devm_kzalloc(dev,
				      sizeof(*ringacc->rings) *
				      ringacc->num_rings * 2,
				      GFP_KERNEL);
	ringacc->rings_inuse = devm_bitmap_zalloc(dev, ringacc->num_rings,
						  GFP_KERNEL);

	if (!ringacc->rings || !ringacc->rings_inuse)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < ringacc->num_rings; i++) {
		struct k3_ring *ring = &ringacc->rings[i];

		ring->rt = base_rt + K3_DMARING_RT_REGS_STEP * i;
		ring->parent = ringacc;
		ring->ring_id = i;
		ring->proxy_id = K3_RINGACC_PROXY_NOT_USED;

		ring = &ringacc->rings[ringacc->num_rings + i];
		ring->rt = base_rt + K3_DMARING_RT_REGS_STEP * i +
			   K3_DMARING_RT_REGS_REVERSE_OFS;
		ring->parent = ringacc;
		ring->ring_id = i;
		ring->proxy_id = K3_RINGACC_PROXY_NOT_USED;
		ring->flags = K3_RING_FLAG_REVERSE;
	}

	ringacc->tisci_ring_ops = &ringacc->tisci->ops.rm_ring_ops;

	dev_info(dev, "Number of rings: %u\n", ringacc->num_rings);

	return ringacc;
}
EXPORT_SYMBOL_GPL(k3_ringacc_dmarings_init);
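
/*
 * Layout note for the dual rings initialized above (derived from the
 * K3_DMARING_* defines): forward ring i has its RT registers at
 * base_rt + i * 0x2000, while its reverse counterpart, stored at
 * rings[num_rings + i], sits another 0x1000 into the same window and
 * shares the forward ring's memory once configured.
 */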

static int k3_ringacc_probe(struct platform_device *pdev)
{
	const struct ringacc_match_data *match_data;
	struct device *dev = &pdev->dev;
	struct k3_ringacc *ringacc;
	int ret;

	match_data = of_device_get_match_data(&pdev->dev);
	if (!match_data)
		return -ENODEV;

	ringacc = devm_kzalloc(dev, sizeof(*ringacc), GFP_KERNEL);
	if (!ringacc)
		return -ENOMEM;

	ringacc->dev = dev;
	mutex_init(&ringacc->req_lock);
	ringacc->ops = &match_data->ops;

	ret = ringacc->ops->init(pdev, ringacc);
	if (ret)
		return ret;

	dev_set_drvdata(dev, ringacc);

	mutex_lock(&k3_ringacc_list_lock);
	list_add_tail(&ringacc->list, &k3_ringacc_list);
	mutex_unlock(&k3_ringacc_list_lock);

	return 0;
}

static void k3_ringacc_remove(struct platform_device *pdev)
{
	struct k3_ringacc *ringacc = dev_get_drvdata(&pdev->dev);

	mutex_lock(&k3_ringacc_list_lock);
	list_del(&ringacc->list);
	mutex_unlock(&k3_ringacc_list_lock);
}

static struct platform_driver k3_ringacc_driver = {
	.probe = k3_ringacc_probe,
	.remove_new = k3_ringacc_remove,
	.driver = {
		.name = "k3-ringacc",
		.of_match_table = k3_ringacc_of_match,
		.suppress_bind_attrs = true,
	},
};
module_platform_driver(k3_ringacc_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TI Ringacc driver for K3 SOCs");
MODULE_AUTHOR("Grygorii Strashko <grygorii.strashko@ti.com>");