// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
 */

#include <linux/export.h>
#include "dp_rx.h"
#include "debug.h"
#include "hif.h"

const struct ce_attr ath11k_host_ce_config_ipq8074[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: host->target WMI (mac1) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE8: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE9: host->target WMI (mac2) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE10: target->host HTT */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE11: Not used */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},
};
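/* QCA6390 exposes fewer copy engines than IPQ8074: CE9-CE11 are absent,
 * CE0 has no send completion callback, and CE8 (target autonomous
 * hif_memcpy) is configured with interrupts left enabled.
 */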
const struct ce_attr ath11k_host_ce_config_qca6390[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: host->target WMI (mac1) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE8: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},
};

const struct ce_attr ath11k_host_ce_config_qcn9074[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 32,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
	},
};

static bool ath11k_ce_need_shadow_fix(int ce_id)
{
	/* only ce4 needs shadow workaround */
	if (ce_id == 4)
		return true;
	return false;
}

void ath11k_ce_stop_shadow_timers(struct ath11k_base *ab)
{
	int i;

	if (!ab->hw_params.supports_shadow_regs)
		return;

	for (i = 0; i < ab->hw_params.ce_count; i++)
		if (ath11k_ce_need_shadow_fix(i))
			ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]);
}
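/* Post a single rx buffer descriptor to the pipe's destination SRNG.
 * Called with ab->ce.ce_lock held; the SRNG lock is taken internally.
 */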
static int ath11k_ce_rx_buf_enqueue_pipe(struct ath11k_ce_pipe *pipe,
					 struct sk_buff *skb, dma_addr_t paddr)
{
	struct ath11k_base *ab = pipe->ab;
	struct ath11k_ce_ring *ring = pipe->dest_ring;
	struct hal_srng *srng;
	unsigned int write_index;
	unsigned int nentries_mask = ring->nentries_mask;
	u32 *desc;
	int ret;

	lockdep_assert_held(&ab->ce.ce_lock);

	write_index = ring->write_index;

	srng = &ab->hal.srng_list[ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
		ret = -ENOSPC;
		goto exit;
	}

	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOSPC;
		goto exit;
	}

	ath11k_hal_ce_dst_set_desc(desc, paddr);

	ring->skb[write_index] = skb;
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	ring->write_index = write_index;

	pipe->rx_buf_needed--;

	ret = 0;
exit:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return ret;
}

static int ath11k_ce_rx_post_pipe(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0;

	if (!(pipe->dest_ring || pipe->status_ring))
		return 0;

	spin_lock_bh(&ab->ce.ce_lock);
	while (pipe->rx_buf_needed) {
		skb = dev_alloc_skb(pipe->buf_sz);
		if (!skb) {
			ret = -ENOMEM;
			goto exit;
		}

		WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4));

		paddr = dma_map_single(ab->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(ab->dev, paddr))) {
			ath11k_warn(ab, "failed to dma map ce rx buf\n");
			dev_kfree_skb_any(skb);
			ret = -EIO;
			goto exit;
		}

		ATH11K_SKB_RXCB(skb)->paddr = paddr;

		ret = ath11k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr);

		if (ret) {
			ath11k_warn(ab, "failed to enqueue rx buf: %d\n", ret);
			dma_unmap_single(ab->dev, paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			goto exit;
		}
	}

exit:
	spin_unlock_bh(&ab->ce.ce_lock);
	return ret;
}

static int ath11k_ce_completed_recv_next(struct ath11k_ce_pipe *pipe,
					 struct sk_buff **skb, int *nbytes)
{
	struct ath11k_base *ab = pipe->ab;
	struct hal_srng *srng;
	unsigned int sw_index;
	unsigned int nentries_mask;
	u32 *desc;
	int ret = 0;

	spin_lock_bh(&ab->ce.ce_lock);

	sw_index = pipe->dest_ring->sw_index;
	nentries_mask = pipe->dest_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->status_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
	if (!desc) {
		ret = -EIO;
		goto err;
	}

	*nbytes = ath11k_hal_ce_dst_status_get_length(desc);

	*skb = pipe->dest_ring->skb[sw_index];
	pipe->dest_ring->skb[sw_index] = NULL;

	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	pipe->dest_ring->sw_index = sw_index;

	pipe->rx_buf_needed++;
err:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return ret;
}
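/* Reap completed rx entries, pass the filled buffers up through the
 * pipe's recv_cb and replenish the destination ring. A failed
 * replenish (other than a full ring) arms the retry timer.
 */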
static void ath11k_ce_recv_process_cb(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct sk_buff *skb;
	struct sk_buff_head list;
	unsigned int nbytes, max_nbytes;
	int ret;

	__skb_queue_head_init(&list);
	while (ath11k_ce_completed_recv_next(pipe, &skb, &nbytes) == 0) {
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		if (unlikely(max_nbytes < nbytes || nbytes == 0)) {
			ath11k_warn(ab, "unexpected rx length (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath11k_dbg(ab, ATH11K_DBG_CE, "rx ce pipe %d len %d\n",
			   pipe->pipe_num, skb->len);
		pipe->recv_cb(ab, skb);
	}

	ret = ath11k_ce_rx_post_pipe(pipe);
	if (ret && ret != -ENOSPC) {
		ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
			    pipe->pipe_num, ret);
		mod_timer(&ab->rx_replenish_retry,
			  jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);
	}
}

static struct sk_buff *ath11k_ce_completed_send_next(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct hal_srng *srng;
	unsigned int sw_index;
	unsigned int nentries_mask;
	struct sk_buff *skb;
	u32 *desc;

	spin_lock_bh(&ab->ce.ce_lock);

	sw_index = pipe->src_ring->sw_index;
	nentries_mask = pipe->src_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	desc = ath11k_hal_srng_src_reap_next(ab, srng);
	if (!desc) {
		skb = ERR_PTR(-EIO);
		goto err_unlock;
	}

	skb = pipe->src_ring->skb[sw_index];

	pipe->src_ring->skb[sw_index] = NULL;

	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	pipe->src_ring->sw_index = sw_index;

err_unlock:
	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return skb;
}

static void ath11k_ce_tx_process_cb(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct sk_buff *skb;
	struct sk_buff_head list;

	__skb_queue_head_init(&list);
	while (!IS_ERR(skb = ath11k_ce_completed_send_next(pipe))) {
		if (!skb)
			continue;

		dma_unmap_single(ab->dev, ATH11K_SKB_CB(skb)->paddr, skb->len,
				 DMA_TO_DEVICE);

		if ((!pipe->send_cb) || ab->hw_params.credit_flow) {
			dev_kfree_skb_any(skb);
			continue;
		}

		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath11k_dbg(ab, ATH11K_DBG_CE, "tx ce pipe %d len %d\n",
			   pipe->pipe_num, skb->len);
		pipe->send_cb(ab, skb);
	}
}

static void ath11k_ce_srng_msi_ring_params_setup(struct ath11k_base *ab, u32 ce_id,
						 struct hal_srng_params *ring_params)
{
	u32 msi_data_start;
	u32 msi_data_count, msi_data_idx;
	u32 msi_irq_start;
	u32 addr_lo;
	u32 addr_hi;
	int ret;

	ret = ath11k_get_user_msi_vector(ab, "CE",
					 &msi_data_count, &msi_data_start,
					 &msi_irq_start);

	if (ret)
		return;

	ath11k_get_msi_address(ab, &addr_lo, &addr_hi);
	ath11k_get_ce_msi_idx(ab, ce_id, &msi_data_idx);

	ring_params->msi_addr = addr_lo;
	ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
	ring_params->msi_data = (msi_data_idx % msi_data_count) + msi_data_start;
	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}
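/* Set up the HAL SRNG backing one CE ring. MSI and interrupt
 * thresholds are programmed only for pipes without CE_ATTR_DIS_INTR:
 * source and status rings interrupt per completed batch, and
 * destination rings add a low-threshold interrupt so rx buffers can
 * be replenished before the ring runs dry.
 */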
static int ath11k_ce_init_ring(struct ath11k_base *ab,
			       struct ath11k_ce_ring *ce_ring,
			       int ce_id, enum hal_ring_type type)
{
	struct hal_srng_params params = {};
	int ret;

	params.ring_base_paddr = ce_ring->base_addr_ce_space;
	params.ring_base_vaddr = ce_ring->base_addr_owner_space;
	params.num_entries = ce_ring->nentries;

	if (!(CE_ATTR_DIS_INTR & ab->hw_params.host_ce_config[ce_id].flags))
		ath11k_ce_srng_msi_ring_params_setup(ab, ce_id, &params);

	switch (type) {
	case HAL_CE_SRC:
		if (!(CE_ATTR_DIS_INTR & ab->hw_params.host_ce_config[ce_id].flags))
			params.intr_batch_cntr_thres_entries = 1;
		break;
	case HAL_CE_DST:
		params.max_buffer_len = ab->hw_params.host_ce_config[ce_id].src_sz_max;
		if (!(ab->hw_params.host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
			params.intr_timer_thres_us = 1024;
			params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
			params.low_threshold = ce_ring->nentries - 3;
		}
		break;
	case HAL_CE_DST_STATUS:
		if (!(ab->hw_params.host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
			params.intr_batch_cntr_thres_entries = 1;
			params.intr_timer_thres_us = 0x1000;
		}
		break;
	default:
		ath11k_warn(ab, "Invalid CE ring type %d\n", type);
		return -EINVAL;
	}

	/* TODO: Init other params needed by HAL to init the ring */

	ret = ath11k_hal_srng_setup(ab, type, ce_id, 0, &params);
	if (ret < 0) {
		ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
			    ret, ce_id);
		return ret;
	}

	ce_ring->hal_ring_id = ret;

	if (ab->hw_params.supports_shadow_regs &&
	    ath11k_ce_need_shadow_fix(ce_id))
		ath11k_dp_shadow_init_timer(ab, &ab->ce.hp_timer[ce_id],
					    ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
					    ce_ring->hal_ring_id);

	return 0;
}

static struct ath11k_ce_ring *
ath11k_ce_alloc_ring(struct ath11k_base *ab, int nentries, int desc_sz)
{
	struct ath11k_ce_ring *ce_ring;
	dma_addr_t base_addr;

	ce_ring = kzalloc(struct_size(ce_ring, skb, nentries), GFP_KERNEL);
	if (ce_ring == NULL)
		return ERR_PTR(-ENOMEM);

	ce_ring->nentries = nentries;
	ce_ring->nentries_mask = nentries - 1;

	/* Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	ce_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ab->dev,
				   nentries * desc_sz + CE_DESC_RING_ALIGN,
				   &base_addr, GFP_KERNEL);
	if (!ce_ring->base_addr_owner_space_unaligned) {
		kfree(ce_ring);
		return ERR_PTR(-ENOMEM);
	}

	ce_ring->base_addr_ce_space_unaligned = base_addr;

	ce_ring->base_addr_owner_space = PTR_ALIGN(
			ce_ring->base_addr_owner_space_unaligned,
			CE_DESC_RING_ALIGN);
	ce_ring->base_addr_ce_space = ALIGN(
			ce_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);

	return ce_ring;
}

static int ath11k_ce_alloc_pipe(struct ath11k_base *ab, int ce_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
	const struct ce_attr *attr = &ab->hw_params.host_ce_config[ce_id];
	struct ath11k_ce_ring *ring;
	int nentries;
	int desc_sz;

	pipe->attr_flags = attr->flags;

	if (attr->src_nentries) {
		pipe->send_cb = attr->send_cb;
		nentries = roundup_pow_of_two(attr->src_nentries);
		desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
		ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->src_ring = ring;
	}

	if (attr->dest_nentries) {
		pipe->recv_cb = attr->recv_cb;
		nentries = roundup_pow_of_two(attr->dest_nentries);
		desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
		ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->dest_ring = ring;

		desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
		ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->status_ring = ring;
	}

	return 0;
}
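/* Bottom half for a CE interrupt: handle tx completions on pipes that
 * have a source ring and rx completions on pipes with a recv_cb.
 */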
void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
	const struct ce_attr *attr = &ab->hw_params.host_ce_config[ce_id];

	if (attr->src_nentries)
		ath11k_ce_tx_process_cb(pipe);

	if (pipe->recv_cb)
		ath11k_ce_recv_process_cb(pipe);
}

void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
	const struct ce_attr *attr = &ab->hw_params.host_ce_config[pipe_id];

	if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && attr->src_nentries)
		ath11k_ce_tx_process_cb(pipe);
}
EXPORT_SYMBOL(ath11k_ce_per_engine_service);

int ath11k_ce_send(struct ath11k_base *ab, struct sk_buff *skb, u8 pipe_id,
		   u16 transfer_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
	struct hal_srng *srng;
	u32 *desc;
	unsigned int write_index, sw_index;
	unsigned int nentries_mask;
	int ret = 0;
	u8 byte_swap_data = 0;
	int num_used;

	/* Check if some entries could be regained by handling tx completion if
	 * the CE has interrupts disabled and the used entries is more than the
	 * defined usage threshold.
	 */
	if (pipe->attr_flags & CE_ATTR_DIS_INTR) {
		spin_lock_bh(&ab->ce.ce_lock);
		write_index = pipe->src_ring->write_index;

		sw_index = pipe->src_ring->sw_index;

		if (write_index >= sw_index)
			num_used = write_index - sw_index;
		else
			num_used = pipe->src_ring->nentries - sw_index +
				   write_index;

		spin_unlock_bh(&ab->ce.ce_lock);

		if (num_used > ATH11K_CE_USAGE_THRESHOLD)
			ath11k_ce_poll_send_completed(ab, pipe->pipe_num);
	}

	if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
		return -ESHUTDOWN;

	spin_lock_bh(&ab->ce.ce_lock);

	write_index = pipe->src_ring->write_index;
	nentries_mask = pipe->src_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
		ath11k_hal_srng_access_end(ab, srng);
		ret = -ENOBUFS;
		goto err_unlock;
	}

	desc = ath11k_hal_srng_src_get_next_reaped(ab, srng);
	if (!desc) {
		ath11k_hal_srng_access_end(ab, srng);
		ret = -ENOBUFS;
		goto err_unlock;
	}

	if (pipe->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
		byte_swap_data = 1;

	ath11k_hal_ce_src_set_desc(desc, ATH11K_SKB_CB(skb)->paddr,
				   skb->len, transfer_id, byte_swap_data);

	pipe->src_ring->skb[write_index] = skb;
	pipe->src_ring->write_index = CE_RING_IDX_INCR(nentries_mask,
						       write_index);

	ath11k_hal_srng_access_end(ab, srng);

	if (ath11k_ce_need_shadow_fix(pipe_id))
		ath11k_dp_shadow_start_timer(ab, srng, &ab->ce.hp_timer[pipe_id]);

	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return 0;

err_unlock:
	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return ret;
}

static void ath11k_ce_rx_pipe_cleanup(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct ath11k_ce_ring *ring = pipe->dest_ring;
	struct sk_buff *skb;
	int i;

	if (!(ring && pipe->buf_sz))
		return;

	for (i = 0; i < ring->nentries; i++) {
		skb = ring->skb[i];
		if (!skb)
			continue;

		ring->skb[i] = NULL;
		dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

static void ath11k_ce_shadow_config(struct ath11k_base *ab)
{
	int i;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		if (ab->hw_params.host_ce_config[i].src_nentries)
			ath11k_hal_srng_update_shadow_config(ab,
							     HAL_CE_SRC, i);

		if (ab->hw_params.host_ce_config[i].dest_nentries) {
			ath11k_hal_srng_update_shadow_config(ab,
							     HAL_CE_DST, i);

			ath11k_hal_srng_update_shadow_config(ab,
							     HAL_CE_DST_STATUS, i);
		}
	}
}
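/* Return the current shadow register configuration, generating it on
 * first use: non-CE SRNGs are configured first, then all CE SRNGs.
 */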
void ath11k_ce_get_shadow_config(struct ath11k_base *ab,
				 u32 **shadow_cfg, u32 *shadow_cfg_len)
{
	if (!ab->hw_params.supports_shadow_regs)
		return;

	ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);

	/* shadow is already configured */
	if (*shadow_cfg_len)
		return;

	/* shadow isn't configured yet, configure now.
	 * non-CE srngs are configured firstly, then
	 * all CE srngs.
	 */
	ath11k_hal_srng_shadow_config(ab);
	ath11k_ce_shadow_config(ab);

	/* get the shadow configuration */
	ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);
}
EXPORT_SYMBOL(ath11k_ce_get_shadow_config);

void ath11k_ce_cleanup_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int pipe_num;

	ath11k_ce_stop_shadow_timers(ab);

	for (pipe_num = 0; pipe_num < ab->hw_params.ce_count; pipe_num++) {
		pipe = &ab->ce.ce_pipe[pipe_num];
		ath11k_ce_rx_pipe_cleanup(pipe);

		/* Cleanup any src CE's which have interrupts disabled */
		ath11k_ce_poll_send_completed(ab, pipe_num);

		/* NOTE: Should we also clean up tx buffer in all pipes? */
	}
}
EXPORT_SYMBOL(ath11k_ce_cleanup_pipes);

void ath11k_ce_rx_post_buf(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int i;
	int ret;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		pipe = &ab->ce.ce_pipe[i];
		ret = ath11k_ce_rx_post_pipe(pipe);
		if (ret) {
			if (ret == -ENOSPC)
				continue;

			ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
				    i, ret);
			mod_timer(&ab->rx_replenish_retry,
				  jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);

			return;
		}
	}
}
EXPORT_SYMBOL(ath11k_ce_rx_post_buf);

void ath11k_ce_rx_replenish_retry(struct timer_list *t)
{
	struct ath11k_base *ab = timer_container_of(ab, t, rx_replenish_retry);

	ath11k_ce_rx_post_buf(ab);
}

int ath11k_ce_init_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int i;
	int ret;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		pipe = &ab->ce.ce_pipe[i];

		if (pipe->src_ring) {
			ret = ath11k_ce_init_ring(ab, pipe->src_ring, i,
						  HAL_CE_SRC);
			if (ret) {
				ath11k_warn(ab, "failed to init src ring: %d\n",
					    ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->src_ring->write_index = 0;
			pipe->src_ring->sw_index = 0;
		}

		if (pipe->dest_ring) {
			ret = ath11k_ce_init_ring(ab, pipe->dest_ring, i,
						  HAL_CE_DST);
			if (ret) {
				ath11k_warn(ab, "failed to init dest ring: %d\n",
					    ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->rx_buf_needed = pipe->dest_ring->nentries ?
					      pipe->dest_ring->nentries - 2 : 0;

			pipe->dest_ring->write_index = 0;
			pipe->dest_ring->sw_index = 0;
		}

		if (pipe->status_ring) {
			ret = ath11k_ce_init_ring(ab, pipe->status_ring, i,
						  HAL_CE_DST_STATUS);
			if (ret) {
				ath11k_warn(ab, "failed to init dest status ring: %d\n",
					    ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->status_ring->write_index = 0;
			pipe->status_ring->sw_index = 0;
		}
	}

	return 0;
}
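/* Release the DMA-coherent descriptor memory and ring state for every
 * pipe, stopping the shadow timer on pipes that use the workaround.
 */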
void ath11k_ce_free_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	struct ath11k_ce_ring *ce_ring;
	int desc_sz;
	int i;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		pipe = &ab->ce.ce_pipe[i];

		if (ath11k_ce_need_shadow_fix(i))
			ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]);

		if (pipe->src_ring) {
			desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
			ce_ring = pipe->src_ring;
			dma_free_coherent(ab->dev,
					  pipe->src_ring->nentries * desc_sz +
					  CE_DESC_RING_ALIGN,
					  ce_ring->base_addr_owner_space_unaligned,
					  ce_ring->base_addr_ce_space_unaligned);
			kfree(pipe->src_ring);
			pipe->src_ring = NULL;
		}

		if (pipe->dest_ring) {
			desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
			ce_ring = pipe->dest_ring;
			dma_free_coherent(ab->dev,
					  pipe->dest_ring->nentries * desc_sz +
					  CE_DESC_RING_ALIGN,
					  ce_ring->base_addr_owner_space_unaligned,
					  ce_ring->base_addr_ce_space_unaligned);
			kfree(pipe->dest_ring);
			pipe->dest_ring = NULL;
		}

		if (pipe->status_ring) {
			desc_sz =
			  ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
			ce_ring = pipe->status_ring;
			dma_free_coherent(ab->dev,
					  pipe->status_ring->nentries * desc_sz +
					  CE_DESC_RING_ALIGN,
					  ce_ring->base_addr_owner_space_unaligned,
					  ce_ring->base_addr_ce_space_unaligned);
			kfree(pipe->status_ring);
			pipe->status_ring = NULL;
		}
	}
}
EXPORT_SYMBOL(ath11k_ce_free_pipes);

int ath11k_ce_alloc_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int i;
	int ret;
	const struct ce_attr *attr;

	spin_lock_init(&ab->ce.ce_lock);

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		attr = &ab->hw_params.host_ce_config[i];
		pipe = &ab->ce.ce_pipe[i];
		pipe->pipe_num = i;
		pipe->ab = ab;
		pipe->buf_sz = attr->src_sz_max;

		ret = ath11k_ce_alloc_pipe(ab, i);
		if (ret) {
			/* Free any partial successful allocation */
			ath11k_ce_free_pipes(ab);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(ath11k_ce_alloc_pipes);

/* For Big Endian Host, Copy Engine byte_swap is enabled
 * When Copy Engine does byte_swap, need to byte swap again for the
 * Host to get/put buffer content in the correct byte order
 */
void ath11k_ce_byte_swap(void *mem, u32 len)
{
	int i;

	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
		if (!mem)
			return;

		for (i = 0; i < (len / 4); i++) {
			*(u32 *)mem = swab32(*(u32 *)mem);
			mem += 4;
		}
	}
}

int ath11k_ce_get_attr_flags(struct ath11k_base *ab, int ce_id)
{
	if (ce_id >= ab->hw_params.ce_count)
		return -EINVAL;

	return ab->hw_params.host_ce_config[ce_id].flags;
}
EXPORT_SYMBOL(ath11k_ce_get_attr_flags);