// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 */

#include "core.h"
#include "dp_tx.h"
#include "hal_tx.h"
#include "debug.h"
#include "dp_rx.h"
#include "peer.h"

static void ath11k_dp_htt_htc_tx_complete(struct ath11k_base *ab,
					  struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;

	/* TODO: Any other peer specific DP cleanup */

	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find(ab, vdev_id, addr);
	if (!peer) {
		ath11k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
			    addr, vdev_id);
		spin_unlock_bh(&ab->base_lock);
		return;
	}

	ath11k_peer_rx_tid_cleanup(ar, peer);
	spin_unlock_bh(&ab->base_lock);
}

int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	u32 reo_dest;
	int ret = 0, tid;

	/* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
	reo_dest = ar->dp.mac_id + 1;
	ret = ath11k_wmi_set_peer_param(ar, addr, vdev_id,
					WMI_PEER_SET_DEFAULT_ROUTING,
					DP_RX_HASH_ENABLE | (reo_dest << 1));

	if (ret) {
		ath11k_warn(ab, "failed to set default routing %d peer :%pM vdev_id :%d\n",
			    ret, addr, vdev_id);
		return ret;
	}

	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
		ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id,
					       tid, 1, 0);
		if (ret) {
			ath11k_warn(ab, "failed to set up rx tid queue for tid %d: %d\n",
				    tid, ret);
			goto peer_clean;
		}
	}

	/* TODO: Setup other peer specific resource used in data path */

	return 0;

peer_clean:
	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, addr);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to del rx tid\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	for (; tid >= 0; tid--)
		ath11k_peer_rx_tid_delete(ar, peer, tid);

	spin_unlock_bh(&ab->base_lock);

	return ret;
}
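
/* Worked example of the default-routing value built in
 * ath11k_dp_peer_setup() above (an illustration inferred from the code,
 * not taken from HW documentation): bit 0 carries the hash-enable flag
 * and the bits above it the reo_dest ring id, since the id is shifted
 * left by one before being OR'ed in.  For the radio with mac_id 0:
 *
 *	reo_dest = 0 + 1 = 1
 *	param    = DP_RX_HASH_ENABLE | (1 << 1)
 *
 * i.e. REO destination ring 1 receives that peer's rx traffic.
 */
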
void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
{
	if (!ring->vaddr_unaligned)
		return;

	dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
			  ring->paddr_unaligned);

	ring->vaddr_unaligned = NULL;
}

int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
			 enum hal_ring_type type, int ring_num,
			 int mac_id, int num_entries)
{
	struct hal_srng_params params = { 0 };
	int entry_sz = ath11k_hal_srng_get_entrysize(type);
	int max_entries = ath11k_hal_srng_get_max_entries(type);
	int ret;

	if (max_entries < 0 || entry_sz < 0)
		return -EINVAL;

	if (num_entries > max_entries)
		num_entries = max_entries;

	ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;
	ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
						   &ring->paddr_unaligned,
						   GFP_KERNEL);
	if (!ring->vaddr_unaligned)
		return -ENOMEM;

	ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
	ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
		      (unsigned long)ring->vaddr_unaligned);

	params.ring_base_vaddr = ring->vaddr;
	params.ring_base_paddr = ring->paddr;
	params.num_entries = num_entries;

	switch (type) {
	case HAL_REO_DST:
		params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_RX;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_RXDMA_BUF:
	case HAL_RXDMA_MONITOR_BUF:
	case HAL_RXDMA_MONITOR_STATUS:
		params.low_threshold = num_entries >> 3;
		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
		params.intr_batch_cntr_thres_entries = 0;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_WBM2SW_RELEASE:
		if (ring_num < 3) {
			params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_TX;
			params.intr_timer_thres_us =
					HAL_SRNG_INT_TIMER_THRESHOLD_TX;
			break;
		}
		/* follow through when ring_num >= 3 */
		/* fall through */
	case HAL_REO_EXCEPTION:
	case HAL_REO_REINJECT:
	case HAL_REO_CMD:
	case HAL_REO_STATUS:
	case HAL_TCL_DATA:
	case HAL_TCL_CMD:
	case HAL_TCL_STATUS:
	case HAL_WBM_IDLE_LINK:
	case HAL_SW2WBM_RELEASE:
	case HAL_RXDMA_DST:
	case HAL_RXDMA_MONITOR_DST:
	case HAL_RXDMA_MONITOR_DESC:
	case HAL_RXDMA_DIR_BUF:
		params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
		break;
	default:
		ath11k_warn(ab, "Not a valid ring type in dp :%d\n", type);
		return -EINVAL;
	}

	ret = ath11k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
	if (ret < 0) {
		ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
			    ret, ring_num);
		return ret;
	}

	ring->ring_id = ret;

	return 0;
}
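
/* Illustration of the alignment logic in ath11k_dp_srng_setup() (example
 * numbers only): the ring is over-allocated by HAL_RING_BASE_ALIGN - 1
 * bytes so that PTR_ALIGN() can always find an aligned base inside the
 * buffer.  If HAL_RING_BASE_ALIGN were 8 and dma_alloc_coherent()
 * returned a virtual address ending in 0x4, then:
 *
 *	vaddr = PTR_ALIGN(vaddr_unaligned, 8)   advances by 4 bytes
 *	paddr = paddr_unaligned + 4             advances by the same delta
 *
 * keeping the virtual and physical views of the ring base in sync.
 */
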
static void ath11k_dp_srng_common_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;

	ath11k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
	ath11k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
	ath11k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
	for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
		ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
		ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
	}
	ath11k_dp_srng_cleanup(ab, &dp->reo_reinject_ring);
	ath11k_dp_srng_cleanup(ab, &dp->rx_rel_ring);
	ath11k_dp_srng_cleanup(ab, &dp->reo_except_ring);
	ath11k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
	ath11k_dp_srng_cleanup(ab, &dp->reo_status_ring);
}

static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng;
	int i, ret;

	ret = ath11k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
				   HAL_SW2WBM_RELEASE, 0, 0,
				   DP_WBM_RELEASE_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up wbm2sw_release ring :%d\n",
			    ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
				   DP_TCL_CMD_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
				   0, 0, DP_TCL_STATUS_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up tcl_status ring :%d\n", ret);
		goto err;
	}

	for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
		ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
					   HAL_TCL_DATA, i, 0,
					   DP_TCL_DATA_RING_SIZE);
		if (ret) {
			ath11k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
				    i, ret);
			goto err;
		}

		ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
					   HAL_WBM2SW_RELEASE, i, 0,
					   DP_TX_COMP_RING_SIZE);
		if (ret) {
			ath11k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
				    i, ret);
			goto err;
		}

		srng = &ab->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id];
		ath11k_hal_tx_init_data_ring(ab, srng);
	}

	ret = ath11k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
				   0, 0, DP_REO_REINJECT_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_reinject ring :%d\n",
			    ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
				   3, 0, DP_RX_RELEASE_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
				   0, 0, DP_REO_EXCEPTION_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_exception ring :%d\n",
			    ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
				   0, 0, DP_REO_CMD_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret);
		goto err;
	}

	srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
	ath11k_hal_reo_init_cmd_ring(ab, srng);

	ret = ath11k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
				   0, 0, DP_REO_STATUS_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_status ring :%d\n", ret);
		goto err;
	}

	ath11k_hal_reo_hw_setup(ab);

	return 0;

err:
	ath11k_dp_srng_common_cleanup(ab);

	return ret;
}

static void ath11k_dp_scatter_idle_link_desc_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
	int i;

	for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
		if (!slist[i].vaddr)
			continue;

		dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
				  slist[i].vaddr, slist[i].paddr);
		slist[i].vaddr = NULL;
	}
}

static int ath11k_dp_scatter_idle_link_desc_setup(struct ath11k_base *ab,
						  int size,
						  u32 n_link_desc_bank,
						  u32 n_link_desc,
						  u32 last_bank_sz)
{
	struct ath11k_dp *dp = &ab->dp;
	struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
	u32 n_entries_per_buf;
	int num_scatter_buf, scatter_idx;
	struct hal_wbm_link_desc *scatter_buf;
	int align_bytes, n_entries;
	dma_addr_t paddr;
	int rem_entries;
	int i;
	int ret = 0;
	u32 end_offset;

	n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
			    ath11k_hal_srng_get_entrysize(HAL_WBM_IDLE_LINK);
	num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);

	if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
		return -EINVAL;

	for (i = 0; i < num_scatter_buf; i++) {
		slist[i].vaddr = dma_alloc_coherent(ab->dev,
						    HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
						    &slist[i].paddr, GFP_KERNEL);
		if (!slist[i].vaddr) {
			ret = -ENOMEM;
			goto err;
		}
	}

	scatter_idx = 0;
	scatter_buf = slist[scatter_idx].vaddr;
	rem_entries = n_entries_per_buf;

	for (i = 0; i < n_link_desc_bank; i++) {
		align_bytes = link_desc_banks[i].vaddr -
			      link_desc_banks[i].vaddr_unaligned;
		n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) /
			    HAL_LINK_DESC_SIZE;
		paddr = link_desc_banks[i].paddr;
		while (n_entries) {
			ath11k_hal_set_link_desc_addr(scatter_buf, i, paddr);
			n_entries--;
			paddr += HAL_LINK_DESC_SIZE;
			if (rem_entries) {
				rem_entries--;
				scatter_buf++;
				continue;
			}

			rem_entries = n_entries_per_buf;
			scatter_idx++;
			scatter_buf = slist[scatter_idx].vaddr;
		}
	}

	end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
		     sizeof(struct hal_wbm_link_desc);
	ath11k_hal_setup_link_idle_list(ab, slist, num_scatter_buf,
					n_link_desc, end_offset);

	return 0;

err:
	ath11k_dp_scatter_idle_link_desc_cleanup(ab);

	return ret;
}
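
/* Sizing example for ath11k_dp_scatter_idle_link_desc_setup() above
 * (illustrative numbers, not the real HAL constants): with a 32 KiB
 * HAL_WBM_IDLE_SCATTER_BUF_SIZE and an 80 KiB total requirement,
 *
 *	num_scatter_buf = DIV_ROUND_UP(80K, 32K) = 3
 *
 * the first two buffers are filled completely, the third only partially,
 * and end_offset tells the HAL how far into the last buffer the valid
 * descriptors extend.
 */
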
static void
ath11k_dp_link_desc_bank_free(struct ath11k_base *ab,
			      struct dp_link_desc_bank *link_desc_banks)
{
	int i;

	for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
		if (link_desc_banks[i].vaddr_unaligned) {
			dma_free_coherent(ab->dev,
					  link_desc_banks[i].size,
					  link_desc_banks[i].vaddr_unaligned,
					  link_desc_banks[i].paddr_unaligned);
			link_desc_banks[i].vaddr_unaligned = NULL;
		}
	}
}

static int ath11k_dp_link_desc_bank_alloc(struct ath11k_base *ab,
					  struct dp_link_desc_bank *desc_bank,
					  int n_link_desc_bank,
					  int last_bank_sz)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;
	int ret = 0;
	int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;

	for (i = 0; i < n_link_desc_bank; i++) {
		if (i == (n_link_desc_bank - 1) && last_bank_sz)
			desc_sz = last_bank_sz;

		desc_bank[i].vaddr_unaligned =
				dma_alloc_coherent(ab->dev, desc_sz,
						   &desc_bank[i].paddr_unaligned,
						   GFP_KERNEL);
		if (!desc_bank[i].vaddr_unaligned) {
			ret = -ENOMEM;
			goto err;
		}

		desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
					       HAL_LINK_DESC_ALIGN);
		desc_bank[i].paddr = desc_bank[i].paddr_unaligned +
				     ((unsigned long)desc_bank[i].vaddr -
				      (unsigned long)desc_bank[i].vaddr_unaligned);
		desc_bank[i].size = desc_sz;
	}

	return 0;

err:
	ath11k_dp_link_desc_bank_free(ab, dp->link_desc_banks);

	return ret;
}

void ath11k_dp_link_desc_cleanup(struct ath11k_base *ab,
				 struct dp_link_desc_bank *desc_bank,
				 u32 ring_type, struct dp_srng *ring)
{
	ath11k_dp_link_desc_bank_free(ab, desc_bank);

	if (ring_type != HAL_RXDMA_MONITOR_DESC) {
		ath11k_dp_srng_cleanup(ab, ring);
		ath11k_dp_scatter_idle_link_desc_cleanup(ab);
	}
}

static int ath11k_wbm_idle_ring_setup(struct ath11k_base *ab, u32 *n_link_desc)
{
	struct ath11k_dp *dp = &ab->dp;
	u32 n_mpdu_link_desc, n_mpdu_queue_desc;
	u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
	int ret = 0;

	n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
			   HAL_NUM_MPDUS_PER_LINK_DESC;

	n_mpdu_queue_desc = n_mpdu_link_desc /
			    HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;

	n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
			       DP_AVG_MSDUS_PER_FLOW) /
			      HAL_NUM_TX_MSDUS_PER_LINK_DESC;

	n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
			       DP_AVG_MSDUS_PER_MPDU) /
			      HAL_NUM_RX_MSDUS_PER_LINK_DESC;

	*n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
		       n_tx_msdu_link_desc + n_rx_msdu_link_desc;

	if (*n_link_desc & (*n_link_desc - 1))
		*n_link_desc = 1 << fls(*n_link_desc);

	ret = ath11k_dp_srng_setup(ab, &dp->wbm_idle_ring,
				   HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
	if (ret) {
		ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
		return ret;
	}
	return ret;
}
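
/* The power-of-two round-up in ath11k_wbm_idle_ring_setup() above:
 * n & (n - 1) is non-zero exactly when n is not a power of two, and
 * 1 << fls(n) is the next power of two above n.  For example, if the
 * descriptor counts summed to 0x5000:
 *
 *	0x5000 & 0x4fff != 0, fls(0x5000) = 15
 *	*n_link_desc = 1 << 15 = 0x8000
 *
 * while an exact power of two is left untouched.
 */
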
int ath11k_dp_link_desc_setup(struct ath11k_base *ab,
			      struct dp_link_desc_bank *link_desc_banks,
			      u32 ring_type, struct hal_srng *srng,
			      u32 n_link_desc)
{
	u32 tot_mem_sz;
	u32 n_link_desc_bank, last_bank_sz;
	u32 entry_sz, align_bytes, n_entries;
	dma_addr_t paddr;
	u32 *desc;
	int i, ret;

	tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
	tot_mem_sz += HAL_LINK_DESC_ALIGN;

	if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
		n_link_desc_bank = 1;
		last_bank_sz = tot_mem_sz;
	} else {
		n_link_desc_bank = tot_mem_sz /
				   (DP_LINK_DESC_ALLOC_SIZE_THRESH -
				    HAL_LINK_DESC_ALIGN);
		last_bank_sz = tot_mem_sz %
			       (DP_LINK_DESC_ALLOC_SIZE_THRESH -
				HAL_LINK_DESC_ALIGN);

		if (last_bank_sz)
			n_link_desc_bank += 1;
	}

	if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
		return -EINVAL;

	ret = ath11k_dp_link_desc_bank_alloc(ab, link_desc_banks,
					     n_link_desc_bank, last_bank_sz);
	if (ret)
		return ret;

	/* Setup link desc idle list for HW internal usage */
	entry_sz = ath11k_hal_srng_get_entrysize(ring_type);
	tot_mem_sz = entry_sz * n_link_desc;

	/* Setup scatter desc list when the total memory requirement is more */
	if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
	    ring_type != HAL_RXDMA_MONITOR_DESC) {
		ret = ath11k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
							     n_link_desc_bank,
							     n_link_desc,
							     last_bank_sz);
		if (ret) {
			ath11k_warn(ab, "failed to setup scatter idle list descriptor :%d\n",
				    ret);
			goto fail_desc_bank_free;
		}

		return 0;
	}

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	for (i = 0; i < n_link_desc_bank; i++) {
		align_bytes = link_desc_banks[i].vaddr -
			      link_desc_banks[i].vaddr_unaligned;
		n_entries = (link_desc_banks[i].size - align_bytes) /
			    HAL_LINK_DESC_SIZE;
		paddr = link_desc_banks[i].paddr;
		while (n_entries &&
		       (desc = ath11k_hal_srng_src_get_next_entry(ab, srng))) {
			ath11k_hal_set_link_desc_addr((struct hal_wbm_link_desc *)desc,
						      i, paddr);
			n_entries--;
			paddr += HAL_LINK_DESC_SIZE;
		}
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return 0;

fail_desc_bank_free:
	ath11k_dp_link_desc_bank_free(ab, link_desc_banks);

	return ret;
}
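
/* Bank-splitting example for ath11k_dp_link_desc_setup() above (numbers
 * are illustrative, not the real constants): each full bank is allocated
 * DP_LINK_DESC_ALLOC_SIZE_THRESH bytes, of which up to
 * HAL_LINK_DESC_ALIGN bytes may be lost to alignment, so only
 * THRESH - ALIGN bytes per bank are usable.  With a 2 MiB threshold,
 * 128 B alignment and a ~5 MiB total requirement:
 *
 *	n_link_desc_bank = 5M / (2M - 128)  ->  2 full banks
 *	last_bank_sz     = 5M % (2M - 128)  ->  non-zero remainder
 *
 * so n_link_desc_bank is bumped to 3 and the final bank is allocated
 * only last_bank_sz bytes.
 */
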
int ath11k_dp_service_srng(struct ath11k_base *ab,
			   struct ath11k_ext_irq_grp *irq_grp,
			   int budget)
{
	struct napi_struct *napi = &irq_grp->napi;
	int grp_id = irq_grp->grp_id;
	int work_done = 0;
	int i = 0;
	int tot_work_done = 0;

	while (ath11k_tx_ring_mask[grp_id] >> i) {
		if (ath11k_tx_ring_mask[grp_id] & BIT(i))
			ath11k_dp_tx_completion_handler(ab, i);
		i++;
	}

	if (ath11k_rx_err_ring_mask[grp_id]) {
		work_done = ath11k_dp_process_rx_err(ab, napi, budget);
		budget -= work_done;
		tot_work_done += work_done;
		if (budget <= 0)
			goto done;
	}

	if (ath11k_rx_wbm_rel_ring_mask[grp_id]) {
		work_done = ath11k_dp_rx_process_wbm_err(ab,
							 napi,
							 budget);
		budget -= work_done;
		tot_work_done += work_done;

		if (budget <= 0)
			goto done;
	}

	if (ath11k_rx_ring_mask[grp_id]) {
		for (i = 0; i < ab->num_radios; i++) {
			if (ath11k_rx_ring_mask[grp_id] & BIT(i)) {
				work_done = ath11k_dp_process_rx(ab, i, napi,
								 &irq_grp->pending_q,
								 budget);
				budget -= work_done;
				tot_work_done += work_done;
			}
			if (budget <= 0)
				goto done;
		}
	}

	if (rx_mon_status_ring_mask[grp_id]) {
		for (i = 0; i < ab->num_radios; i++) {
			if (rx_mon_status_ring_mask[grp_id] & BIT(i)) {
				work_done =
				ath11k_dp_rx_process_mon_rings(ab,
							       i, napi,
							       budget);
				budget -= work_done;
				tot_work_done += work_done;
			}
			if (budget <= 0)
				goto done;
		}
	}

	if (ath11k_reo_status_ring_mask[grp_id])
		ath11k_dp_process_reo_status(ab);

	for (i = 0; i < ab->num_radios; i++) {
		if (ath11k_rxdma2host_ring_mask[grp_id] & BIT(i)) {
			work_done = ath11k_dp_process_rxdma_err(ab, i, budget);
			budget -= work_done;
			tot_work_done += work_done;
		}

		if (budget <= 0)
			goto done;

		if (ath11k_host2rxdma_ring_mask[grp_id] & BIT(i)) {
			struct ath11k_pdev_dp *dp = &ab->pdevs[i].ar->dp;
			struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;

			ath11k_dp_rxbufs_replenish(ab, i, rx_ring, 0,
						   HAL_RX_BUF_RBM_SW3_BM,
						   GFP_ATOMIC);
		}
	}
	/* TODO: Implement handler for other interrupts */

done:
	return tot_work_done;
}
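
/* NAPI budget accounting in ath11k_dp_service_srng(), sketched with an
 * example: each rx-side handler returns how many frames it processed,
 * which is subtracted from the remaining budget.  If a poll starts with
 * a budget of 256 and ath11k_dp_process_rx_err() alone consumes all of
 * it, the later handlers are skipped via the "done" label and run on
 * the next poll instead; tot_work_done is what is reported back to the
 * NAPI core.
 */
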
void ath11k_dp_pdev_free(struct ath11k_base *ab)
{
	struct ath11k *ar;
	int i;

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		ath11k_dp_rx_pdev_free(ab, i);
		ath11k_debug_unregister(ar);
		ath11k_dp_rx_pdev_mon_detach(ar);
	}
}

void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab)
{
	struct ath11k *ar;
	struct ath11k_pdev_dp *dp;
	int i;

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		dp = &ar->dp;
		dp->mac_id = i;
		idr_init(&dp->rx_refill_buf_ring.bufs_idr);
		spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
		atomic_set(&dp->num_tx_pending, 0);
		init_waitqueue_head(&dp->tx_empty_waitq);
		idr_init(&dp->rx_mon_status_refill_ring.bufs_idr);
		spin_lock_init(&dp->rx_mon_status_refill_ring.idr_lock);
		idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
		spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
	}
}

int ath11k_dp_pdev_alloc(struct ath11k_base *ab)
{
	struct ath11k *ar;
	int ret;
	int i;

	/* TODO: Per-pdev rx ring unlike tx ring which is mapped to different AC's */
	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		ret = ath11k_dp_rx_pdev_alloc(ab, i);
		if (ret) {
			ath11k_warn(ab, "failed to allocate pdev rx for pdev_id :%d\n",
				    i);
			goto err;
		}
		ret = ath11k_dp_rx_pdev_mon_attach(ar);
		if (ret) {
			ath11k_warn(ab, "failed to initialize mon pdev %d\n",
				    i);
			goto err;
		}
	}

	return 0;

err:
	ath11k_dp_pdev_free(ab);

	return ret;
}

int ath11k_dp_htt_connect(struct ath11k_dp *dp)
{
	struct ath11k_htc_svc_conn_req conn_req;
	struct ath11k_htc_svc_conn_resp conn_resp;
	int status;

	memset(&conn_req, 0, sizeof(conn_req));
	memset(&conn_resp, 0, sizeof(conn_resp));

	conn_req.ep_ops.ep_tx_complete = ath11k_dp_htt_htc_tx_complete;
	conn_req.ep_ops.ep_rx_complete = ath11k_dp_htt_htc_t2h_msg_handler;

	/* connect to control service */
	conn_req.service_id = ATH11K_HTC_SVC_ID_HTT_DATA_MSG;

	status = ath11k_htc_connect_service(&dp->ab->htc, &conn_req,
					    &conn_resp);

	if (status)
		return status;

	dp->eid = conn_resp.eid;

	return 0;
}

static void ath11k_dp_update_vdev_search(struct ath11k_vif *arvif)
{
	/* For STA mode, enable the address search index;
	 * TCL uses the ast_hash value in the descriptor.
	 */
	switch (arvif->vdev_type) {
	case WMI_VDEV_TYPE_STA:
		arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
		arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
		break;
	case WMI_VDEV_TYPE_AP:
	case WMI_VDEV_TYPE_IBSS:
		arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
		arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
		break;
	case WMI_VDEV_TYPE_MONITOR:
	default:
		return;
	}
}

void ath11k_dp_vdev_tx_attach(struct ath11k *ar, struct ath11k_vif *arvif)
{
	arvif->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 1) |
			       FIELD_PREP(HTT_TCL_META_DATA_VDEV_ID,
					  arvif->vdev_id) |
			       FIELD_PREP(HTT_TCL_META_DATA_PDEV_ID,
					  ar->pdev->pdev_id);

	/* set HTT extension valid bit to 0 by default */
	arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;

	ath11k_dp_update_vdev_search(arvif);
}

static int ath11k_dp_tx_pending_cleanup(int buf_id, void *skb, void *ctx)
{
	struct ath11k_base *ab = (struct ath11k_base *)ctx;
	struct sk_buff *msdu = skb;

	dma_unmap_single(ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len,
			 DMA_TO_DEVICE);

	dev_kfree_skb_any(msdu);

	return 0;
}

void ath11k_dp_free(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;

	ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);

	ath11k_dp_srng_common_cleanup(ab);

	ath11k_dp_reo_cmd_list_cleanup(ab);

	for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
		spin_lock_bh(&dp->tx_ring[i].tx_idr_lock);
		idr_for_each(&dp->tx_ring[i].txbuf_idr,
			     ath11k_dp_tx_pending_cleanup, ab);
		idr_destroy(&dp->tx_ring[i].txbuf_idr);
		spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock);
		kfree(dp->tx_ring[i].tx_status);
	}

	/* Deinit any SOC level resource */
}

int ath11k_dp_alloc(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng = NULL;
	size_t size = 0;
	u32 n_link_desc = 0;
	int ret;
	int i;

	dp->ab = ab;

	INIT_LIST_HEAD(&dp->reo_cmd_list);
	INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
	spin_lock_init(&dp->reo_cmd_lock);

	ret = ath11k_wbm_idle_ring_setup(ab, &n_link_desc);
	if (ret) {
		ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
		return ret;
	}

	srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];

	ret = ath11k_dp_link_desc_setup(ab, dp->link_desc_banks,
					HAL_WBM_IDLE_LINK, srng, n_link_desc);
	if (ret) {
		ath11k_warn(ab, "failed to setup link desc: %d\n", ret);
		return ret;
	}

	ret = ath11k_dp_srng_common_setup(ab);
	if (ret)
		goto fail_link_desc_cleanup;

	size = sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE;

	for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
		idr_init(&dp->tx_ring[i].txbuf_idr);
		spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
		dp->tx_ring[i].tcl_data_ring_id = i;

		dp->tx_ring[i].tx_status_head = 0;
		dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
		dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
		if (!dp->tx_ring[i].tx_status) {
			ret = -ENOMEM;
			goto fail_cmn_srng_cleanup;
		}
	}

	for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
		ath11k_hal_tx_set_dscp_tid_map(ab, i);

	/* Init any SOC level resource for DP */

	return 0;

fail_cmn_srng_cleanup:
	ath11k_dp_srng_common_cleanup(ab);

fail_link_desc_cleanup:
	ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);

	return ret;
}