// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <crypto/hash.h>
#include "core.h"
#include "dp_tx.h"
#include "hal_tx.h"
#include "hif.h"
#include "debug.h"
#include "dp_rx.h"
#include "peer.h"

static void ath11k_dp_htt_htc_tx_complete(struct ath11k_base *ab,
					  struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;

	/* TODO: Any other peer specific DP cleanup */

	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find(ab, vdev_id, addr);
	if (!peer) {
		ath11k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
			    addr, vdev_id);
		spin_unlock_bh(&ab->base_lock);
		return;
	}

	ath11k_peer_rx_tid_cleanup(ar, peer);
	peer->dp_setup_done = false;
	crypto_free_shash(peer->tfm_mmic);
	spin_unlock_bh(&ab->base_lock);
}

int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	u32 reo_dest;
	int ret = 0, tid;

	/* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
	reo_dest = ar->dp.mac_id + 1;
	ret = ath11k_wmi_set_peer_param(ar, addr, vdev_id,
					WMI_PEER_SET_DEFAULT_ROUTING,
					DP_RX_HASH_ENABLE | (reo_dest << 1));

	if (ret) {
		ath11k_warn(ab, "failed to set default routing %d peer :%pM vdev_id :%d\n",
			    ret, addr, vdev_id);
		return ret;
	}

	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
		ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id, tid, 1, 0,
					       HAL_PN_TYPE_NONE);
		if (ret) {
			ath11k_warn(ab, "failed to setup rxd tid queue for tid %d: %d\n",
				    tid, ret);
			goto peer_clean;
		}
	}

	ret = ath11k_peer_rx_frag_setup(ar, addr, vdev_id);
	if (ret) {
		ath11k_warn(ab, "failed to setup rx defrag context\n");
		tid--;
		goto peer_clean;
	}

	/* TODO: Setup other peer specific resource used in data path */

	return 0;

peer_clean:
	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, addr);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to del rx tid\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	for (; tid >= 0; tid--)
		ath11k_peer_rx_tid_delete(ar, peer, tid);

	spin_unlock_bh(&ab->base_lock);

	return ret;
}

void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
{
	if (!ring->vaddr_unaligned)
		return;

	if (ring->cached) {
		dma_unmap_single(ab->dev, ring->paddr_unaligned, ring->size,
				 DMA_FROM_DEVICE);
		kfree(ring->vaddr_unaligned);
	} else {
		dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
				  ring->paddr_unaligned);
	}

	ring->vaddr_unaligned = NULL;
}

static int ath11k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
{
	int ext_group_num;
	u8 mask = 1 << ring_num;

	for (ext_group_num = 0; ext_group_num < ATH11K_EXT_IRQ_GRP_NUM_MAX;
	     ext_group_num++) {
		if (mask & grp_mask[ext_group_num])
			return ext_group_num;
	}

	return -ENOENT;
}

static int ath11k_dp_srng_calculate_msi_group(struct ath11k_base *ab,
					      enum hal_ring_type type, int ring_num)
{
	const u8 *grp_mask;

	switch (type) {
	case HAL_WBM2SW_RELEASE:
		if (ring_num == DP_RX_RELEASE_RING_NUM) {
			grp_mask = &ab->hw_params.ring_mask->rx_wbm_rel[0];
			ring_num = 0;
		} else {
			grp_mask = &ab->hw_params.ring_mask->tx[0];
		}
		break;
	case HAL_REO_EXCEPTION:
		grp_mask = &ab->hw_params.ring_mask->rx_err[0];
		break;
	case HAL_REO_DST:
		grp_mask = &ab->hw_params.ring_mask->rx[0];
		break;
	case HAL_REO_STATUS:
		grp_mask = &ab->hw_params.ring_mask->reo_status[0];
		break;
	case HAL_RXDMA_MONITOR_STATUS:
	case HAL_RXDMA_MONITOR_DST:
		grp_mask = &ab->hw_params.ring_mask->rx_mon_status[0];
		break;
	case HAL_RXDMA_DST:
		grp_mask = &ab->hw_params.ring_mask->rxdma2host[0];
		break;
	case HAL_RXDMA_BUF:
		grp_mask = &ab->hw_params.ring_mask->host2rxdma[0];
		break;
	case HAL_RXDMA_MONITOR_BUF:
	case HAL_TCL_DATA:
	case HAL_TCL_CMD:
	case HAL_REO_CMD:
	case HAL_SW2WBM_RELEASE:
	case HAL_WBM_IDLE_LINK:
	case HAL_TCL_STATUS:
	case HAL_REO_REINJECT:
	case HAL_CE_SRC:
	case HAL_CE_DST:
	case HAL_CE_DST_STATUS:
	default:
		return -ENOENT;
	}

	return ath11k_dp_srng_find_ring_in_mask(ring_num, grp_mask);
}

static void ath11k_dp_srng_msi_setup(struct ath11k_base *ab,
				     struct hal_srng_params *ring_params,
				     enum hal_ring_type type, int ring_num)
{
	int msi_group_number, msi_data_count;
	u32 msi_data_start, msi_irq_start, addr_lo, addr_hi;
	int ret;

	ret = ath11k_get_user_msi_vector(ab, "DP",
					 &msi_data_count, &msi_data_start,
					 &msi_irq_start);
	if (ret)
		return;

	msi_group_number = ath11k_dp_srng_calculate_msi_group(ab, type,
							       ring_num);
	if (msi_group_number < 0) {
		ath11k_dbg(ab, ATH11K_DBG_PCI,
			   "ring not part of an ext_group; ring_type: %d,ring_num %d",
			   type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		return;
	}

	if (msi_group_number > msi_data_count) {
		ath11k_dbg(ab, ATH11K_DBG_PCI,
			   "multiple msi_groups share one msi, msi_group_num %d",
			   msi_group_number);
	}

	ath11k_get_msi_address(ab, &addr_lo, &addr_hi);

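	/* Combine the low and high halves into the 64-bit MSI address */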
	ring_params->msi_addr = addr_lo;
	ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
	ring_params->msi_data = (msi_group_number % msi_data_count)
		+ msi_data_start;
	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}

int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
			 enum hal_ring_type type, int ring_num,
			 int mac_id, int num_entries)
{
	struct hal_srng_params params = { 0 };
	int entry_sz = ath11k_hal_srng_get_entrysize(ab, type);
	int max_entries = ath11k_hal_srng_get_max_entries(ab, type);
	int ret;
	bool cached = false;

	if (max_entries < 0 || entry_sz < 0)
		return -EINVAL;

	if (num_entries > max_entries)
		num_entries = max_entries;

	ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;

	if (ab->hw_params.alloc_cacheable_memory) {
		/* Allocate the reo dst and tx completion rings from cacheable memory */
		switch (type) {
		case HAL_REO_DST:
		case HAL_WBM2SW_RELEASE:
			cached = true;
			break;
		default:
			cached = false;
		}

		if (cached) {
			ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL);
			if (!ring->vaddr_unaligned)
				return -ENOMEM;

			ring->paddr_unaligned = dma_map_single(ab->dev,
							       ring->vaddr_unaligned,
							       ring->size,
							       DMA_FROM_DEVICE);
			if (dma_mapping_error(ab->dev, ring->paddr_unaligned)) {
				kfree(ring->vaddr_unaligned);
				ring->vaddr_unaligned = NULL;
				return -ENOMEM;
			}
		}
	}

	if (!cached)
		ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
							   &ring->paddr_unaligned,
							   GFP_KERNEL);

	if (!ring->vaddr_unaligned)
		return -ENOMEM;

	ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
	ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
		      (unsigned long)ring->vaddr_unaligned);

	params.ring_base_vaddr = ring->vaddr;
	params.ring_base_paddr = ring->paddr;
	params.num_entries = num_entries;
	ath11k_dp_srng_msi_setup(ab, &params, type, ring_num + mac_id);

	switch (type) {
	case HAL_REO_DST:
		params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_RX;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_RXDMA_BUF:
	case HAL_RXDMA_MONITOR_BUF:
	case HAL_RXDMA_MONITOR_STATUS:
		params.low_threshold = num_entries >> 3;
		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
		params.intr_batch_cntr_thres_entries = 0;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_WBM2SW_RELEASE:
		if (ring_num < 3) {
			params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_TX;
			params.intr_timer_thres_us =
					HAL_SRNG_INT_TIMER_THRESHOLD_TX;
			break;
		}
		/* fall through when ring_num >= 3 */
		fallthrough;
	case HAL_REO_EXCEPTION:
	case HAL_REO_REINJECT:
	case HAL_REO_CMD:
	case HAL_REO_STATUS:
	case HAL_TCL_DATA:
	case HAL_TCL_CMD:
	case HAL_TCL_STATUS:
	case HAL_WBM_IDLE_LINK:
	case HAL_SW2WBM_RELEASE:
	case HAL_RXDMA_DST:
	case HAL_RXDMA_MONITOR_DST:
	case HAL_RXDMA_MONITOR_DESC:
		params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
		break;
	case HAL_RXDMA_DIR_BUF:
		break;
	default:
		ath11k_warn(ab, "Not a valid ring type in dp :%d\n", type);
		return -EINVAL;
	}

	if (cached) {
		params.flags |= HAL_SRNG_FLAGS_CACHED;
		ring->cached = 1;
	}

	ret = ath11k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
	if (ret < 0) {
		ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
			    ret, ring_num);
		return ret;
	}

	ring->ring_id = ret;

	return 0;
}

void ath11k_dp_stop_shadow_timers(struct ath11k_base *ab)
{
	int i;

	if (!ab->hw_params.supports_shadow_regs)
		return;

	for (i = 0; i < ab->hw_params.max_tx_ring; i++)
		ath11k_dp_shadow_stop_timer(ab, &ab->dp.tx_ring_timer[i]);

	ath11k_dp_shadow_stop_timer(ab, &ab->dp.reo_cmd_timer);
}

static void ath11k_dp_srng_common_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;

	ath11k_dp_stop_shadow_timers(ab);
	ath11k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
	ath11k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
	ath11k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
		ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
	}
	ath11k_dp_srng_cleanup(ab, &dp->reo_reinject_ring);
	ath11k_dp_srng_cleanup(ab, &dp->rx_rel_ring);
	ath11k_dp_srng_cleanup(ab, &dp->reo_except_ring);
	ath11k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
	ath11k_dp_srng_cleanup(ab, &dp->reo_status_ring);
}

static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng;
	int i, ret;
	u8 tcl_num, wbm_num;

	ret = ath11k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
				   HAL_SW2WBM_RELEASE, 0, 0,
				   DP_WBM_RELEASE_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up wbm2sw_release ring :%d\n",
			    ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
				   DP_TCL_CMD_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
				   0, 0, DP_TCL_STATUS_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up tcl_status ring :%d\n", ret);
		goto err;
	}

	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		tcl_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].tcl_ring_num;
		wbm_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num;

		ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
					   HAL_TCL_DATA, tcl_num, 0,
					   ab->hw_params.tx_ring_size);
		if (ret) {
			ath11k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
				    i, ret);
			goto err;
		}

		ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
					   HAL_WBM2SW_RELEASE, wbm_num, 0,
					   DP_TX_COMP_RING_SIZE);
		if (ret) {
			ath11k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
				    i, ret);
			goto err;
		}

		srng = &ab->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id];
		ath11k_hal_tx_init_data_ring(ab, srng);

		ath11k_dp_shadow_init_timer(ab, &dp->tx_ring_timer[i],
					    ATH11K_SHADOW_DP_TIMER_INTERVAL,
					    dp->tx_ring[i].tcl_data_ring.ring_id);
	}

	ret = ath11k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
				   0, 0, DP_REO_REINJECT_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_reinject ring :%d\n",
			    ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
				   DP_RX_RELEASE_RING_NUM, 0, DP_RX_RELEASE_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
				   0, 0, DP_REO_EXCEPTION_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_exception ring :%d\n",
			    ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
				   0, 0, DP_REO_CMD_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret);
		goto err;
	}

	srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
	ath11k_hal_reo_init_cmd_ring(ab, srng);

	ath11k_dp_shadow_init_timer(ab, &dp->reo_cmd_timer,
				    ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
				    dp->reo_cmd_ring.ring_id);

	ret = ath11k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
				   0, 0, DP_REO_STATUS_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_status ring :%d\n", ret);
		goto err;
	}

	/* When hash based routing of rx packet is enabled, 32 entries to map
	 * the hash values to the ring will be configured.
	 */
	ab->hw_params.hw_ops->reo_setup(ab);

	return 0;

err:
	ath11k_dp_srng_common_cleanup(ab);

	return ret;
}

static void ath11k_dp_scatter_idle_link_desc_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
	int i;

	for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
		if (!slist[i].vaddr)
			continue;

		dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
				  slist[i].vaddr, slist[i].paddr);
		slist[i].vaddr = NULL;
	}
}

static int ath11k_dp_scatter_idle_link_desc_setup(struct ath11k_base *ab,
						  int size,
						  u32 n_link_desc_bank,
						  u32 n_link_desc,
						  u32 last_bank_sz)
{
	struct ath11k_dp *dp = &ab->dp;
	struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
	u32 n_entries_per_buf;
	int num_scatter_buf, scatter_idx;
	struct hal_wbm_link_desc *scatter_buf;
	int align_bytes, n_entries;
	dma_addr_t paddr;
	int rem_entries;
	int i;
	int ret = 0;
	u32 end_offset;

	n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
			    ath11k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
	num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);

	if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
		return -EINVAL;

	for (i = 0; i < num_scatter_buf; i++) {
		slist[i].vaddr = dma_alloc_coherent(ab->dev,
						    HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
						    &slist[i].paddr, GFP_KERNEL);
		if (!slist[i].vaddr) {
			ret = -ENOMEM;
			goto err;
		}
	}

	scatter_idx = 0;
	scatter_buf = slist[scatter_idx].vaddr;
	rem_entries = n_entries_per_buf;

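	/* Fill the scatter buffers with link descriptor addresses from
	 * each bank, moving to the next scatter buffer whenever the
	 * current one runs out of entries.
	 */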
	for (i = 0; i < n_link_desc_bank; i++) {
		align_bytes = link_desc_banks[i].vaddr -
			      link_desc_banks[i].vaddr_unaligned;
		n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) /
			    HAL_LINK_DESC_SIZE;
		paddr = link_desc_banks[i].paddr;
		while (n_entries) {
			ath11k_hal_set_link_desc_addr(scatter_buf, i, paddr);
			n_entries--;
			paddr += HAL_LINK_DESC_SIZE;
			if (rem_entries) {
				rem_entries--;
				scatter_buf++;
				continue;
			}

			rem_entries = n_entries_per_buf;
			scatter_idx++;
			scatter_buf = slist[scatter_idx].vaddr;
		}
	}

	end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
		     sizeof(struct hal_wbm_link_desc);
	ath11k_hal_setup_link_idle_list(ab, slist, num_scatter_buf,
					n_link_desc, end_offset);

	return 0;

err:
	ath11k_dp_scatter_idle_link_desc_cleanup(ab);

	return ret;
}

static void
ath11k_dp_link_desc_bank_free(struct ath11k_base *ab,
			      struct dp_link_desc_bank *link_desc_banks)
{
	int i;

	for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
		if (link_desc_banks[i].vaddr_unaligned) {
			dma_free_coherent(ab->dev,
					  link_desc_banks[i].size,
					  link_desc_banks[i].vaddr_unaligned,
					  link_desc_banks[i].paddr_unaligned);
			link_desc_banks[i].vaddr_unaligned = NULL;
		}
	}
}

static int ath11k_dp_link_desc_bank_alloc(struct ath11k_base *ab,
					  struct dp_link_desc_bank *desc_bank,
					  int n_link_desc_bank,
					  int last_bank_sz)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;
	int ret = 0;
	int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;

	for (i = 0; i < n_link_desc_bank; i++) {
		if (i == (n_link_desc_bank - 1) && last_bank_sz)
			desc_sz = last_bank_sz;

		desc_bank[i].vaddr_unaligned =
					dma_alloc_coherent(ab->dev, desc_sz,
							   &desc_bank[i].paddr_unaligned,
							   GFP_KERNEL);
		if (!desc_bank[i].vaddr_unaligned) {
			ret = -ENOMEM;
			goto err;
		}

		desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
					       HAL_LINK_DESC_ALIGN);
		desc_bank[i].paddr = desc_bank[i].paddr_unaligned +
				     ((unsigned long)desc_bank[i].vaddr -
				      (unsigned long)desc_bank[i].vaddr_unaligned);
		desc_bank[i].size = desc_sz;
	}

	return 0;

err:
	ath11k_dp_link_desc_bank_free(ab, dp->link_desc_banks);

	return ret;
}

void ath11k_dp_link_desc_cleanup(struct ath11k_base *ab,
				 struct dp_link_desc_bank *desc_bank,
				 u32 ring_type, struct dp_srng *ring)
{
	ath11k_dp_link_desc_bank_free(ab, desc_bank);

	if (ring_type != HAL_RXDMA_MONITOR_DESC) {
		ath11k_dp_srng_cleanup(ab, ring);
		ath11k_dp_scatter_idle_link_desc_cleanup(ab);
	}
}

static int ath11k_wbm_idle_ring_setup(struct ath11k_base *ab, u32 *n_link_desc)
{
	struct ath11k_dp *dp = &ab->dp;
	u32 n_mpdu_link_desc, n_mpdu_queue_desc;
	u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
	int ret = 0;

	n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
			   HAL_NUM_MPDUS_PER_LINK_DESC;

	n_mpdu_queue_desc = n_mpdu_link_desc /
			    HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;

	n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
			       DP_AVG_MSDUS_PER_FLOW) /
			      HAL_NUM_TX_MSDUS_PER_LINK_DESC;

	n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
			       DP_AVG_MSDUS_PER_MPDU) /
			      HAL_NUM_RX_MSDUS_PER_LINK_DESC;

	*n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
		       n_tx_msdu_link_desc + n_rx_msdu_link_desc;

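	/* Round up to the next power of two if the total is not one already */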
	if (*n_link_desc & (*n_link_desc - 1))
		*n_link_desc = 1 << fls(*n_link_desc);

	ret = ath11k_dp_srng_setup(ab, &dp->wbm_idle_ring,
				   HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
	if (ret) {
		ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
		return ret;
	}
	return ret;
}

int ath11k_dp_link_desc_setup(struct ath11k_base *ab,
			      struct dp_link_desc_bank *link_desc_banks,
			      u32 ring_type, struct hal_srng *srng,
			      u32 n_link_desc)
{
	u32 tot_mem_sz;
	u32 n_link_desc_bank, last_bank_sz;
	u32 entry_sz, align_bytes, n_entries;
	u32 paddr;
	u32 *desc;
	int i, ret;

	tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
	tot_mem_sz += HAL_LINK_DESC_ALIGN;

	if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
		n_link_desc_bank = 1;
		last_bank_sz = tot_mem_sz;
	} else {
		n_link_desc_bank = tot_mem_sz /
				   (DP_LINK_DESC_ALLOC_SIZE_THRESH -
				    HAL_LINK_DESC_ALIGN);
		last_bank_sz = tot_mem_sz %
			       (DP_LINK_DESC_ALLOC_SIZE_THRESH -
				HAL_LINK_DESC_ALIGN);

		if (last_bank_sz)
			n_link_desc_bank += 1;
	}

	if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
		return -EINVAL;

	ret = ath11k_dp_link_desc_bank_alloc(ab, link_desc_banks,
					     n_link_desc_bank, last_bank_sz);
	if (ret)
		return ret;

	/* Setup link desc idle list for HW internal usage */
	entry_sz = ath11k_hal_srng_get_entrysize(ab, ring_type);
	tot_mem_sz = entry_sz * n_link_desc;

	/* Setup scatter desc list when the total memory requirement is more */
	if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
	    ring_type != HAL_RXDMA_MONITOR_DESC) {
		ret = ath11k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
							     n_link_desc_bank,
							     n_link_desc,
							     last_bank_sz);
		if (ret) {
			ath11k_warn(ab, "failed to setup scatter idle list descriptor :%d\n",
				    ret);
			goto fail_desc_bank_free;
		}

		return 0;
	}

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	for (i = 0; i < n_link_desc_bank; i++) {
		align_bytes = link_desc_banks[i].vaddr -
			      link_desc_banks[i].vaddr_unaligned;
		n_entries = (link_desc_banks[i].size - align_bytes) /
			    HAL_LINK_DESC_SIZE;
		paddr = link_desc_banks[i].paddr;
		while (n_entries &&
		       (desc = ath11k_hal_srng_src_get_next_entry(ab, srng))) {
			ath11k_hal_set_link_desc_addr((struct hal_wbm_link_desc *)desc,
						      i, paddr);
			n_entries--;
			paddr += HAL_LINK_DESC_SIZE;
		}
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return 0;

fail_desc_bank_free:
	ath11k_dp_link_desc_bank_free(ab, link_desc_banks);

	return ret;
}

int ath11k_dp_service_srng(struct ath11k_base *ab,
			   struct ath11k_ext_irq_grp *irq_grp,
			   int budget)
{
	struct napi_struct *napi = &irq_grp->napi;
	const struct ath11k_hw_hal_params *hal_params;
	int grp_id = irq_grp->grp_id;
	int work_done = 0;
	int i, j;
	int tot_work_done = 0;

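	/* TX completions for the WBM rings assigned to this ext group */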
	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		if (BIT(ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num) &
		    ab->hw_params.ring_mask->tx[grp_id])
			ath11k_dp_tx_completion_handler(ab, i);
	}

	if (ab->hw_params.ring_mask->rx_err[grp_id]) {
		work_done = ath11k_dp_process_rx_err(ab, napi, budget);
		budget -= work_done;
		tot_work_done += work_done;
		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params.ring_mask->rx_wbm_rel[grp_id]) {
		work_done = ath11k_dp_rx_process_wbm_err(ab,
							 napi,
							 budget);
		budget -= work_done;
		tot_work_done += work_done;

		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params.ring_mask->rx[grp_id]) {
		i = fls(ab->hw_params.ring_mask->rx[grp_id]) - 1;
		work_done = ath11k_dp_process_rx(ab, i, napi,
						 budget);
		budget -= work_done;
		tot_work_done += work_done;
		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params.ring_mask->rx_mon_status[grp_id]) {
		for (i = 0; i < ab->num_radios; i++) {
			for (j = 0; j < ab->hw_params.num_rxdma_per_pdev; j++) {
				int id = i * ab->hw_params.num_rxdma_per_pdev + j;

				if (ab->hw_params.ring_mask->rx_mon_status[grp_id] &
					BIT(id)) {
					work_done =
					ath11k_dp_rx_process_mon_rings(ab,
								       id,
								       napi, budget);
					budget -= work_done;
					tot_work_done += work_done;

					if (budget <= 0)
						goto done;
				}
			}
		}
	}

	if (ab->hw_params.ring_mask->reo_status[grp_id])
		ath11k_dp_process_reo_status(ab);

	for (i = 0; i < ab->num_radios; i++) {
		for (j = 0; j < ab->hw_params.num_rxdma_per_pdev; j++) {
			int id = i * ab->hw_params.num_rxdma_per_pdev + j;

			if (ab->hw_params.ring_mask->rxdma2host[grp_id] & BIT(id)) {
				work_done = ath11k_dp_process_rxdma_err(ab, id, budget);
				budget -= work_done;
				tot_work_done += work_done;
			}

			if (budget <= 0)
				goto done;

			if (ab->hw_params.ring_mask->host2rxdma[grp_id] & BIT(id)) {
				struct ath11k *ar = ath11k_ab_to_ar(ab, id);
				struct ath11k_pdev_dp *dp = &ar->dp;
				struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;

				hal_params = ab->hw_params.hal_params;
				ath11k_dp_rxbufs_replenish(ab, id, rx_ring, 0,
							   hal_params->rx_buf_rbm);
			}
		}
	}
	/* TODO: Implement handler for other interrupts */

done:
	return tot_work_done;
}
EXPORT_SYMBOL(ath11k_dp_service_srng);

void ath11k_dp_pdev_free(struct ath11k_base *ab)
{
	struct ath11k *ar;
	int i;

	del_timer_sync(&ab->mon_reap_timer);

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		ath11k_dp_rx_pdev_free(ab, i);
		ath11k_debugfs_unregister(ar);
		ath11k_dp_rx_pdev_mon_detach(ar);
	}
}

void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab)
{
	struct ath11k *ar;
	struct ath11k_pdev_dp *dp;
	int i;
	int j;

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		dp = &ar->dp;
		dp->mac_id = i;
		idr_init(&dp->rx_refill_buf_ring.bufs_idr);
		spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
		atomic_set(&dp->num_tx_pending, 0);
		init_waitqueue_head(&dp->tx_empty_waitq);
		for (j = 0; j < ab->hw_params.num_rxdma_per_pdev; j++) {
			idr_init(&dp->rx_mon_status_refill_ring[j].bufs_idr);
			spin_lock_init(&dp->rx_mon_status_refill_ring[j].idr_lock);
		}
		idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
		spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
	}
}

int ath11k_dp_pdev_alloc(struct ath11k_base *ab)
{
	struct ath11k *ar;
	int ret;
	int i;

	/* TODO:Per-pdev rx ring unlike tx ring which is mapped to different AC's */
	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		ret = ath11k_dp_rx_pdev_alloc(ab, i);
		if (ret) {
			ath11k_warn(ab, "failed to allocate pdev rx for pdev_id :%d\n",
				    i);
			goto err;
		}
		ret = ath11k_dp_rx_pdev_mon_attach(ar);
		if (ret) {
			ath11k_warn(ab, "failed to initialize mon pdev %d\n",
				    i);
			goto err;
		}
	}

	return 0;

err:
	ath11k_dp_pdev_free(ab);

	return ret;
}

int ath11k_dp_htt_connect(struct ath11k_dp *dp)
{
	struct ath11k_htc_svc_conn_req conn_req;
	struct ath11k_htc_svc_conn_resp conn_resp;
	int status;

	memset(&conn_req, 0, sizeof(conn_req));
	memset(&conn_resp, 0, sizeof(conn_resp));

	conn_req.ep_ops.ep_tx_complete = ath11k_dp_htt_htc_tx_complete;
	conn_req.ep_ops.ep_rx_complete = ath11k_dp_htt_htc_t2h_msg_handler;

	/* connect to control service */
	conn_req.service_id = ATH11K_HTC_SVC_ID_HTT_DATA_MSG;

	status = ath11k_htc_connect_service(&dp->ab->htc, &conn_req,
					    &conn_resp);

	if (status)
		return status;

	dp->eid = conn_resp.eid;

	return 0;
}

static void ath11k_dp_update_vdev_search(struct ath11k_vif *arvif)
{
	/* When v2_map_support is true: for STA mode, enable address
	 * search index, tcl uses ast_hash value in the descriptor.
	 * When v2_map_support is false: for STA mode, don't enable
	 * address search index.
	 */
	switch (arvif->vdev_type) {
	case WMI_VDEV_TYPE_STA:
		if (arvif->ar->ab->hw_params.htt_peer_map_v2) {
			arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
			arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
		} else {
			arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
			arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
		}
		break;
	case WMI_VDEV_TYPE_AP:
	case WMI_VDEV_TYPE_IBSS:
		arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
		arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
		break;
	case WMI_VDEV_TYPE_MONITOR:
	default:
		return;
	}
}

void ath11k_dp_vdev_tx_attach(struct ath11k *ar, struct ath11k_vif *arvif)
{
	arvif->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 1) |
			       FIELD_PREP(HTT_TCL_META_DATA_VDEV_ID,
					  arvif->vdev_id) |
			       FIELD_PREP(HTT_TCL_META_DATA_PDEV_ID,
					  ar->pdev->pdev_id);

	/* set HTT extension valid bit to 0 by default */
	arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;

	ath11k_dp_update_vdev_search(arvif);
}

static int ath11k_dp_tx_pending_cleanup(int buf_id, void *skb, void *ctx)
{
	struct ath11k_base *ab = ctx;
	struct sk_buff *msdu = skb;

	dma_unmap_single(ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len,
			 DMA_TO_DEVICE);

	dev_kfree_skb_any(msdu);

	return 0;
}

void ath11k_dp_free(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;

	ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);

	ath11k_dp_srng_common_cleanup(ab);

	ath11k_dp_reo_cmd_list_cleanup(ab);

	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		spin_lock_bh(&dp->tx_ring[i].tx_idr_lock);
		idr_for_each(&dp->tx_ring[i].txbuf_idr,
			     ath11k_dp_tx_pending_cleanup, ab);
		idr_destroy(&dp->tx_ring[i].txbuf_idr);
		spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock);
		kfree(dp->tx_ring[i].tx_status);
	}

	/* Deinit any SOC level resource */
}

int ath11k_dp_alloc(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng = NULL;
	size_t size = 0;
	u32 n_link_desc = 0;
	int ret;
	int i;

	dp->ab = ab;

	INIT_LIST_HEAD(&dp->reo_cmd_list);
	INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
	INIT_LIST_HEAD(&dp->dp_full_mon_mpdu_list);
	spin_lock_init(&dp->reo_cmd_lock);

	dp->reo_cmd_cache_flush_count = 0;

	ret = ath11k_wbm_idle_ring_setup(ab, &n_link_desc);
	if (ret) {
		ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
		return ret;
	}

	srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];

	ret = ath11k_dp_link_desc_setup(ab, dp->link_desc_banks,
					HAL_WBM_IDLE_LINK, srng, n_link_desc);
	if (ret) {
		ath11k_warn(ab, "failed to setup link desc: %d\n", ret);
		return ret;
	}

	ret = ath11k_dp_srng_common_setup(ab);
	if (ret)
		goto fail_link_desc_cleanup;

	size = sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE;

	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		idr_init(&dp->tx_ring[i].txbuf_idr);
		spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
		dp->tx_ring[i].tcl_data_ring_id = i;

		dp->tx_ring[i].tx_status_head = 0;
		dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
		dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
		if (!dp->tx_ring[i].tx_status) {
			ret = -ENOMEM;
			goto fail_cmn_srng_cleanup;
		}
	}

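	/* Program the DSCP-to-TID mapping tables */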
	for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
		ath11k_hal_tx_set_dscp_tid_map(ab, i);

	/* Init any SOC level resource for DP */

	return 0;

fail_cmn_srng_cleanup:
	ath11k_dp_srng_common_cleanup(ab);

fail_link_desc_cleanup:
	ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);

	return ret;
}

static void ath11k_dp_shadow_timer_handler(struct timer_list *t)
{
	struct ath11k_hp_update_timer *update_timer = from_timer(update_timer,
								 t, timer);
	struct ath11k_base *ab = update_timer->ab;
	struct hal_srng *srng = &ab->hal.srng_list[update_timer->ring_id];

	spin_lock_bh(&srng->lock);

	/* When the timer fires, the handler checks whether any new TX
	 * happened. The handler updates the HP only when there were no
	 * TX operations during the timeout interval, and then stops
	 * the timer. The timer is started again when TX happens again.
	 */
	if (update_timer->timer_tx_num != update_timer->tx_num) {
		update_timer->timer_tx_num = update_timer->tx_num;
		mod_timer(&update_timer->timer, jiffies +
			  msecs_to_jiffies(update_timer->interval));
	} else {
		update_timer->started = false;
		ath11k_hal_srng_shadow_update_hp_tp(ab, srng);
	}

	spin_unlock_bh(&srng->lock);
}

void ath11k_dp_shadow_start_timer(struct ath11k_base *ab,
				  struct hal_srng *srng,
				  struct ath11k_hp_update_timer *update_timer)
{
	lockdep_assert_held(&srng->lock);

	if (!ab->hw_params.supports_shadow_regs)
		return;

	update_timer->tx_num++;

	if (update_timer->started)
		return;

	update_timer->started = true;
	update_timer->timer_tx_num = update_timer->tx_num;
	mod_timer(&update_timer->timer, jiffies +
		  msecs_to_jiffies(update_timer->interval));
}

void ath11k_dp_shadow_stop_timer(struct ath11k_base *ab,
				 struct ath11k_hp_update_timer *update_timer)
{
	if (!ab->hw_params.supports_shadow_regs)
		return;

	if (!update_timer->init)
		return;

	del_timer_sync(&update_timer->timer);
}

void ath11k_dp_shadow_init_timer(struct ath11k_base *ab,
				 struct ath11k_hp_update_timer *update_timer,
				 u32 interval, u32 ring_id)
{
	if (!ab->hw_params.supports_shadow_regs)
		return;

	update_timer->tx_num = 0;
	update_timer->timer_tx_num = 0;
	update_timer->ab = ab;
	update_timer->ring_id = ring_id;
	update_timer->interval = interval;
	update_timer->init = true;
	timer_setup(&update_timer->timer,
		    ath11k_dp_shadow_timer_handler, 0);
}