// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 */

#if defined(__FreeBSD__)
#include <asm/io.h>
#endif
#include <crypto/hash.h>
#include "core.h"
#include "dp_tx.h"
#include "hal_tx.h"
#include "hif.h"
#include "debug.h"
#include "dp_rx.h"
#include "peer.h"

static void ath11k_dp_htt_htc_tx_complete(struct ath11k_base *ab,
					  struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;

	/* TODO: Any other peer specific DP cleanup */

	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find(ab, vdev_id, addr);
	if (!peer) {
		ath11k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
			    addr, vdev_id);
		spin_unlock_bh(&ab->base_lock);
		return;
	}

	ath11k_peer_rx_tid_cleanup(ar, peer);
	crypto_free_shash(peer->tfm_mmic);
	spin_unlock_bh(&ab->base_lock);
}

int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	u32 reo_dest;
	int ret = 0, tid;

	/* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
	reo_dest = ar->dp.mac_id + 1;
	ret = ath11k_wmi_set_peer_param(ar, addr, vdev_id,
					WMI_PEER_SET_DEFAULT_ROUTING,
					DP_RX_HASH_ENABLE | (reo_dest << 1));

	if (ret) {
		ath11k_warn(ab, "failed to set default routing %d peer :%pM vdev_id :%d\n",
			    ret, addr, vdev_id);
		return ret;
	}

	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
		ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id, tid, 1, 0,
					       HAL_PN_TYPE_NONE);
		if (ret) {
			ath11k_warn(ab, "failed to setup rxd tid queue for tid %d: %d\n",
				    tid, ret);
			goto peer_clean;
		}
	}

	ret = ath11k_peer_rx_frag_setup(ar, addr, vdev_id);
	if (ret) {
		ath11k_warn(ab, "failed to setup rx defrag context\n");
		return ret;
	}

	/* TODO: Setup other peer specific resource used in data path */

	return 0;

peer_clean:
	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, addr);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to del rx tid\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	for (; tid >= 0; tid--)
		ath11k_peer_rx_tid_delete(ar, peer, tid);

	spin_unlock_bh(&ab->base_lock);

	return ret;
}

void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
{
	if (!ring->vaddr_unaligned)
		return;

	if (ring->cached)
		kfree(ring->vaddr_unaligned);
	else
		dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
				  ring->paddr_unaligned);

	ring->vaddr_unaligned = NULL;
}

static int ath11k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
{
	int ext_group_num;
	u8 mask = 1 << ring_num;

	for (ext_group_num = 0; ext_group_num < ATH11K_EXT_IRQ_GRP_NUM_MAX;
	     ext_group_num++) {
		if (mask & grp_mask[ext_group_num])
			return ext_group_num;
	}

	return -ENOENT;
}

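/* Map a ring to the ext interrupt group servicing it: pick the per-ring-type
 * mask from hw_params and return the index of the group whose mask contains
 * this ring, or -ENOENT if the ring is not interrupt driven.
 */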
static int ath11k_dp_srng_calculate_msi_group(struct ath11k_base *ab,
					      enum hal_ring_type type, int ring_num)
{
	const u8 *grp_mask;

	switch (type) {
	case HAL_WBM2SW_RELEASE:
		if (ring_num < 3) {
			grp_mask = &ab->hw_params.ring_mask->tx[0];
		} else if (ring_num == 3) {
			grp_mask = &ab->hw_params.ring_mask->rx_wbm_rel[0];
			ring_num = 0;
		} else {
			return -ENOENT;
		}
		break;
	case HAL_REO_EXCEPTION:
		grp_mask = &ab->hw_params.ring_mask->rx_err[0];
		break;
	case HAL_REO_DST:
		grp_mask = &ab->hw_params.ring_mask->rx[0];
		break;
	case HAL_REO_STATUS:
		grp_mask = &ab->hw_params.ring_mask->reo_status[0];
		break;
	case HAL_RXDMA_MONITOR_STATUS:
	case HAL_RXDMA_MONITOR_DST:
		grp_mask = &ab->hw_params.ring_mask->rx_mon_status[0];
		break;
	case HAL_RXDMA_DST:
		grp_mask = &ab->hw_params.ring_mask->rxdma2host[0];
		break;
	case HAL_RXDMA_BUF:
		grp_mask = &ab->hw_params.ring_mask->host2rxdma[0];
		break;
	case HAL_RXDMA_MONITOR_BUF:
	case HAL_TCL_DATA:
	case HAL_TCL_CMD:
	case HAL_REO_CMD:
	case HAL_SW2WBM_RELEASE:
	case HAL_WBM_IDLE_LINK:
	case HAL_TCL_STATUS:
	case HAL_REO_REINJECT:
	case HAL_CE_SRC:
	case HAL_CE_DST:
	case HAL_CE_DST_STATUS:
	default:
		return -ENOENT;
	}

	return ath11k_dp_srng_find_ring_in_mask(ring_num, grp_mask);
}

static void ath11k_dp_srng_msi_setup(struct ath11k_base *ab,
				     struct hal_srng_params *ring_params,
				     enum hal_ring_type type, int ring_num)
{
	int msi_group_number, msi_data_count;
	u32 msi_data_start, msi_irq_start, addr_lo, addr_hi;
	int ret;

	ret = ath11k_get_user_msi_vector(ab, "DP",
					 &msi_data_count, &msi_data_start,
					 &msi_irq_start);
	if (ret)
		return;

	msi_group_number = ath11k_dp_srng_calculate_msi_group(ab, type,
							      ring_num);
	if (msi_group_number < 0) {
		ath11k_dbg(ab, ATH11K_DBG_PCI,
			   "ring not part of an ext_group; ring_type: %d,ring_num %d",
			   type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		return;
	}

	if (msi_group_number > msi_data_count) {
		ath11k_dbg(ab, ATH11K_DBG_PCI,
			   "multiple msi_groups share one msi, msi_group_num %d",
			   msi_group_number);
	}

	ath11k_get_msi_address(ab, &addr_lo, &addr_hi);

	ring_params->msi_addr = addr_lo;
	ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
	ring_params->msi_data = (msi_group_number % msi_data_count)
		+ msi_data_start;
	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}

int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
			 enum hal_ring_type type, int ring_num,
			 int mac_id, int num_entries)
{
	struct hal_srng_params params = { 0 };
	int entry_sz = ath11k_hal_srng_get_entrysize(ab, type);
	int max_entries = ath11k_hal_srng_get_max_entries(ab, type);
	int ret;
	bool cached = false;

	if (max_entries < 0 || entry_sz < 0)
		return -EINVAL;

	if (num_entries > max_entries)
		num_entries = max_entries;

	ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;

	if (ab->hw_params.alloc_cacheable_memory) {
		/* Allocate the reo dst and tx completion rings from cacheable memory */
		switch (type) {
		case HAL_REO_DST:
		case HAL_WBM2SW_RELEASE:
			cached = true;
			break;
		default:
			cached = false;
		}

		if (cached) {
			ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL);
			ring->paddr_unaligned = virt_to_phys(ring->vaddr_unaligned);
		}
	}

	if (!cached)
		ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
							   &ring->paddr_unaligned,
							   GFP_KERNEL);

	if (!ring->vaddr_unaligned)
		return -ENOMEM;

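	/* Align the ring base to HAL_RING_BASE_ALIGN; the DMA address is
	 * offset by the same amount so vaddr and paddr stay in sync.
	 */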
	ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
	ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
		      (unsigned long)ring->vaddr_unaligned);

	params.ring_base_vaddr = ring->vaddr;
	params.ring_base_paddr = ring->paddr;
	params.num_entries = num_entries;
	ath11k_dp_srng_msi_setup(ab, &params, type, ring_num + mac_id);

	switch (type) {
	case HAL_REO_DST:
		params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_RX;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_RXDMA_BUF:
	case HAL_RXDMA_MONITOR_BUF:
	case HAL_RXDMA_MONITOR_STATUS:
		params.low_threshold = num_entries >> 3;
		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
		params.intr_batch_cntr_thres_entries = 0;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_WBM2SW_RELEASE:
		if (ring_num < 3) {
			params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_TX;
			params.intr_timer_thres_us =
					HAL_SRNG_INT_TIMER_THRESHOLD_TX;
			break;
		}
		/* fall through when ring_num >= 3 */
		fallthrough;
	case HAL_REO_EXCEPTION:
	case HAL_REO_REINJECT:
	case HAL_REO_CMD:
	case HAL_REO_STATUS:
	case HAL_TCL_DATA:
	case HAL_TCL_CMD:
	case HAL_TCL_STATUS:
	case HAL_WBM_IDLE_LINK:
	case HAL_SW2WBM_RELEASE:
	case HAL_RXDMA_DST:
	case HAL_RXDMA_MONITOR_DST:
	case HAL_RXDMA_MONITOR_DESC:
		params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
		break;
	case HAL_RXDMA_DIR_BUF:
		break;
	default:
		ath11k_warn(ab, "Not a valid ring type in dp :%d\n", type);
		return -EINVAL;
	}

	if (cached) {
		params.flags |= HAL_SRNG_FLAGS_CACHED;
		ring->cached = 1;
	}

	ret = ath11k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
	if (ret < 0) {
		ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
			    ret, ring_num);
		return ret;
	}

	ring->ring_id = ret;

	return 0;
}

void ath11k_dp_stop_shadow_timers(struct ath11k_base *ab)
{
	int i;

	if (!ab->hw_params.supports_shadow_regs)
		return;

	for (i = 0; i < ab->hw_params.max_tx_ring; i++)
		ath11k_dp_shadow_stop_timer(ab, &ab->dp.tx_ring_timer[i]);

	ath11k_dp_shadow_stop_timer(ab, &ab->dp.reo_cmd_timer);
}

static void ath11k_dp_srng_common_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;

	ath11k_dp_stop_shadow_timers(ab);
	ath11k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
	ath11k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
	ath11k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
		ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
	}
	ath11k_dp_srng_cleanup(ab, &dp->reo_reinject_ring);
	ath11k_dp_srng_cleanup(ab, &dp->rx_rel_ring);
	ath11k_dp_srng_cleanup(ab, &dp->reo_except_ring);
	ath11k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
	ath11k_dp_srng_cleanup(ab, &dp->reo_status_ring);
}

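/* Set up the SRNGs shared across the SoC: the SW2WBM release ring, TCL
 * command/status rings, one TCL data + WBM2SW completion ring pair per TX
 * ring, and the REO reinject/exception/command/status rings. Any failure
 * unwinds everything set up so far via ath11k_dp_srng_common_cleanup().
 */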
static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng;
	int i, ret;

	ret = ath11k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
				   HAL_SW2WBM_RELEASE, 0, 0,
				   DP_WBM_RELEASE_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up wbm2sw_release ring :%d\n",
			    ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
				   DP_TCL_CMD_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
				   0, 0, DP_TCL_STATUS_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up tcl_status ring :%d\n", ret);
		goto err;
	}

	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
					   HAL_TCL_DATA, i, 0,
					   DP_TCL_DATA_RING_SIZE);
		if (ret) {
			ath11k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
				    i, ret);
			goto err;
		}

		ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
					   HAL_WBM2SW_RELEASE, i, 0,
					   DP_TX_COMP_RING_SIZE);
		if (ret) {
			ath11k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
				    i, ret);
			goto err;
		}

		srng = &ab->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id];
		ath11k_hal_tx_init_data_ring(ab, srng);

		ath11k_dp_shadow_init_timer(ab, &dp->tx_ring_timer[i],
					    ATH11K_SHADOW_DP_TIMER_INTERVAL,
					    dp->tx_ring[i].tcl_data_ring.ring_id);
	}

	ret = ath11k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
				   0, 0, DP_REO_REINJECT_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_reinject ring :%d\n",
			    ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
				   3, 0, DP_RX_RELEASE_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
				   0, 0, DP_REO_EXCEPTION_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_exception ring :%d\n",
			    ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
				   0, 0, DP_REO_CMD_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret);
		goto err;
	}

	srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
	ath11k_hal_reo_init_cmd_ring(ab, srng);

	ath11k_dp_shadow_init_timer(ab, &dp->reo_cmd_timer,
				    ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
				    dp->reo_cmd_ring.ring_id);

	ret = ath11k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
				   0, 0, DP_REO_STATUS_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_status ring :%d\n", ret);
		goto err;
	}

	/* When hash based routing of rx packet is enabled, 32 entries to map
	 * the hash values to the ring will be configured.
	 */
	ab->hw_params.hw_ops->reo_setup(ab);

	return 0;

err:
	ath11k_dp_srng_common_cleanup(ab);

	return ret;
}

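/* The WBM idle link list can exceed what a single allocation covers, so the
 * link descriptors may instead be advertised to HW through a list of scatter
 * buffers (HAL_WBM_IDLE_SCATTER_BUF_SIZE each). The helpers below allocate,
 * populate and free that scatter list.
 */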
static void ath11k_dp_scatter_idle_link_desc_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
	int i;

	for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
		if (!slist[i].vaddr)
			continue;

		dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
				  slist[i].vaddr, slist[i].paddr);
		slist[i].vaddr = NULL;
	}
}

static int ath11k_dp_scatter_idle_link_desc_setup(struct ath11k_base *ab,
						  int size,
						  u32 n_link_desc_bank,
						  u32 n_link_desc,
						  u32 last_bank_sz)
{
	struct ath11k_dp *dp = &ab->dp;
	struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
	u32 n_entries_per_buf;
	int num_scatter_buf, scatter_idx;
	struct hal_wbm_link_desc *scatter_buf;
	int align_bytes, n_entries;
	dma_addr_t paddr;
	int rem_entries;
	int i;
	int ret = 0;
	u32 end_offset;

	n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
			    ath11k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
	num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);

	if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
		return -EINVAL;

	for (i = 0; i < num_scatter_buf; i++) {
		slist[i].vaddr = dma_alloc_coherent(ab->dev,
						    HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
						    &slist[i].paddr, GFP_KERNEL);
		if (!slist[i].vaddr) {
			ret = -ENOMEM;
			goto err;
		}
	}

	scatter_idx = 0;
	scatter_buf = slist[scatter_idx].vaddr;
	rem_entries = n_entries_per_buf;

	for (i = 0; i < n_link_desc_bank; i++) {
#if defined(__linux__)
		align_bytes = link_desc_banks[i].vaddr -
			      link_desc_banks[i].vaddr_unaligned;
#elif defined(__FreeBSD__)
		align_bytes = (uintptr_t)link_desc_banks[i].vaddr -
			      (uintptr_t)link_desc_banks[i].vaddr_unaligned;
#endif
		n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) /
			    HAL_LINK_DESC_SIZE;
		paddr = link_desc_banks[i].paddr;
		while (n_entries) {
			ath11k_hal_set_link_desc_addr(scatter_buf, i, paddr);
			n_entries--;
			paddr += HAL_LINK_DESC_SIZE;
			if (rem_entries) {
				rem_entries--;
				scatter_buf++;
				continue;
			}

			rem_entries = n_entries_per_buf;
			scatter_idx++;
			scatter_buf = slist[scatter_idx].vaddr;
		}
	}

	end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
		     sizeof(struct hal_wbm_link_desc);
	ath11k_hal_setup_link_idle_list(ab, slist, num_scatter_buf,
					n_link_desc, end_offset);

	return 0;

err:
	ath11k_dp_scatter_idle_link_desc_cleanup(ab);

	return ret;
}

static void
ath11k_dp_link_desc_bank_free(struct ath11k_base *ab,
			      struct dp_link_desc_bank *link_desc_banks)
{
	int i;

	for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
		if (link_desc_banks[i].vaddr_unaligned) {
			dma_free_coherent(ab->dev,
					  link_desc_banks[i].size,
					  link_desc_banks[i].vaddr_unaligned,
					  link_desc_banks[i].paddr_unaligned);
			link_desc_banks[i].vaddr_unaligned = NULL;
		}
	}
}

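/* Allocate the link descriptor banks from coherent DMA memory. Every bank is
 * DP_LINK_DESC_ALLOC_SIZE_THRESH bytes except possibly the last, which uses
 * last_bank_sz, and each bank base is aligned to HAL_LINK_DESC_ALIGN.
 */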
static int ath11k_dp_link_desc_bank_alloc(struct ath11k_base *ab,
					  struct dp_link_desc_bank *desc_bank,
					  int n_link_desc_bank,
					  int last_bank_sz)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;
	int ret = 0;
	int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;

	for (i = 0; i < n_link_desc_bank; i++) {
		if (i == (n_link_desc_bank - 1) && last_bank_sz)
			desc_sz = last_bank_sz;

		desc_bank[i].vaddr_unaligned =
					dma_alloc_coherent(ab->dev, desc_sz,
							   &desc_bank[i].paddr_unaligned,
							   GFP_KERNEL);
		if (!desc_bank[i].vaddr_unaligned) {
			ret = -ENOMEM;
			goto err;
		}

		desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
					       HAL_LINK_DESC_ALIGN);
		desc_bank[i].paddr = desc_bank[i].paddr_unaligned +
				     ((unsigned long)desc_bank[i].vaddr -
				      (unsigned long)desc_bank[i].vaddr_unaligned);
		desc_bank[i].size = desc_sz;
	}

	return 0;

err:
	ath11k_dp_link_desc_bank_free(ab, dp->link_desc_banks);

	return ret;
}

void ath11k_dp_link_desc_cleanup(struct ath11k_base *ab,
				 struct dp_link_desc_bank *desc_bank,
				 u32 ring_type, struct dp_srng *ring)
{
	ath11k_dp_link_desc_bank_free(ab, desc_bank);

	if (ring_type != HAL_RXDMA_MONITOR_DESC) {
		ath11k_dp_srng_cleanup(ab, ring);
		ath11k_dp_scatter_idle_link_desc_cleanup(ab);
	}
}

static int ath11k_wbm_idle_ring_setup(struct ath11k_base *ab, u32 *n_link_desc)
{
	struct ath11k_dp *dp = &ab->dp;
	u32 n_mpdu_link_desc, n_mpdu_queue_desc;
	u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
	int ret = 0;

	n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
			   HAL_NUM_MPDUS_PER_LINK_DESC;

	n_mpdu_queue_desc = n_mpdu_link_desc /
			    HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;

	n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
			       DP_AVG_MSDUS_PER_FLOW) /
			      HAL_NUM_TX_MSDUS_PER_LINK_DESC;

	n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
			       DP_AVG_MSDUS_PER_MPDU) /
			      HAL_NUM_RX_MSDUS_PER_LINK_DESC;

	*n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
		       n_tx_msdu_link_desc + n_rx_msdu_link_desc;

	if (*n_link_desc & (*n_link_desc - 1))
		*n_link_desc = 1 << fls(*n_link_desc);

	ret = ath11k_dp_srng_setup(ab, &dp->wbm_idle_ring,
				   HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
	if (ret) {
		ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
		return ret;
	}
	return ret;
}

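/* Size the link descriptor memory for n_link_desc descriptors, carve it into
 * banks, and hand the descriptors to HW: either through the scatter list when
 * the total exceeds one bank (and this is not the monitor desc ring), or by
 * writing each descriptor address directly into the WBM idle link ring.
 */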
int ath11k_dp_link_desc_setup(struct ath11k_base *ab,
			      struct dp_link_desc_bank *link_desc_banks,
			      u32 ring_type, struct hal_srng *srng,
			      u32 n_link_desc)
{
	u32 tot_mem_sz;
	u32 n_link_desc_bank, last_bank_sz;
	u32 entry_sz, align_bytes, n_entries;
	u32 paddr;
	u32 *desc;
	int i, ret;

	tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
	tot_mem_sz += HAL_LINK_DESC_ALIGN;

	if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
		n_link_desc_bank = 1;
		last_bank_sz = tot_mem_sz;
	} else {
		n_link_desc_bank = tot_mem_sz /
				   (DP_LINK_DESC_ALLOC_SIZE_THRESH -
				    HAL_LINK_DESC_ALIGN);
		last_bank_sz = tot_mem_sz %
			       (DP_LINK_DESC_ALLOC_SIZE_THRESH -
				HAL_LINK_DESC_ALIGN);

		if (last_bank_sz)
			n_link_desc_bank += 1;
	}

	if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
		return -EINVAL;

	ret = ath11k_dp_link_desc_bank_alloc(ab, link_desc_banks,
					     n_link_desc_bank, last_bank_sz);
	if (ret)
		return ret;

	/* Setup link desc idle list for HW internal usage */
	entry_sz = ath11k_hal_srng_get_entrysize(ab, ring_type);
	tot_mem_sz = entry_sz * n_link_desc;

	/* Setup scatter desc list when the total memory requirement is more
	 * than one bank can hold
	 */
	if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
	    ring_type != HAL_RXDMA_MONITOR_DESC) {
		ret = ath11k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
							     n_link_desc_bank,
							     n_link_desc,
							     last_bank_sz);
		if (ret) {
			ath11k_warn(ab, "failed to setup scatter idle list descriptor :%d\n",
				    ret);
			goto fail_desc_bank_free;
		}

		return 0;
	}

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	for (i = 0; i < n_link_desc_bank; i++) {
#if defined(__linux__)
		align_bytes = link_desc_banks[i].vaddr -
			      link_desc_banks[i].vaddr_unaligned;
#elif defined(__FreeBSD__)
		align_bytes = (uintptr_t)link_desc_banks[i].vaddr -
			      (uintptr_t)link_desc_banks[i].vaddr_unaligned;
#endif
		n_entries = (link_desc_banks[i].size - align_bytes) /
			    HAL_LINK_DESC_SIZE;
		paddr = link_desc_banks[i].paddr;
		while (n_entries &&
		       (desc = ath11k_hal_srng_src_get_next_entry(ab, srng))) {
			ath11k_hal_set_link_desc_addr((struct hal_wbm_link_desc *)desc,
						      i, paddr);
			n_entries--;
			paddr += HAL_LINK_DESC_SIZE;
		}
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return 0;

fail_desc_bank_free:
	ath11k_dp_link_desc_bank_free(ab, link_desc_banks);

	return ret;
}

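/* NAPI poll handler for one ext interrupt group: drain TX completions, then
 * the REO exception, WBM error, REO destination and monitor status rings,
 * deducting each ring's work from the shared budget, handle REO status and
 * rxdma2host errors, and finally replenish the RX refill buffer rings.
 */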
int ath11k_dp_service_srng(struct ath11k_base *ab,
			   struct ath11k_ext_irq_grp *irq_grp,
			   int budget)
{
	struct napi_struct *napi = &irq_grp->napi;
	const struct ath11k_hw_hal_params *hal_params;
	int grp_id = irq_grp->grp_id;
	int work_done = 0;
	int i, j;
	int tot_work_done = 0;

	if (ab->hw_params.ring_mask->tx[grp_id]) {
		i = __fls(ab->hw_params.ring_mask->tx[grp_id]);
		ath11k_dp_tx_completion_handler(ab, i);
	}

	if (ab->hw_params.ring_mask->rx_err[grp_id]) {
		work_done = ath11k_dp_process_rx_err(ab, napi, budget);
		budget -= work_done;
		tot_work_done += work_done;
		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params.ring_mask->rx_wbm_rel[grp_id]) {
		work_done = ath11k_dp_rx_process_wbm_err(ab,
							 napi,
							 budget);
		budget -= work_done;
		tot_work_done += work_done;

		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params.ring_mask->rx[grp_id]) {
		i = fls(ab->hw_params.ring_mask->rx[grp_id]) - 1;
		work_done = ath11k_dp_process_rx(ab, i, napi,
						 budget);
		budget -= work_done;
		tot_work_done += work_done;
		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params.ring_mask->rx_mon_status[grp_id]) {
		for (i = 0; i < ab->num_radios; i++) {
			for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
				int id = i * ab->hw_params.num_rxmda_per_pdev + j;

				if (ab->hw_params.ring_mask->rx_mon_status[grp_id] &
					BIT(id)) {
					work_done =
					ath11k_dp_rx_process_mon_rings(ab,
								       id,
								       napi, budget);
					budget -= work_done;
					tot_work_done += work_done;

					if (budget <= 0)
						goto done;
				}
			}
		}
	}

	if (ab->hw_params.ring_mask->reo_status[grp_id])
		ath11k_dp_process_reo_status(ab);

	for (i = 0; i < ab->num_radios; i++) {
		for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
			int id = i * ab->hw_params.num_rxmda_per_pdev + j;

			if (ab->hw_params.ring_mask->rxdma2host[grp_id] & BIT(id)) {
				work_done = ath11k_dp_process_rxdma_err(ab, id, budget);
				budget -= work_done;
				tot_work_done += work_done;
			}

			if (budget <= 0)
				goto done;

			if (ab->hw_params.ring_mask->host2rxdma[grp_id] & BIT(id)) {
				struct ath11k *ar = ath11k_ab_to_ar(ab, id);
				struct ath11k_pdev_dp *dp = &ar->dp;
				struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;

				hal_params = ab->hw_params.hal_params;
				ath11k_dp_rxbufs_replenish(ab, id, rx_ring, 0,
							   hal_params->rx_buf_rbm);
			}
		}
	}
	/* TODO: Implement handler for other interrupts */

done:
	return tot_work_done;
}
EXPORT_SYMBOL(ath11k_dp_service_srng);

void ath11k_dp_pdev_free(struct ath11k_base *ab)
{
	struct ath11k *ar;
	int i;

	del_timer_sync(&ab->mon_reap_timer);

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		ath11k_dp_rx_pdev_free(ab, i);
		ath11k_debugfs_unregister(ar);
		ath11k_dp_rx_pdev_mon_detach(ar);
	}
}

void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab)
{
	struct ath11k *ar;
	struct ath11k_pdev_dp *dp;
	int i;
	int j;

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		dp = &ar->dp;
		dp->mac_id = i;
		idr_init(&dp->rx_refill_buf_ring.bufs_idr);
		spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
		atomic_set(&dp->num_tx_pending, 0);
		init_waitqueue_head(&dp->tx_empty_waitq);
		for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
			idr_init(&dp->rx_mon_status_refill_ring[j].bufs_idr);
			spin_lock_init(&dp->rx_mon_status_refill_ring[j].idr_lock);
		}
		idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
		spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
	}
}

int ath11k_dp_pdev_alloc(struct ath11k_base *ab)
{
	struct ath11k *ar;
	int ret;
	int i;

	/* TODO: Per-pdev rx ring unlike tx ring which is mapped to different AC's */
	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		ret = ath11k_dp_rx_pdev_alloc(ab, i);
		if (ret) {
			ath11k_warn(ab, "failed to allocate pdev rx for pdev_id :%d\n",
				    i);
			goto err;
		}
		ret = ath11k_dp_rx_pdev_mon_attach(ar);
		if (ret) {
			ath11k_warn(ab, "failed to initialize mon pdev %d\n",
				    i);
			goto err;
		}
	}

	return 0;

err:
	ath11k_dp_pdev_free(ab);

	return ret;
}

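/* Connect the HTT data message service to HTC and remember the endpoint id
 * so HTT messages can be exchanged with firmware.
 */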
int ath11k_dp_htt_connect(struct ath11k_dp *dp)
{
	struct ath11k_htc_svc_conn_req conn_req;
	struct ath11k_htc_svc_conn_resp conn_resp;
	int status;

	memset(&conn_req, 0, sizeof(conn_req));
	memset(&conn_resp, 0, sizeof(conn_resp));

	conn_req.ep_ops.ep_tx_complete = ath11k_dp_htt_htc_tx_complete;
	conn_req.ep_ops.ep_rx_complete = ath11k_dp_htt_htc_t2h_msg_handler;

	/* connect to control service */
	conn_req.service_id = ATH11K_HTC_SVC_ID_HTT_DATA_MSG;

	status = ath11k_htc_connect_service(&dp->ab->htc, &conn_req,
					    &conn_resp);

	if (status)
		return status;

	dp->eid = conn_resp.eid;

	return 0;
}

static void ath11k_dp_update_vdev_search(struct ath11k_vif *arvif)
{
	/* When v2_map_support is true: for STA mode, enable address
	 * search index, tcl uses ast_hash value in the descriptor.
	 * When v2_map_support is false: for STA mode, don't enable
	 * address search index.
	 */
	switch (arvif->vdev_type) {
	case WMI_VDEV_TYPE_STA:
		if (arvif->ar->ab->hw_params.htt_peer_map_v2) {
			arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
			arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
		} else {
			arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
			arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
		}
		break;
	case WMI_VDEV_TYPE_AP:
	case WMI_VDEV_TYPE_IBSS:
		arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
		arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
		break;
	case WMI_VDEV_TYPE_MONITOR:
	default:
		return;
	}
}

void ath11k_dp_vdev_tx_attach(struct ath11k *ar, struct ath11k_vif *arvif)
{
	arvif->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 1) |
			       FIELD_PREP(HTT_TCL_META_DATA_VDEV_ID,
					  arvif->vdev_id) |
			       FIELD_PREP(HTT_TCL_META_DATA_PDEV_ID,
					  ar->pdev->pdev_id);

	/* set HTT extension valid bit to 0 by default */
	arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;

	ath11k_dp_update_vdev_search(arvif);
}

static int ath11k_dp_tx_pending_cleanup(int buf_id, void *skb, void *ctx)
{
	struct ath11k_base *ab = (struct ath11k_base *)ctx;
	struct sk_buff *msdu = skb;

	dma_unmap_single(ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len,
			 DMA_TO_DEVICE);

	dev_kfree_skb_any(msdu);

	return 0;
}

void ath11k_dp_free(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;

	ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);

	ath11k_dp_srng_common_cleanup(ab);

	ath11k_dp_reo_cmd_list_cleanup(ab);

	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		spin_lock_bh(&dp->tx_ring[i].tx_idr_lock);
		idr_for_each(&dp->tx_ring[i].txbuf_idr,
			     ath11k_dp_tx_pending_cleanup, ab);
		idr_destroy(&dp->tx_ring[i].txbuf_idr);
		spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock);
		kfree(dp->tx_ring[i].tx_status);
	}

	/* Deinit any SOC level resource */
}

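/* Allocate the SoC level DP resources: the WBM idle link ring and link
 * descriptor banks, the common SRNGs, the per TX ring buffer-id IDRs and
 * completion status FIFOs, and the DSCP-to-TID map tables.
 */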
int ath11k_dp_alloc(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng = NULL;
	size_t size = 0;
	u32 n_link_desc = 0;
	int ret;
	int i;

	dp->ab = ab;

	INIT_LIST_HEAD(&dp->reo_cmd_list);
	INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
	INIT_LIST_HEAD(&dp->dp_full_mon_mpdu_list);
	spin_lock_init(&dp->reo_cmd_lock);

	dp->reo_cmd_cache_flush_count = 0;

	ret = ath11k_wbm_idle_ring_setup(ab, &n_link_desc);
	if (ret) {
		ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
		return ret;
	}

	srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];

	ret = ath11k_dp_link_desc_setup(ab, dp->link_desc_banks,
					HAL_WBM_IDLE_LINK, srng, n_link_desc);
	if (ret) {
		ath11k_warn(ab, "failed to setup link desc: %d\n", ret);
		return ret;
	}

	ret = ath11k_dp_srng_common_setup(ab);
	if (ret)
		goto fail_link_desc_cleanup;

	size = sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE;

	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		idr_init(&dp->tx_ring[i].txbuf_idr);
		spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
		dp->tx_ring[i].tcl_data_ring_id = i;

		dp->tx_ring[i].tx_status_head = 0;
		dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
		dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
		if (!dp->tx_ring[i].tx_status) {
			ret = -ENOMEM;
			goto fail_cmn_srng_cleanup;
		}
	}

	for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
		ath11k_hal_tx_set_dscp_tid_map(ab, i);

	/* Init any SOC level resource for DP */

	return 0;

fail_cmn_srng_cleanup:
	ath11k_dp_srng_common_cleanup(ab);

fail_link_desc_cleanup:
	ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);

	return ret;
}

static void ath11k_dp_shadow_timer_handler(struct timer_list *t)
{
	struct ath11k_hp_update_timer *update_timer = from_timer(update_timer,
								 t, timer);
	struct ath11k_base *ab = update_timer->ab;
	struct hal_srng *srng = &ab->hal.srng_list[update_timer->ring_id];

	spin_lock_bh(&srng->lock);

	/* When the timer fires, the handler checks whether new TX has
	 * happened in the meantime. The HP is updated, and the timer is
	 * stopped, only when there were no TX operations during the timeout
	 * interval; otherwise the timer is rearmed. It is started again when
	 * TX happens.
	 */
	if (update_timer->timer_tx_num != update_timer->tx_num) {
		update_timer->timer_tx_num = update_timer->tx_num;
		mod_timer(&update_timer->timer, jiffies +
			  msecs_to_jiffies(update_timer->interval));
	} else {
		update_timer->started = false;
		ath11k_hal_srng_shadow_update_hp_tp(ab, srng);
	}

	spin_unlock_bh(&srng->lock);
}

void ath11k_dp_shadow_start_timer(struct ath11k_base *ab,
				  struct hal_srng *srng,
				  struct ath11k_hp_update_timer *update_timer)
{
	lockdep_assert_held(&srng->lock);

	if (!ab->hw_params.supports_shadow_regs)
		return;

	update_timer->tx_num++;

	if (update_timer->started)
		return;

	update_timer->started = true;
	update_timer->timer_tx_num = update_timer->tx_num;
	mod_timer(&update_timer->timer, jiffies +
		  msecs_to_jiffies(update_timer->interval));
}

void ath11k_dp_shadow_stop_timer(struct ath11k_base *ab,
				 struct ath11k_hp_update_timer *update_timer)
{
	if (!ab->hw_params.supports_shadow_regs)
		return;

	if (!update_timer->init)
		return;

	del_timer_sync(&update_timer->timer);
}

void ath11k_dp_shadow_init_timer(struct ath11k_base *ab,
				 struct ath11k_hp_update_timer *update_timer,
				 u32 interval, u32 ring_id)
{
	if (!ab->hw_params.supports_shadow_regs)
		return;

	update_timer->tx_num = 0;
	update_timer->timer_tx_num = 0;
	update_timer->ab = ab;
	update_timer->ring_id = ring_id;
	update_timer->interval = interval;
	update_timer->init = true;
	timer_setup(&update_timer->timer,
		    ath11k_dp_shadow_timer_handler, 0);
}