1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 /* 3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 5 */ 6 7 #include <linux/ieee80211.h> 8 #include <linux/kernel.h> 9 #include <linux/skbuff.h> 10 #include <crypto/hash.h> 11 #include "core.h" 12 #include "debug.h" 13 #include "hw.h" 14 #include "dp_rx.h" 15 #include "dp_tx.h" 16 #include "peer.h" 17 #include "dp_mon.h" 18 #include "debugfs_htt_stats.h" 19 20 static int ath12k_dp_rx_tid_delete_handler(struct ath12k_base *ab, 21 struct ath12k_dp_rx_tid_rxq *rx_tid); 22 23 static size_t ath12k_dp_list_cut_nodes(struct list_head *list, 24 struct list_head *head, 25 size_t count) 26 { 27 struct list_head *cur; 28 struct ath12k_rx_desc_info *rx_desc; 29 size_t nodes = 0; 30 31 if (!count) { 32 INIT_LIST_HEAD(list); 33 goto out; 34 } 35 36 list_for_each(cur, head) { 37 if (!count) 38 break; 39 40 rx_desc = list_entry(cur, struct ath12k_rx_desc_info, list); 41 rx_desc->in_use = true; 42 43 count--; 44 nodes++; 45 } 46 47 list_cut_before(list, head, cur); 48 out: 49 return nodes; 50 } 51 52 static void ath12k_dp_rx_enqueue_free(struct ath12k_dp *dp, 53 struct list_head *used_list) 54 { 55 struct ath12k_rx_desc_info *rx_desc, *safe; 56 57 /* Reset the use flag */ 58 list_for_each_entry_safe(rx_desc, safe, used_list, list) 59 rx_desc->in_use = false; 60 61 spin_lock_bh(&dp->rx_desc_lock); 62 list_splice_tail(used_list, &dp->rx_desc_free_list); 63 spin_unlock_bh(&dp->rx_desc_lock); 64 } 65 66 /* Returns number of Rx buffers replenished */ 67 int ath12k_dp_rx_bufs_replenish(struct ath12k_dp *dp, 68 struct dp_rxdma_ring *rx_ring, 69 struct list_head *used_list, 70 int req_entries) 71 { 72 struct ath12k_base *ab = dp->ab; 73 struct ath12k_buffer_addr *desc; 74 struct hal_srng *srng; 75 struct sk_buff *skb; 76 int num_free; 77 int num_remain; 78 u32 cookie; 79 dma_addr_t paddr; 80 struct ath12k_rx_desc_info *rx_desc; 81 enum hal_rx_buf_return_buf_manager mgr = dp->hal->hal_params->rx_buf_rbm; 82 83 req_entries = min(req_entries, rx_ring->bufs_max); 84 85 srng = &dp->hal->srng_list[rx_ring->refill_buf_ring.ring_id]; 86 87 spin_lock_bh(&srng->lock); 88 89 ath12k_hal_srng_access_begin(ab, srng); 90 91 num_free = ath12k_hal_srng_src_num_free(ab, srng, true); 92 if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4)) 93 req_entries = num_free; 94 95 req_entries = min(num_free, req_entries); 96 num_remain = req_entries; 97 98 if (!num_remain) 99 goto out; 100 101 /* Get the descriptor from free list */ 102 if (list_empty(used_list)) { 103 spin_lock_bh(&dp->rx_desc_lock); 104 req_entries = ath12k_dp_list_cut_nodes(used_list, 105 &dp->rx_desc_free_list, 106 num_remain); 107 spin_unlock_bh(&dp->rx_desc_lock); 108 num_remain = req_entries; 109 } 110 111 while (num_remain > 0) { 112 skb = dev_alloc_skb(DP_RX_BUFFER_SIZE + 113 DP_RX_BUFFER_ALIGN_SIZE); 114 if (!skb) 115 break; 116 117 if (!IS_ALIGNED((unsigned long)skb->data, 118 DP_RX_BUFFER_ALIGN_SIZE)) { 119 skb_pull(skb, 120 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) - 121 skb->data); 122 } 123 124 paddr = dma_map_single(dp->dev, skb->data, 125 skb->len + skb_tailroom(skb), 126 DMA_FROM_DEVICE); 127 if (dma_mapping_error(dp->dev, paddr)) 128 goto fail_free_skb; 129 130 rx_desc = list_first_entry_or_null(used_list, 131 struct ath12k_rx_desc_info, 132 list); 133 if (!rx_desc) 134 goto fail_dma_unmap; 135 136 rx_desc->skb = skb; 137 cookie = rx_desc->cookie; 138 139 desc = ath12k_hal_srng_src_get_next_entry(ab, srng); 140 if (!desc) 141 goto fail_dma_unmap; 142 143 list_del(&rx_desc->list); 144 ATH12K_SKB_RXCB(skb)->paddr = paddr; 145 146 num_remain--; 147 148 ath12k_hal_rx_buf_addr_info_set(dp->hal, desc, paddr, cookie, 149 mgr); 150 } 151 152 goto out; 153 154 fail_dma_unmap: 155 dma_unmap_single(dp->dev, paddr, skb->len + skb_tailroom(skb), 156 DMA_FROM_DEVICE); 157 fail_free_skb: 158 dev_kfree_skb_any(skb); 159 out: 160 ath12k_hal_srng_access_end(ab, srng); 161 162 if (!list_empty(used_list)) 163 ath12k_dp_rx_enqueue_free(dp, used_list); 164 165 spin_unlock_bh(&srng->lock); 166 167 return req_entries - num_remain; 168 } 169 EXPORT_SYMBOL(ath12k_dp_rx_bufs_replenish); 170 171 static int ath12k_dp_rxdma_mon_buf_ring_free(struct ath12k_base *ab, 172 struct dp_rxdma_mon_ring *rx_ring) 173 { 174 struct sk_buff *skb; 175 int buf_id; 176 177 spin_lock_bh(&rx_ring->idr_lock); 178 idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) { 179 idr_remove(&rx_ring->bufs_idr, buf_id); 180 /* TODO: Understand where internal driver does this dma_unmap 181 * of rxdma_buffer. 182 */ 183 dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr, 184 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); 185 dev_kfree_skb_any(skb); 186 } 187 188 idr_destroy(&rx_ring->bufs_idr); 189 spin_unlock_bh(&rx_ring->idr_lock); 190 191 return 0; 192 } 193 194 static int ath12k_dp_rxdma_buf_free(struct ath12k_base *ab) 195 { 196 struct ath12k_dp *dp = ath12k_ab_to_dp(ab); 197 int i; 198 199 ath12k_dp_rxdma_mon_buf_ring_free(ab, &dp->rxdma_mon_buf_ring); 200 201 if (ab->hw_params->rxdma1_enable) 202 return 0; 203 204 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) 205 ath12k_dp_rxdma_mon_buf_ring_free(ab, 206 &dp->rx_mon_status_refill_ring[i]); 207 208 return 0; 209 } 210 211 static int ath12k_dp_rxdma_mon_ring_buf_setup(struct ath12k_base *ab, 212 struct dp_rxdma_mon_ring *rx_ring, 213 u32 ringtype) 214 { 215 int num_entries; 216 217 num_entries = rx_ring->refill_buf_ring.size / 218 ath12k_hal_srng_get_entrysize(ab, ringtype); 219 220 rx_ring->bufs_max = num_entries; 221 222 if (ringtype == HAL_RXDMA_MONITOR_STATUS) 223 ath12k_dp_mon_status_bufs_replenish(ab, rx_ring, 224 num_entries); 225 else 226 ath12k_dp_mon_buf_replenish(ab, rx_ring, num_entries); 227 228 return 0; 229 } 230 231 static int ath12k_dp_rxdma_ring_buf_setup(struct ath12k_base *ab, 232 struct dp_rxdma_ring *rx_ring) 233 { 234 LIST_HEAD(list); 235 236 rx_ring->bufs_max = rx_ring->refill_buf_ring.size / 237 ath12k_hal_srng_get_entrysize(ab, HAL_RXDMA_BUF); 238 239 ath12k_dp_rx_bufs_replenish(ath12k_ab_to_dp(ab), rx_ring, &list, 0); 240 241 return 0; 242 } 243 244 static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab) 245 { 246 struct ath12k_dp *dp = ath12k_ab_to_dp(ab); 247 struct dp_rxdma_mon_ring *mon_ring; 248 int ret, i; 249 250 ret = ath12k_dp_rxdma_ring_buf_setup(ab, &dp->rx_refill_buf_ring); 251 if (ret) { 252 ath12k_warn(ab, 253 "failed to setup HAL_RXDMA_BUF\n"); 254 return ret; 255 } 256 257 if (ab->hw_params->rxdma1_enable) { 258 ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab, 259 &dp->rxdma_mon_buf_ring, 260 HAL_RXDMA_MONITOR_BUF); 261 if (ret) 262 ath12k_warn(ab, 263 "failed to setup HAL_RXDMA_MONITOR_BUF\n"); 264 return ret; 265 } 266 267 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { 268 mon_ring = &dp->rx_mon_status_refill_ring[i]; 269 ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab, mon_ring, 270 HAL_RXDMA_MONITOR_STATUS); 271 if (ret) { 272 ath12k_warn(ab, 273 "failed to setup HAL_RXDMA_MONITOR_STATUS\n"); 274 return ret; 275 } 276 } 277 278 return 0; 279 } 280 281 static void ath12k_dp_rx_pdev_srng_free(struct ath12k *ar) 282 { 283 struct ath12k_pdev_dp *dp = &ar->dp; 284 struct ath12k_base *ab = ar->ab; 285 int i; 286 287 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) 288 ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_dst_ring[i]); 289 } 290 291 void ath12k_dp_rx_pdev_reo_cleanup(struct ath12k_base *ab) 292 { 293 struct ath12k_dp *dp = ath12k_ab_to_dp(ab); 294 int i; 295 296 for (i = 0; i < DP_REO_DST_RING_MAX; i++) 297 ath12k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]); 298 } 299 300 int ath12k_dp_rx_pdev_reo_setup(struct ath12k_base *ab) 301 { 302 struct ath12k_dp *dp = ath12k_ab_to_dp(ab); 303 int ret; 304 int i; 305 306 for (i = 0; i < DP_REO_DST_RING_MAX; i++) { 307 ret = ath12k_dp_srng_setup(ab, &dp->reo_dst_ring[i], 308 HAL_REO_DST, i, 0, 309 DP_REO_DST_RING_SIZE); 310 if (ret) { 311 ath12k_warn(ab, "failed to setup reo_dst_ring\n"); 312 goto err_reo_cleanup; 313 } 314 } 315 316 return 0; 317 318 err_reo_cleanup: 319 ath12k_dp_rx_pdev_reo_cleanup(ab); 320 321 return ret; 322 } 323 324 static int ath12k_dp_rx_pdev_srng_alloc(struct ath12k *ar) 325 { 326 struct ath12k_pdev_dp *dp = &ar->dp; 327 struct ath12k_base *ab = ar->ab; 328 int i; 329 int ret; 330 u32 mac_id = dp->mac_id; 331 332 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { 333 ret = ath12k_dp_srng_setup(ar->ab, 334 &dp->rxdma_mon_dst_ring[i], 335 HAL_RXDMA_MONITOR_DST, 336 0, mac_id + i, 337 DP_RXDMA_MONITOR_DST_RING_SIZE(ab)); 338 if (ret) { 339 ath12k_warn(ar->ab, 340 "failed to setup HAL_RXDMA_MONITOR_DST\n"); 341 return ret; 342 } 343 } 344 345 return 0; 346 } 347 348 void ath12k_dp_init_rx_tid_rxq(struct ath12k_dp_rx_tid_rxq *rx_tid_rxq, 349 struct ath12k_dp_rx_tid *rx_tid, 350 bool active) 351 { 352 rx_tid_rxq->tid = rx_tid->tid; 353 rx_tid_rxq->active = active; 354 rx_tid_rxq->qbuf = rx_tid->qbuf; 355 } 356 EXPORT_SYMBOL(ath12k_dp_init_rx_tid_rxq); 357 358 static void ath12k_dp_rx_tid_cleanup(struct ath12k_base *ab, 359 struct ath12k_reoq_buf *tid_qbuf) 360 { 361 if (tid_qbuf->vaddr) { 362 dma_unmap_single(ab->dev, tid_qbuf->paddr_aligned, 363 tid_qbuf->size, DMA_BIDIRECTIONAL); 364 kfree(tid_qbuf->vaddr); 365 tid_qbuf->vaddr = NULL; 366 } 367 } 368 369 void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab) 370 { 371 struct ath12k_dp *dp = ath12k_ab_to_dp(ab); 372 struct ath12k_dp_rx_reo_cmd *cmd, *tmp; 373 struct ath12k_dp_rx_reo_cache_flush_elem *cmd_cache, *tmp_cache; 374 struct dp_reo_update_rx_queue_elem *cmd_queue, *tmp_queue; 375 376 spin_lock_bh(&dp->reo_rxq_flush_lock); 377 list_for_each_entry_safe(cmd_queue, tmp_queue, &dp->reo_cmd_update_rx_queue_list, 378 list) { 379 list_del(&cmd_queue->list); 380 ath12k_dp_rx_tid_cleanup(ab, &cmd_queue->rx_tid.qbuf); 381 kfree(cmd_queue); 382 } 383 list_for_each_entry_safe(cmd_cache, tmp_cache, 384 &dp->reo_cmd_cache_flush_list, list) { 385 list_del(&cmd_cache->list); 386 dp->reo_cmd_cache_flush_count--; 387 ath12k_dp_rx_tid_cleanup(ab, &cmd_cache->data.qbuf); 388 kfree(cmd_cache); 389 } 390 spin_unlock_bh(&dp->reo_rxq_flush_lock); 391 392 spin_lock_bh(&dp->reo_cmd_lock); 393 list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) { 394 list_del(&cmd->list); 395 ath12k_dp_rx_tid_cleanup(ab, &cmd->data.qbuf); 396 kfree(cmd); 397 } 398 spin_unlock_bh(&dp->reo_cmd_lock); 399 } 400 401 void ath12k_dp_reo_cmd_free(struct ath12k_dp *dp, void *ctx, 402 enum hal_reo_cmd_status status) 403 { 404 struct ath12k_dp_rx_tid_rxq *rx_tid = ctx; 405 406 if (status != HAL_REO_CMD_SUCCESS) 407 ath12k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n", 408 rx_tid->tid, status); 409 410 ath12k_dp_rx_tid_cleanup(dp->ab, &rx_tid->qbuf); 411 } 412 EXPORT_SYMBOL(ath12k_dp_reo_cmd_free); 413 414 void ath12k_dp_rx_process_reo_cmd_update_rx_queue_list(struct ath12k_dp *dp) 415 { 416 struct ath12k_base *ab = dp->ab; 417 struct dp_reo_update_rx_queue_elem *elem, *tmp; 418 419 spin_lock_bh(&dp->reo_rxq_flush_lock); 420 421 list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_update_rx_queue_list, list) { 422 if (elem->rx_tid.active) 423 continue; 424 425 if (ath12k_dp_rx_tid_delete_handler(ab, &elem->rx_tid)) 426 break; 427 428 ath12k_dp_arch_peer_rx_tid_qref_reset(dp, 429 elem->is_ml_peer ? 430 elem->ml_peer_id : elem->peer_id, 431 elem->rx_tid.tid); 432 433 if (ab->hw_params->reoq_lut_support) 434 ath12k_hal_reo_shared_qaddr_cache_clear(ab); 435 436 list_del(&elem->list); 437 kfree(elem); 438 } 439 440 spin_unlock_bh(&dp->reo_rxq_flush_lock); 441 } 442 EXPORT_SYMBOL(ath12k_dp_rx_process_reo_cmd_update_rx_queue_list); 443 444 void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx, 445 enum hal_reo_cmd_status status) 446 { 447 struct ath12k_base *ab = dp->ab; 448 struct ath12k_dp_rx_tid_rxq *rx_tid = ctx; 449 struct ath12k_dp_rx_reo_cache_flush_elem *elem, *tmp; 450 451 if (status == HAL_REO_CMD_DRAIN) { 452 goto free_desc; 453 } else if (status != HAL_REO_CMD_SUCCESS) { 454 /* Shouldn't happen! Cleanup in case of other failure? */ 455 ath12k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n", 456 rx_tid->tid, status); 457 return; 458 } 459 460 /* Retry the HAL_REO_CMD_UPDATE_RX_QUEUE command for entries 461 * in the pending queue list marked TID as inactive 462 */ 463 spin_lock_bh(&dp->dp_lock); 464 ath12k_dp_rx_process_reo_cmd_update_rx_queue_list(dp); 465 spin_unlock_bh(&dp->dp_lock); 466 467 elem = kzalloc_obj(*elem, GFP_ATOMIC); 468 if (!elem) 469 goto free_desc; 470 471 elem->ts = jiffies; 472 memcpy(&elem->data, rx_tid, sizeof(*rx_tid)); 473 474 spin_lock_bh(&dp->reo_rxq_flush_lock); 475 list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list); 476 dp->reo_cmd_cache_flush_count++; 477 478 /* Flush and invalidate aged REO desc from HW cache */ 479 list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list, 480 list) { 481 if (dp->reo_cmd_cache_flush_count > ATH12K_DP_RX_REO_DESC_FREE_THRES || 482 time_after(jiffies, elem->ts + 483 msecs_to_jiffies(ATH12K_DP_RX_REO_DESC_FREE_TIMEOUT_MS))) { 484 /* The reo_cmd_cache_flush_list is used in only two contexts, 485 * one is in this function called from napi and the 486 * other in ath12k_dp_free during core destroy. 487 * If cache command sent is success, delete the element in 488 * the cache list. ath12k_dp_rx_reo_cmd_list_cleanup 489 * will be called during core destroy. 490 */ 491 492 if (ath12k_dp_arch_reo_cache_flush(dp, &elem->data)) 493 break; 494 495 list_del(&elem->list); 496 dp->reo_cmd_cache_flush_count--; 497 498 kfree(elem); 499 } 500 } 501 spin_unlock_bh(&dp->reo_rxq_flush_lock); 502 503 return; 504 free_desc: 505 ath12k_dp_rx_tid_cleanup(ab, &rx_tid->qbuf); 506 } 507 EXPORT_SYMBOL(ath12k_dp_rx_tid_del_func); 508 509 static int ath12k_dp_rx_tid_delete_handler(struct ath12k_base *ab, 510 struct ath12k_dp_rx_tid_rxq *rx_tid) 511 { 512 struct ath12k_dp *dp = ath12k_ab_to_dp(ab); 513 514 return ath12k_dp_arch_rx_tid_delete_handler(dp, rx_tid); 515 } 516 517 void ath12k_dp_mark_tid_as_inactive(struct ath12k_dp *dp, int peer_id, u8 tid) 518 { 519 struct dp_reo_update_rx_queue_elem *elem; 520 struct ath12k_dp_rx_tid_rxq *rx_tid; 521 522 spin_lock_bh(&dp->reo_rxq_flush_lock); 523 list_for_each_entry(elem, &dp->reo_cmd_update_rx_queue_list, list) { 524 if (elem->peer_id == peer_id) { 525 rx_tid = &elem->rx_tid; 526 if (rx_tid->tid == tid) { 527 rx_tid->active = false; 528 break; 529 } 530 } 531 } 532 spin_unlock_bh(&dp->reo_rxq_flush_lock); 533 } 534 EXPORT_SYMBOL(ath12k_dp_mark_tid_as_inactive); 535 536 void ath12k_dp_rx_peer_tid_cleanup(struct ath12k *ar, struct ath12k_dp_link_peer *peer) 537 { 538 struct ath12k_dp_rx_tid *rx_tid; 539 int i; 540 struct ath12k_base *ab = ar->ab; 541 struct ath12k_dp *dp = ath12k_ab_to_dp(ab); 542 543 lockdep_assert_held(&dp->dp_lock); 544 545 if (!peer->primary_link) 546 return; 547 548 for (i = 0; i <= IEEE80211_NUM_TIDS; i++) { 549 rx_tid = &peer->dp_peer->rx_tid[i]; 550 551 ath12k_dp_arch_rx_peer_tid_delete(dp, peer, i); 552 ath12k_dp_arch_rx_frags_cleanup(dp, rx_tid, true); 553 554 spin_unlock_bh(&dp->dp_lock); 555 timer_delete_sync(&rx_tid->frag_timer); 556 spin_lock_bh(&dp->dp_lock); 557 } 558 } 559 560 static int ath12k_dp_prepare_reo_update_elem(struct ath12k_dp *dp, 561 struct ath12k_dp_link_peer *peer, 562 struct ath12k_dp_rx_tid *rx_tid) 563 { 564 struct dp_reo_update_rx_queue_elem *elem; 565 566 lockdep_assert_held(&dp->dp_lock); 567 568 elem = kzalloc_obj(*elem, GFP_ATOMIC); 569 if (!elem) 570 return -ENOMEM; 571 572 elem->peer_id = peer->peer_id; 573 elem->is_ml_peer = peer->mlo; 574 elem->ml_peer_id = peer->ml_id; 575 576 ath12k_dp_init_rx_tid_rxq(&elem->rx_tid, rx_tid, 577 (peer->rx_tid_active_bitmask & (1 << rx_tid->tid))); 578 579 spin_lock_bh(&dp->reo_rxq_flush_lock); 580 list_add_tail(&elem->list, &dp->reo_cmd_update_rx_queue_list); 581 spin_unlock_bh(&dp->reo_rxq_flush_lock); 582 583 return 0; 584 } 585 586 int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id, 587 u8 tid, u32 ba_win_sz, u16 ssn, 588 enum hal_pn_type pn_type) 589 { 590 struct ath12k_base *ab = ar->ab; 591 struct ath12k_dp *dp = ath12k_ab_to_dp(ab); 592 struct ath12k_dp_link_peer *peer; 593 struct ath12k_dp_rx_tid *rx_tid; 594 dma_addr_t paddr_aligned; 595 int ret; 596 597 spin_lock_bh(&dp->dp_lock); 598 599 peer = ath12k_dp_link_peer_find_by_vdev_and_addr(dp, vdev_id, peer_mac); 600 if (!peer || !peer->dp_peer) { 601 spin_unlock_bh(&dp->dp_lock); 602 ath12k_warn(ab, "failed to find the peer to set up rx tid\n"); 603 return -ENOENT; 604 } 605 606 if (ab->hw_params->dp_primary_link_only && 607 !peer->primary_link) { 608 spin_unlock_bh(&dp->dp_lock); 609 return 0; 610 } 611 612 if (ab->hw_params->reoq_lut_support && 613 (!dp->reoq_lut.vaddr || !dp->ml_reoq_lut.vaddr)) { 614 spin_unlock_bh(&dp->dp_lock); 615 ath12k_warn(ab, "reo qref table is not setup\n"); 616 return -EINVAL; 617 } 618 619 if (peer->peer_id > DP_MAX_PEER_ID || tid > IEEE80211_NUM_TIDS) { 620 ath12k_warn(ab, "peer id of peer %d or tid %d doesn't allow reoq setup\n", 621 peer->peer_id, tid); 622 spin_unlock_bh(&dp->dp_lock); 623 return -EINVAL; 624 } 625 626 rx_tid = &peer->dp_peer->rx_tid[tid]; 627 /* Update the tid queue if it is already setup */ 628 if (peer->rx_tid_active_bitmask & (1 << tid)) { 629 ret = ath12k_dp_arch_peer_rx_tid_reo_update(dp, peer, rx_tid, 630 ba_win_sz, ssn, true); 631 spin_unlock_bh(&dp->dp_lock); 632 if (ret) { 633 ath12k_warn(ab, "failed to update reo for rx tid %d\n", tid); 634 return ret; 635 } 636 637 if (!ab->hw_params->reoq_lut_support) { 638 paddr_aligned = rx_tid->qbuf.paddr_aligned; 639 ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, 640 peer_mac, 641 paddr_aligned, tid, 642 1, ba_win_sz); 643 if (ret) { 644 ath12k_warn(ab, "failed to setup peer rx reorder queuefor tid %d: %d\n", 645 tid, ret); 646 return ret; 647 } 648 } 649 650 return 0; 651 } 652 653 rx_tid->tid = tid; 654 655 rx_tid->ba_win_sz = ba_win_sz; 656 657 ret = ath12k_dp_arch_rx_assign_reoq(dp, peer->dp_peer, rx_tid, ssn, pn_type); 658 if (ret) { 659 spin_unlock_bh(&dp->dp_lock); 660 ath12k_warn(ab, "failed to assign reoq buf for rx tid %u\n", tid); 661 return ret; 662 } 663 664 peer->rx_tid_active_bitmask |= (1 << tid); 665 666 /* Pre-allocate the update_rxq_list for the corresponding tid 667 * This will be used during the tid delete. The reason we are not 668 * allocating during tid delete is that, if any alloc fail in update_rxq_list 669 * we may not be able to delete the tid vaddr/paddr and may lead to leak 670 */ 671 ret = ath12k_dp_prepare_reo_update_elem(dp, peer, rx_tid); 672 if (ret) { 673 ath12k_warn(ab, "failed to alloc update_rxq_list for rx tid %u\n", tid); 674 ath12k_dp_rx_tid_cleanup(ab, &rx_tid->qbuf); 675 spin_unlock_bh(&dp->dp_lock); 676 return ret; 677 } 678 679 paddr_aligned = rx_tid->qbuf.paddr_aligned; 680 if (ab->hw_params->reoq_lut_support) { 681 /* Update the REO queue LUT at the corresponding peer id 682 * and tid with qaddr. 683 */ 684 if (peer->mlo) 685 ath12k_dp_arch_peer_rx_tid_qref_setup(dp, peer->ml_id, tid, 686 paddr_aligned); 687 else 688 ath12k_dp_arch_peer_rx_tid_qref_setup(dp, peer->peer_id, tid, 689 paddr_aligned); 690 691 spin_unlock_bh(&dp->dp_lock); 692 } else { 693 spin_unlock_bh(&dp->dp_lock); 694 ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac, 695 paddr_aligned, tid, 1, 696 ba_win_sz); 697 } 698 699 return ret; 700 } 701 702 int ath12k_dp_rx_ampdu_start(struct ath12k *ar, 703 struct ieee80211_ampdu_params *params, 704 u8 link_id) 705 { 706 struct ath12k_base *ab = ar->ab; 707 struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta); 708 struct ath12k_link_sta *arsta; 709 int vdev_id; 710 int ret; 711 712 lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); 713 714 arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy, 715 ahsta->link[link_id]); 716 if (!arsta) 717 return -ENOLINK; 718 719 vdev_id = arsta->arvif->vdev_id; 720 721 ret = ath12k_dp_rx_peer_tid_setup(ar, arsta->addr, vdev_id, 722 params->tid, params->buf_size, 723 params->ssn, arsta->ahsta->pn_type); 724 if (ret) 725 ath12k_warn(ab, "failed to setup rx tid %d\n", ret); 726 727 return ret; 728 } 729 730 int ath12k_dp_rx_ampdu_stop(struct ath12k *ar, 731 struct ieee80211_ampdu_params *params, 732 u8 link_id) 733 { 734 struct ath12k_base *ab = ar->ab; 735 struct ath12k_dp *dp = ath12k_ab_to_dp(ab); 736 struct ath12k_dp_link_peer *peer; 737 struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta); 738 struct ath12k_link_sta *arsta; 739 int vdev_id; 740 bool active; 741 int ret; 742 743 lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); 744 745 arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy, 746 ahsta->link[link_id]); 747 if (!arsta) 748 return -ENOLINK; 749 750 vdev_id = arsta->arvif->vdev_id; 751 752 spin_lock_bh(&dp->dp_lock); 753 754 peer = ath12k_dp_link_peer_find_by_vdev_and_addr(dp, vdev_id, arsta->addr); 755 if (!peer || !peer->dp_peer) { 756 spin_unlock_bh(&dp->dp_lock); 757 ath12k_warn(ab, "failed to find the peer to stop rx aggregation\n"); 758 return -ENOENT; 759 } 760 761 if (ab->hw_params->dp_primary_link_only && 762 !peer->primary_link) { 763 spin_unlock_bh(&dp->dp_lock); 764 return 0; 765 } 766 767 active = peer->rx_tid_active_bitmask & (1 << params->tid); 768 if (!active) { 769 spin_unlock_bh(&dp->dp_lock); 770 return 0; 771 } 772 773 ret = ath12k_dp_arch_peer_rx_tid_reo_update(dp, peer, peer->dp_peer->rx_tid, 774 1, 0, false); 775 spin_unlock_bh(&dp->dp_lock); 776 if (ret) { 777 ath12k_warn(ab, "failed to update reo for rx tid %d: %d\n", 778 params->tid, ret); 779 return ret; 780 } 781 782 return ret; 783 } 784 785 int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif, 786 const u8 *peer_addr, 787 enum set_key_cmd key_cmd, 788 struct ieee80211_key_conf *key) 789 { 790 struct ath12k *ar = arvif->ar; 791 struct ath12k_base *ab = ar->ab; 792 struct ath12k_dp *dp = ath12k_ab_to_dp(ab); 793 struct ath12k_hal_reo_cmd cmd = {}; 794 struct ath12k_dp_link_peer *peer; 795 struct ath12k_dp_rx_tid *rx_tid; 796 struct ath12k_dp_rx_tid_rxq rx_tid_rxq; 797 u8 tid; 798 int ret = 0; 799 800 /* NOTE: Enable PN/TSC replay check offload only for unicast frames. 801 * We use mac80211 PN/TSC replay check functionality for bcast/mcast 802 * for now. 803 */ 804 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) 805 return 0; 806 807 spin_lock_bh(&dp->dp_lock); 808 809 peer = ath12k_dp_link_peer_find_by_vdev_and_addr(dp, arvif->vdev_id, 810 peer_addr); 811 if (!peer || !peer->dp_peer) { 812 spin_unlock_bh(&dp->dp_lock); 813 ath12k_warn(ab, "failed to find the peer %pM to configure pn replay detection\n", 814 peer_addr); 815 return -ENOENT; 816 } 817 818 for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) { 819 if (!(peer->rx_tid_active_bitmask & (1 << tid))) 820 continue; 821 822 rx_tid = &peer->dp_peer->rx_tid[tid]; 823 ath12k_dp_init_rx_tid_rxq(&rx_tid_rxq, rx_tid, 824 (peer->rx_tid_active_bitmask & (1 << tid))); 825 ath12k_dp_arch_setup_pn_check_reo_cmd(dp, &cmd, rx_tid, key->cipher, 826 key_cmd); 827 ret = ath12k_dp_arch_reo_cmd_send(dp, &rx_tid_rxq, 828 HAL_REO_CMD_UPDATE_RX_QUEUE, 829 &cmd, NULL); 830 if (ret) { 831 ath12k_warn(ab, "failed to configure rx tid %d queue of peer %pM for pn replay detection %d\n", 832 tid, peer_addr, ret); 833 break; 834 } 835 } 836 837 spin_unlock_bh(&dp->dp_lock); 838 839 return ret; 840 } 841 EXPORT_SYMBOL(ath12k_dp_rx_get_msdu_last_buf); 842 843 struct sk_buff *ath12k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list, 844 struct sk_buff *first) 845 { 846 struct sk_buff *skb; 847 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first); 848 849 if (!rxcb->is_continuation) 850 return first; 851 852 skb_queue_walk(msdu_list, skb) { 853 rxcb = ATH12K_SKB_RXCB(skb); 854 if (!rxcb->is_continuation) 855 return skb; 856 } 857 858 return NULL; 859 } 860 861 int ath12k_dp_rx_crypto_mic_len(struct ath12k_dp *dp, enum hal_encrypt_type enctype) 862 { 863 switch (enctype) { 864 case HAL_ENCRYPT_TYPE_OPEN: 865 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 866 case HAL_ENCRYPT_TYPE_TKIP_MIC: 867 return 0; 868 case HAL_ENCRYPT_TYPE_CCMP_128: 869 return IEEE80211_CCMP_MIC_LEN; 870 case HAL_ENCRYPT_TYPE_CCMP_256: 871 return IEEE80211_CCMP_256_MIC_LEN; 872 case HAL_ENCRYPT_TYPE_GCMP_128: 873 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 874 return IEEE80211_GCMP_MIC_LEN; 875 case HAL_ENCRYPT_TYPE_WEP_40: 876 case HAL_ENCRYPT_TYPE_WEP_104: 877 case HAL_ENCRYPT_TYPE_WEP_128: 878 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 879 case HAL_ENCRYPT_TYPE_WAPI: 880 break; 881 } 882 883 ath12k_warn(dp->ab, "unsupported encryption type %d for mic len\n", enctype); 884 return 0; 885 } 886 887 static int ath12k_dp_rx_crypto_param_len(struct ath12k_pdev_dp *dp_pdev, 888 enum hal_encrypt_type enctype) 889 { 890 switch (enctype) { 891 case HAL_ENCRYPT_TYPE_OPEN: 892 return 0; 893 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 894 case HAL_ENCRYPT_TYPE_TKIP_MIC: 895 return IEEE80211_TKIP_IV_LEN; 896 case HAL_ENCRYPT_TYPE_CCMP_128: 897 return IEEE80211_CCMP_HDR_LEN; 898 case HAL_ENCRYPT_TYPE_CCMP_256: 899 return IEEE80211_CCMP_256_HDR_LEN; 900 case HAL_ENCRYPT_TYPE_GCMP_128: 901 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 902 return IEEE80211_GCMP_HDR_LEN; 903 case HAL_ENCRYPT_TYPE_WEP_40: 904 case HAL_ENCRYPT_TYPE_WEP_104: 905 case HAL_ENCRYPT_TYPE_WEP_128: 906 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 907 case HAL_ENCRYPT_TYPE_WAPI: 908 break; 909 } 910 911 ath12k_warn(dp_pdev->dp->ab, "unsupported encryption type %d\n", enctype); 912 return 0; 913 } 914 915 static int ath12k_dp_rx_crypto_icv_len(struct ath12k_pdev_dp *dp_pdev, 916 enum hal_encrypt_type enctype) 917 { 918 switch (enctype) { 919 case HAL_ENCRYPT_TYPE_OPEN: 920 case HAL_ENCRYPT_TYPE_CCMP_128: 921 case HAL_ENCRYPT_TYPE_CCMP_256: 922 case HAL_ENCRYPT_TYPE_GCMP_128: 923 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 924 return 0; 925 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 926 case HAL_ENCRYPT_TYPE_TKIP_MIC: 927 return IEEE80211_TKIP_ICV_LEN; 928 case HAL_ENCRYPT_TYPE_WEP_40: 929 case HAL_ENCRYPT_TYPE_WEP_104: 930 case HAL_ENCRYPT_TYPE_WEP_128: 931 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 932 case HAL_ENCRYPT_TYPE_WAPI: 933 break; 934 } 935 936 ath12k_warn(dp_pdev->dp->ab, "unsupported encryption type %d\n", enctype); 937 return 0; 938 } 939 940 static void ath12k_dp_rx_h_undecap_nwifi(struct ath12k_pdev_dp *dp_pdev, 941 struct sk_buff *msdu, 942 enum hal_encrypt_type enctype, 943 struct hal_rx_desc_data *rx_info) 944 { 945 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 946 u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN]; 947 struct ieee80211_hdr *hdr; 948 size_t hdr_len; 949 u8 *crypto_hdr; 950 u16 qos_ctl; 951 952 /* pull decapped header */ 953 hdr = (struct ieee80211_hdr *)msdu->data; 954 hdr_len = ieee80211_hdrlen(hdr->frame_control); 955 skb_pull(msdu, hdr_len); 956 957 /* Rebuild qos header */ 958 hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA); 959 960 /* Reset the order bit as the HT_Control header is stripped */ 961 hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER)); 962 963 qos_ctl = rxcb->tid; 964 965 if (rx_info->mesh_ctrl_present) 966 qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT; 967 968 /* TODO: Add other QoS ctl fields when required */ 969 970 /* copy decap header before overwriting for reuse below */ 971 memcpy(decap_hdr, hdr, hdr_len); 972 973 /* Rebuild crypto header for mac80211 use */ 974 if (!(rx_info->rx_status->flag & RX_FLAG_IV_STRIPPED)) { 975 crypto_hdr = skb_push(msdu, 976 ath12k_dp_rx_crypto_param_len(dp_pdev, enctype)); 977 ath12k_dp_rx_desc_get_crypto_header(dp_pdev->dp->hal, 978 rxcb->rx_desc, crypto_hdr, 979 enctype); 980 } 981 982 memcpy(skb_push(msdu, 983 IEEE80211_QOS_CTL_LEN), &qos_ctl, 984 IEEE80211_QOS_CTL_LEN); 985 memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len); 986 } 987 988 static void ath12k_dp_rx_h_undecap_raw(struct ath12k_pdev_dp *dp_pdev, 989 struct sk_buff *msdu, 990 enum hal_encrypt_type enctype, 991 struct ieee80211_rx_status *status, 992 bool decrypted) 993 { 994 struct ath12k_dp *dp = dp_pdev->dp; 995 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 996 struct ieee80211_hdr *hdr; 997 size_t hdr_len; 998 size_t crypto_len; 999 1000 if (!rxcb->is_first_msdu || 1001 !(rxcb->is_first_msdu && rxcb->is_last_msdu)) { 1002 WARN_ON_ONCE(1); 1003 return; 1004 } 1005 1006 skb_trim(msdu, msdu->len - FCS_LEN); 1007 1008 if (!decrypted) 1009 return; 1010 1011 hdr = (void *)msdu->data; 1012 1013 /* Tail */ 1014 if (status->flag & RX_FLAG_IV_STRIPPED) { 1015 skb_trim(msdu, msdu->len - 1016 ath12k_dp_rx_crypto_mic_len(dp, enctype)); 1017 1018 skb_trim(msdu, msdu->len - 1019 ath12k_dp_rx_crypto_icv_len(dp_pdev, enctype)); 1020 } else { 1021 /* MIC */ 1022 if (status->flag & RX_FLAG_MIC_STRIPPED) 1023 skb_trim(msdu, msdu->len - 1024 ath12k_dp_rx_crypto_mic_len(dp, enctype)); 1025 1026 /* ICV */ 1027 if (status->flag & RX_FLAG_ICV_STRIPPED) 1028 skb_trim(msdu, msdu->len - 1029 ath12k_dp_rx_crypto_icv_len(dp_pdev, enctype)); 1030 } 1031 1032 /* MMIC */ 1033 if ((status->flag & RX_FLAG_MMIC_STRIPPED) && 1034 !ieee80211_has_morefrags(hdr->frame_control) && 1035 enctype == HAL_ENCRYPT_TYPE_TKIP_MIC) 1036 skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN); 1037 1038 /* Head */ 1039 if (status->flag & RX_FLAG_IV_STRIPPED) { 1040 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1041 crypto_len = ath12k_dp_rx_crypto_param_len(dp_pdev, enctype); 1042 1043 memmove(msdu->data + crypto_len, msdu->data, hdr_len); 1044 skb_pull(msdu, crypto_len); 1045 } 1046 } 1047 1048 static void ath12k_get_dot11_hdr_from_rx_desc(struct ath12k_pdev_dp *dp_pdev, 1049 struct sk_buff *msdu, 1050 struct ath12k_skb_rxcb *rxcb, 1051 enum hal_encrypt_type enctype, 1052 struct hal_rx_desc_data *rx_info) 1053 { 1054 struct hal_rx_desc *rx_desc = rxcb->rx_desc; 1055 struct ath12k_dp *dp = dp_pdev->dp; 1056 struct ath12k_hal *hal = dp->hal; 1057 size_t hdr_len, crypto_len; 1058 struct ieee80211_hdr hdr; 1059 __le16 qos_ctl; 1060 u8 *crypto_hdr; 1061 1062 ath12k_dp_rx_desc_get_dot11_hdr(hal, rx_desc, &hdr); 1063 hdr_len = ieee80211_hdrlen(hdr.frame_control); 1064 1065 if (!(rx_info->rx_status->flag & RX_FLAG_IV_STRIPPED)) { 1066 crypto_len = ath12k_dp_rx_crypto_param_len(dp_pdev, enctype); 1067 crypto_hdr = skb_push(msdu, crypto_len); 1068 ath12k_dp_rx_desc_get_crypto_header(dp->hal, rx_desc, crypto_hdr, 1069 enctype); 1070 } 1071 1072 skb_push(msdu, hdr_len); 1073 memcpy(msdu->data, &hdr, min(hdr_len, sizeof(hdr))); 1074 1075 if (rxcb->is_mcbc) 1076 rx_info->rx_status->flag &= ~RX_FLAG_PN_VALIDATED; 1077 1078 /* Add QOS header */ 1079 if (ieee80211_is_data_qos(hdr.frame_control)) { 1080 struct ieee80211_hdr *qos_ptr = (struct ieee80211_hdr *)msdu->data; 1081 1082 qos_ctl = cpu_to_le16(rxcb->tid & IEEE80211_QOS_CTL_TID_MASK); 1083 if (rx_info->mesh_ctrl_present) 1084 qos_ctl |= cpu_to_le16(IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT); 1085 1086 memcpy(ieee80211_get_qos_ctl(qos_ptr), &qos_ctl, IEEE80211_QOS_CTL_LEN); 1087 } 1088 } 1089 1090 static void ath12k_dp_rx_h_undecap_eth(struct ath12k_pdev_dp *dp_pdev, 1091 struct sk_buff *msdu, 1092 enum hal_encrypt_type enctype, 1093 struct hal_rx_desc_data *rx_info) 1094 { 1095 struct ieee80211_hdr *hdr; 1096 struct ethhdr *eth; 1097 u8 da[ETH_ALEN]; 1098 u8 sa[ETH_ALEN]; 1099 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 1100 struct ath12k_dp_rx_rfc1042_hdr rfc = {0xaa, 0xaa, 0x03, {0x00, 0x00, 0x00}}; 1101 1102 eth = (struct ethhdr *)msdu->data; 1103 ether_addr_copy(da, eth->h_dest); 1104 ether_addr_copy(sa, eth->h_source); 1105 rfc.snap_type = eth->h_proto; 1106 skb_pull(msdu, sizeof(*eth)); 1107 memcpy(skb_push(msdu, sizeof(rfc)), &rfc, 1108 sizeof(rfc)); 1109 ath12k_get_dot11_hdr_from_rx_desc(dp_pdev, msdu, rxcb, enctype, rx_info); 1110 1111 /* original 802.11 header has a different DA and in 1112 * case of 4addr it may also have different SA 1113 */ 1114 hdr = (struct ieee80211_hdr *)msdu->data; 1115 ether_addr_copy(ieee80211_get_DA(hdr), da); 1116 ether_addr_copy(ieee80211_get_SA(hdr), sa); 1117 } 1118 1119 void ath12k_dp_rx_h_undecap(struct ath12k_pdev_dp *dp_pdev, struct sk_buff *msdu, 1120 struct hal_rx_desc *rx_desc, 1121 enum hal_encrypt_type enctype, 1122 bool decrypted, 1123 struct hal_rx_desc_data *rx_info) 1124 { 1125 struct ethhdr *ehdr; 1126 1127 switch (rx_info->decap_type) { 1128 case DP_RX_DECAP_TYPE_NATIVE_WIFI: 1129 ath12k_dp_rx_h_undecap_nwifi(dp_pdev, msdu, enctype, rx_info); 1130 break; 1131 case DP_RX_DECAP_TYPE_RAW: 1132 ath12k_dp_rx_h_undecap_raw(dp_pdev, msdu, enctype, rx_info->rx_status, 1133 decrypted); 1134 break; 1135 case DP_RX_DECAP_TYPE_ETHERNET2_DIX: 1136 ehdr = (struct ethhdr *)msdu->data; 1137 1138 /* mac80211 allows fast path only for authorized STA */ 1139 if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) { 1140 ATH12K_SKB_RXCB(msdu)->is_eapol = true; 1141 ath12k_dp_rx_h_undecap_eth(dp_pdev, msdu, enctype, rx_info); 1142 break; 1143 } 1144 1145 /* PN for mcast packets will be validated in mac80211; 1146 * remove eth header and add 802.11 header. 1147 */ 1148 if (ATH12K_SKB_RXCB(msdu)->is_mcbc && decrypted) 1149 ath12k_dp_rx_h_undecap_eth(dp_pdev, msdu, enctype, rx_info); 1150 break; 1151 case DP_RX_DECAP_TYPE_8023: 1152 /* TODO: Handle undecap for these formats */ 1153 break; 1154 } 1155 } 1156 EXPORT_SYMBOL(ath12k_dp_rx_h_undecap); 1157 1158 struct ath12k_dp_link_peer * 1159 ath12k_dp_rx_h_find_link_peer(struct ath12k_pdev_dp *dp_pdev, struct sk_buff *msdu, 1160 struct hal_rx_desc_data *rx_info) 1161 { 1162 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 1163 struct ath12k_dp_link_peer *peer = NULL; 1164 struct ath12k_dp *dp = dp_pdev->dp; 1165 1166 lockdep_assert_held(&dp->dp_lock); 1167 1168 if (rxcb->peer_id) 1169 peer = ath12k_dp_link_peer_find_by_peerid(dp_pdev, rxcb->peer_id); 1170 1171 if (peer) 1172 return peer; 1173 1174 if (rx_info->addr2_present) 1175 peer = ath12k_dp_link_peer_find_by_addr(dp, rx_info->addr2); 1176 1177 return peer; 1178 } 1179 1180 static void ath12k_dp_rx_h_rate(struct ath12k_pdev_dp *dp_pdev, 1181 struct hal_rx_desc_data *rx_info) 1182 { 1183 struct ath12k_dp *dp = dp_pdev->dp; 1184 struct ieee80211_supported_band *sband; 1185 struct ieee80211_rx_status *rx_status = rx_info->rx_status; 1186 enum rx_msdu_start_pkt_type pkt_type = rx_info->pkt_type; 1187 u8 bw = rx_info->bw, sgi = rx_info->sgi; 1188 u8 rate_mcs = rx_info->rate_mcs, nss = rx_info->nss; 1189 bool is_cck; 1190 struct ath12k *ar; 1191 1192 switch (pkt_type) { 1193 case RX_MSDU_START_PKT_TYPE_11A: 1194 case RX_MSDU_START_PKT_TYPE_11B: 1195 ar = ath12k_pdev_dp_to_ar(dp_pdev); 1196 is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B); 1197 sband = &ar->mac.sbands[rx_status->band]; 1198 rx_status->rate_idx = ath12k_mac_hw_rate_to_idx(sband, rate_mcs, 1199 is_cck); 1200 break; 1201 case RX_MSDU_START_PKT_TYPE_11N: 1202 rx_status->encoding = RX_ENC_HT; 1203 if (rate_mcs > ATH12K_HT_MCS_MAX) { 1204 ath12k_warn(dp->ab, 1205 "Received with invalid mcs in HT mode %d\n", 1206 rate_mcs); 1207 break; 1208 } 1209 rx_status->rate_idx = rate_mcs + (8 * (nss - 1)); 1210 if (sgi) 1211 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 1212 rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw); 1213 break; 1214 case RX_MSDU_START_PKT_TYPE_11AC: 1215 rx_status->encoding = RX_ENC_VHT; 1216 rx_status->rate_idx = rate_mcs; 1217 if (rate_mcs > ATH12K_VHT_MCS_MAX) { 1218 ath12k_warn(dp->ab, 1219 "Received with invalid mcs in VHT mode %d\n", 1220 rate_mcs); 1221 break; 1222 } 1223 rx_status->nss = nss; 1224 if (sgi) 1225 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 1226 rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw); 1227 break; 1228 case RX_MSDU_START_PKT_TYPE_11AX: 1229 rx_status->rate_idx = rate_mcs; 1230 if (rate_mcs > ATH12K_HE_MCS_MAX) { 1231 ath12k_warn(dp->ab, 1232 "Received with invalid mcs in HE mode %d\n", 1233 rate_mcs); 1234 break; 1235 } 1236 rx_status->encoding = RX_ENC_HE; 1237 rx_status->nss = nss; 1238 rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi); 1239 rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw); 1240 break; 1241 case RX_MSDU_START_PKT_TYPE_11BE: 1242 rx_status->rate_idx = rate_mcs; 1243 1244 if (rate_mcs > ATH12K_EHT_MCS_MAX) { 1245 ath12k_warn(dp->ab, 1246 "Received with invalid mcs in EHT mode %d\n", 1247 rate_mcs); 1248 break; 1249 } 1250 1251 rx_status->encoding = RX_ENC_EHT; 1252 rx_status->nss = nss; 1253 rx_status->eht.gi = ath12k_mac_eht_gi_to_nl80211_eht_gi(sgi); 1254 rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw); 1255 break; 1256 default: 1257 break; 1258 } 1259 } 1260 1261 void ath12k_dp_rx_h_ppdu(struct ath12k_pdev_dp *dp_pdev, 1262 struct hal_rx_desc_data *rx_info) 1263 { 1264 struct ieee80211_rx_status *rx_status = rx_info->rx_status; 1265 u8 channel_num; 1266 u32 center_freq, meta_data; 1267 struct ieee80211_channel *channel; 1268 1269 rx_status->freq = 0; 1270 rx_status->rate_idx = 0; 1271 rx_status->nss = 0; 1272 rx_status->encoding = RX_ENC_LEGACY; 1273 rx_status->bw = RATE_INFO_BW_20; 1274 rx_status->enc_flags = 0; 1275 1276 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; 1277 1278 meta_data = rx_info->phy_meta_data; 1279 channel_num = meta_data; 1280 center_freq = meta_data >> 16; 1281 1282 rx_status->band = NUM_NL80211_BANDS; 1283 1284 if (center_freq >= ATH12K_MIN_6GHZ_FREQ && 1285 center_freq <= ATH12K_MAX_6GHZ_FREQ) { 1286 rx_status->band = NL80211_BAND_6GHZ; 1287 rx_status->freq = center_freq; 1288 } else if (channel_num >= 1 && channel_num <= 14) { 1289 rx_status->band = NL80211_BAND_2GHZ; 1290 } else if (channel_num >= 36 && channel_num <= 173) { 1291 rx_status->band = NL80211_BAND_5GHZ; 1292 } 1293 1294 if (unlikely(rx_status->band == NUM_NL80211_BANDS || 1295 !ath12k_pdev_dp_to_hw(dp_pdev)->wiphy->bands[rx_status->band])) { 1296 struct ath12k *ar = ath12k_pdev_dp_to_ar(dp_pdev); 1297 1298 ath12k_warn(ar->ab, "sband is NULL for status band %d channel_num %d center_freq %d pdev_id %d\n", 1299 rx_status->band, channel_num, center_freq, ar->pdev_idx); 1300 1301 spin_lock_bh(&ar->data_lock); 1302 channel = ar->rx_channel; 1303 if (channel) { 1304 rx_status->band = channel->band; 1305 channel_num = 1306 ieee80211_frequency_to_channel(channel->center_freq); 1307 rx_status->freq = ieee80211_channel_to_frequency(channel_num, 1308 rx_status->band); 1309 } else { 1310 ath12k_err(ar->ab, "unable to determine channel, band for rx packet"); 1311 } 1312 spin_unlock_bh(&ar->data_lock); 1313 goto h_rate; 1314 } 1315 1316 if (rx_status->band != NL80211_BAND_6GHZ) 1317 rx_status->freq = ieee80211_channel_to_frequency(channel_num, 1318 rx_status->band); 1319 1320 h_rate: 1321 ath12k_dp_rx_h_rate(dp_pdev, rx_info); 1322 } 1323 EXPORT_SYMBOL(ath12k_dp_rx_h_ppdu); 1324 1325 void ath12k_dp_rx_deliver_msdu(struct ath12k_pdev_dp *dp_pdev, struct napi_struct *napi, 1326 struct sk_buff *msdu, 1327 struct hal_rx_desc_data *rx_info) 1328 { 1329 struct ath12k_dp *dp = dp_pdev->dp; 1330 struct ieee80211_rx_status *rx_status; 1331 struct ieee80211_sta *pubsta; 1332 struct ath12k_dp_peer *peer; 1333 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 1334 struct ieee80211_rx_status *status = rx_info->rx_status; 1335 u8 decap = rx_info->decap_type; 1336 bool is_mcbc = rxcb->is_mcbc; 1337 bool is_eapol = rxcb->is_eapol; 1338 1339 peer = ath12k_dp_peer_find_by_peerid(dp_pdev, rx_info->peer_id); 1340 1341 pubsta = peer ? peer->sta : NULL; 1342 1343 if (pubsta && pubsta->valid_links) { 1344 status->link_valid = 1; 1345 status->link_id = peer->hw_links[rxcb->hw_link_id]; 1346 } 1347 1348 ath12k_dbg(dp->ab, ATH12K_DBG_DATA, 1349 "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", 1350 msdu, 1351 msdu->len, 1352 peer ? peer->addr : NULL, 1353 rxcb->tid, 1354 is_mcbc ? "mcast" : "ucast", 1355 rx_info->seq_no, 1356 (status->encoding == RX_ENC_LEGACY) ? "legacy" : "", 1357 (status->encoding == RX_ENC_HT) ? "ht" : "", 1358 (status->encoding == RX_ENC_VHT) ? "vht" : "", 1359 (status->encoding == RX_ENC_HE) ? "he" : "", 1360 (status->encoding == RX_ENC_EHT) ? "eht" : "", 1361 (status->bw == RATE_INFO_BW_40) ? "40" : "", 1362 (status->bw == RATE_INFO_BW_80) ? "80" : "", 1363 (status->bw == RATE_INFO_BW_160) ? "160" : "", 1364 (status->bw == RATE_INFO_BW_320) ? "320" : "", 1365 status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "", 1366 status->rate_idx, 1367 status->nss, 1368 status->freq, 1369 status->band, status->flag, 1370 !!(status->flag & RX_FLAG_FAILED_FCS_CRC), 1371 !!(status->flag & RX_FLAG_MMIC_ERROR), 1372 !!(status->flag & RX_FLAG_AMSDU_MORE)); 1373 1374 ath12k_dbg_dump(dp->ab, ATH12K_DBG_DP_RX, NULL, "dp rx msdu: ", 1375 msdu->data, msdu->len); 1376 1377 rx_status = IEEE80211_SKB_RXCB(msdu); 1378 *rx_status = *status; 1379 1380 /* TODO: trace rx packet */ 1381 1382 /* PN for multicast packets are not validate in HW, 1383 * so skip 802.3 rx path 1384 * Also, fast_rx expects the STA to be authorized, hence 1385 * eapol packets are sent in slow path. 1386 */ 1387 if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol && 1388 !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED)) 1389 rx_status->flag |= RX_FLAG_8023; 1390 1391 ieee80211_rx_napi(ath12k_pdev_dp_to_hw(dp_pdev), pubsta, msdu, napi); 1392 } 1393 EXPORT_SYMBOL(ath12k_dp_rx_deliver_msdu); 1394 1395 bool ath12k_dp_rx_check_nwifi_hdr_len_valid(struct ath12k_dp *dp, 1396 struct hal_rx_desc *rx_desc, 1397 struct sk_buff *msdu, 1398 struct hal_rx_desc_data *rx_info) 1399 { 1400 struct ieee80211_hdr *hdr; 1401 u32 hdr_len; 1402 1403 if (rx_info->decap_type != DP_RX_DECAP_TYPE_NATIVE_WIFI) 1404 return true; 1405 1406 hdr = (struct ieee80211_hdr *)msdu->data; 1407 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1408 1409 if ((likely(hdr_len <= DP_MAX_NWIFI_HDR_LEN))) 1410 return true; 1411 1412 dp->device_stats.invalid_rbm++; 1413 WARN_ON_ONCE(1); 1414 return false; 1415 } 1416 EXPORT_SYMBOL(ath12k_dp_rx_check_nwifi_hdr_len_valid); 1417 1418 static void ath12k_dp_rx_frag_timer(struct timer_list *timer) 1419 { 1420 struct ath12k_dp_rx_tid *rx_tid = timer_container_of(rx_tid, timer, 1421 frag_timer); 1422 1423 spin_lock_bh(&rx_tid->dp->dp_lock); 1424 if (rx_tid->last_frag_no && 1425 rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) { 1426 spin_unlock_bh(&rx_tid->dp->dp_lock); 1427 return; 1428 } 1429 ath12k_dp_arch_rx_frags_cleanup(rx_tid->dp, rx_tid, true); 1430 spin_unlock_bh(&rx_tid->dp->dp_lock); 1431 } 1432 1433 int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id) 1434 { 1435 struct ath12k_base *ab = ar->ab; 1436 struct crypto_shash *tfm; 1437 struct ath12k_dp_link_peer *peer; 1438 struct ath12k_dp_rx_tid *rx_tid; 1439 int i; 1440 struct ath12k_dp *dp = ath12k_ab_to_dp(ab); 1441 1442 tfm = crypto_alloc_shash("michael_mic", 0, 0); 1443 if (IS_ERR(tfm)) 1444 return PTR_ERR(tfm); 1445 1446 spin_lock_bh(&dp->dp_lock); 1447 1448 peer = ath12k_dp_link_peer_find_by_vdev_and_addr(dp, vdev_id, peer_mac); 1449 if (!peer || !peer->dp_peer) { 1450 spin_unlock_bh(&dp->dp_lock); 1451 crypto_free_shash(tfm); 1452 ath12k_warn(ab, "failed to find the peer to set up fragment info\n"); 1453 return -ENOENT; 1454 } 1455 1456 if (!peer->primary_link) { 1457 spin_unlock_bh(&dp->dp_lock); 1458 crypto_free_shash(tfm); 1459 return 0; 1460 } 1461 1462 for (i = 0; i <= IEEE80211_NUM_TIDS; i++) { 1463 rx_tid = &peer->dp_peer->rx_tid[i]; 1464 rx_tid->dp = dp; 1465 timer_setup(&rx_tid->frag_timer, ath12k_dp_rx_frag_timer, 0); 1466 skb_queue_head_init(&rx_tid->rx_frags); 1467 } 1468 1469 peer->dp_peer->tfm_mmic = tfm; 1470 peer->dp_peer->dp_setup_done = true; 1471 spin_unlock_bh(&dp->dp_lock); 1472 1473 return 0; 1474 } 1475 1476 int ath12k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key, 1477 struct ieee80211_hdr *hdr, u8 *data, 1478 size_t data_len, u8 *mic) 1479 { 1480 SHASH_DESC_ON_STACK(desc, tfm); 1481 u8 mic_hdr[16] = {}; 1482 u8 tid = 0; 1483 int ret; 1484 1485 if (!tfm) 1486 return -EINVAL; 1487 1488 desc->tfm = tfm; 1489 1490 ret = crypto_shash_setkey(tfm, key, 8); 1491 if (ret) 1492 goto out; 1493 1494 ret = crypto_shash_init(desc); 1495 if (ret) 1496 goto out; 1497 1498 /* TKIP MIC header */ 1499 memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN); 1500 memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN); 1501 if (ieee80211_is_data_qos(hdr->frame_control)) 1502 tid = ieee80211_get_tid(hdr); 1503 mic_hdr[12] = tid; 1504 1505 ret = crypto_shash_update(desc, mic_hdr, 16); 1506 if (ret) 1507 goto out; 1508 ret = crypto_shash_update(desc, data, data_len); 1509 if (ret) 1510 goto out; 1511 ret = crypto_shash_final(desc, mic); 1512 out: 1513 shash_desc_zero(desc); 1514 return ret; 1515 } 1516 EXPORT_SYMBOL(ath12k_dp_rx_h_michael_mic); 1517 1518 void ath12k_dp_rx_h_undecap_frag(struct ath12k_pdev_dp *dp_pdev, struct sk_buff *msdu, 1519 enum hal_encrypt_type enctype, u32 flags) 1520 { 1521 struct ath12k_dp *dp = dp_pdev->dp; 1522 struct ieee80211_hdr *hdr; 1523 size_t hdr_len; 1524 size_t crypto_len; 1525 u32 hal_rx_desc_sz = dp->ab->hal.hal_desc_sz; 1526 1527 if (!flags) 1528 return; 1529 1530 hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz); 1531 1532 if (flags & RX_FLAG_MIC_STRIPPED) 1533 skb_trim(msdu, msdu->len - 1534 ath12k_dp_rx_crypto_mic_len(dp, enctype)); 1535 1536 if (flags & RX_FLAG_ICV_STRIPPED) 1537 skb_trim(msdu, msdu->len - 1538 ath12k_dp_rx_crypto_icv_len(dp_pdev, enctype)); 1539 1540 if (flags & RX_FLAG_IV_STRIPPED) { 1541 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1542 crypto_len = ath12k_dp_rx_crypto_param_len(dp_pdev, enctype); 1543 1544 memmove(msdu->data + hal_rx_desc_sz + crypto_len, 1545 msdu->data + hal_rx_desc_sz, hdr_len); 1546 skb_pull(msdu, crypto_len); 1547 } 1548 } 1549 EXPORT_SYMBOL(ath12k_dp_rx_h_undecap_frag); 1550 1551 static int ath12k_dp_rx_h_cmp_frags(struct ath12k_hal *hal, 1552 struct sk_buff *a, struct sk_buff *b) 1553 { 1554 int frag1, frag2; 1555 1556 frag1 = ath12k_dp_rx_h_frag_no(hal, a); 1557 frag2 = ath12k_dp_rx_h_frag_no(hal, b); 1558 1559 return frag1 - frag2; 1560 } 1561 1562 void ath12k_dp_rx_h_sort_frags(struct ath12k_hal *hal, 1563 struct sk_buff_head *frag_list, 1564 struct sk_buff *cur_frag) 1565 { 1566 struct sk_buff *skb; 1567 int cmp; 1568 1569 skb_queue_walk(frag_list, skb) { 1570 cmp = ath12k_dp_rx_h_cmp_frags(hal, skb, cur_frag); 1571 if (cmp < 0) 1572 continue; 1573 __skb_queue_before(frag_list, skb, cur_frag); 1574 return; 1575 } 1576 __skb_queue_tail(frag_list, cur_frag); 1577 } 1578 EXPORT_SYMBOL(ath12k_dp_rx_h_sort_frags); 1579 1580 u64 ath12k_dp_rx_h_get_pn(struct ath12k_dp *dp, struct sk_buff *skb) 1581 { 1582 struct ieee80211_hdr *hdr; 1583 u64 pn = 0; 1584 u8 *ehdr; 1585 u32 hal_rx_desc_sz = dp->ab->hal.hal_desc_sz; 1586 1587 hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz); 1588 ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control); 1589 1590 pn = ehdr[0]; 1591 pn |= (u64)ehdr[1] << 8; 1592 pn |= (u64)ehdr[4] << 16; 1593 pn |= (u64)ehdr[5] << 24; 1594 pn |= (u64)ehdr[6] << 32; 1595 pn |= (u64)ehdr[7] << 40; 1596 1597 return pn; 1598 } 1599 EXPORT_SYMBOL(ath12k_dp_rx_h_get_pn); 1600 1601 void ath12k_dp_rx_free(struct ath12k_base *ab) 1602 { 1603 struct ath12k_dp *dp = ath12k_ab_to_dp(ab); 1604 struct dp_srng *srng; 1605 int i; 1606 1607 ath12k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring); 1608 1609 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { 1610 if (ab->hw_params->rx_mac_buf_ring) 1611 ath12k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]); 1612 if (!ab->hw_params->rxdma1_enable) { 1613 srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring; 1614 ath12k_dp_srng_cleanup(ab, srng); 1615 } 1616 } 1617 1618 for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) 1619 ath12k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]); 1620 1621 ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring); 1622 1623 ath12k_dp_rxdma_buf_free(ab); 1624 } 1625 1626 void ath12k_dp_rx_pdev_free(struct ath12k_base *ab, int mac_id) 1627 { 1628 struct ath12k *ar = ab->pdevs[mac_id].ar; 1629 1630 ath12k_dp_rx_pdev_srng_free(ar); 1631 } 1632 1633 int ath12k_dp_rx_htt_setup(struct ath12k_base *ab) 1634 { 1635 struct ath12k_dp *dp = ath12k_ab_to_dp(ab); 1636 u32 ring_id; 1637 int i, ret; 1638 1639 /* TODO: Need to verify the HTT setup for QCN9224 */ 1640 ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id; 1641 ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, 0, HAL_RXDMA_BUF); 1642 if (ret) { 1643 ath12k_warn(ab, "failed to configure rx_refill_buf_ring %d\n", 1644 ret); 1645 return ret; 1646 } 1647 1648 if (ab->hw_params->rx_mac_buf_ring) { 1649 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { 1650 ring_id = dp->rx_mac_buf_ring[i].ring_id; 1651 ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, 1652 i, HAL_RXDMA_BUF); 1653 if (ret) { 1654 ath12k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n", 1655 i, ret); 1656 return ret; 1657 } 1658 } 1659 } 1660 1661 for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) { 1662 ring_id = dp->rxdma_err_dst_ring[i].ring_id; 1663 ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, 1664 i, HAL_RXDMA_DST); 1665 if (ret) { 1666 ath12k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n", 1667 i, ret); 1668 return ret; 1669 } 1670 } 1671 1672 if (ab->hw_params->rxdma1_enable) { 1673 ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id; 1674 ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, 1675 0, HAL_RXDMA_MONITOR_BUF); 1676 if (ret) { 1677 ath12k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n", 1678 ret); 1679 return ret; 1680 } 1681 } else { 1682 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { 1683 ring_id = 1684 dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id; 1685 ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, i, 1686 HAL_RXDMA_MONITOR_STATUS); 1687 if (ret) { 1688 ath12k_warn(ab, 1689 "failed to configure mon_status_refill_ring%d %d\n", 1690 i, ret); 1691 return ret; 1692 } 1693 } 1694 } 1695 1696 ret = ab->hw_params->hw_ops->rxdma_ring_sel_config(ab); 1697 if (ret) { 1698 ath12k_warn(ab, "failed to setup rxdma ring selection config\n"); 1699 return ret; 1700 } 1701 1702 return 0; 1703 } 1704 1705 int ath12k_dp_rx_alloc(struct ath12k_base *ab) 1706 { 1707 struct ath12k_dp *dp = ath12k_ab_to_dp(ab); 1708 struct dp_srng *srng; 1709 int i, ret; 1710 1711 idr_init(&dp->rxdma_mon_buf_ring.bufs_idr); 1712 spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock); 1713 1714 ret = ath12k_dp_srng_setup(ab, 1715 &dp->rx_refill_buf_ring.refill_buf_ring, 1716 HAL_RXDMA_BUF, 0, 0, 1717 DP_RXDMA_BUF_RING_SIZE); 1718 if (ret) { 1719 ath12k_warn(ab, "failed to setup rx_refill_buf_ring\n"); 1720 return ret; 1721 } 1722 1723 if (ab->hw_params->rx_mac_buf_ring) { 1724 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { 1725 ret = ath12k_dp_srng_setup(ab, 1726 &dp->rx_mac_buf_ring[i], 1727 HAL_RXDMA_BUF, 1, 1728 i, DP_RX_MAC_BUF_RING_SIZE); 1729 if (ret) { 1730 ath12k_warn(ab, "failed to setup rx_mac_buf_ring %d\n", 1731 i); 1732 return ret; 1733 } 1734 } 1735 } 1736 1737 for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) { 1738 ret = ath12k_dp_srng_setup(ab, &dp->rxdma_err_dst_ring[i], 1739 HAL_RXDMA_DST, 0, i, 1740 DP_RXDMA_ERR_DST_RING_SIZE); 1741 if (ret) { 1742 ath12k_warn(ab, "failed to setup rxdma_err_dst_ring %d\n", i); 1743 return ret; 1744 } 1745 } 1746 1747 if (ab->hw_params->rxdma1_enable) { 1748 ret = ath12k_dp_srng_setup(ab, 1749 &dp->rxdma_mon_buf_ring.refill_buf_ring, 1750 HAL_RXDMA_MONITOR_BUF, 0, 0, 1751 DP_RXDMA_MONITOR_BUF_RING_SIZE(ab)); 1752 if (ret) { 1753 ath12k_warn(ab, "failed to setup HAL_RXDMA_MONITOR_BUF\n"); 1754 return ret; 1755 } 1756 } else { 1757 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { 1758 idr_init(&dp->rx_mon_status_refill_ring[i].bufs_idr); 1759 spin_lock_init(&dp->rx_mon_status_refill_ring[i].idr_lock); 1760 } 1761 1762 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { 1763 srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring; 1764 ret = ath12k_dp_srng_setup(ab, srng, 1765 HAL_RXDMA_MONITOR_STATUS, 0, i, 1766 DP_RXDMA_MON_STATUS_RING_SIZE); 1767 if (ret) { 1768 ath12k_warn(ab, "failed to setup mon status ring %d\n", 1769 i); 1770 return ret; 1771 } 1772 } 1773 } 1774 1775 ret = ath12k_dp_rxdma_buf_setup(ab); 1776 if (ret) { 1777 ath12k_warn(ab, "failed to setup rxdma ring\n"); 1778 return ret; 1779 } 1780 1781 return 0; 1782 } 1783 1784 int ath12k_dp_rx_pdev_alloc(struct ath12k_base *ab, int mac_id) 1785 { 1786 struct ath12k *ar = ab->pdevs[mac_id].ar; 1787 struct ath12k_pdev_dp *dp = &ar->dp; 1788 u32 ring_id; 1789 int i; 1790 int ret; 1791 1792 if (!ab->hw_params->rxdma1_enable) 1793 goto out; 1794 1795 ret = ath12k_dp_rx_pdev_srng_alloc(ar); 1796 if (ret) { 1797 ath12k_warn(ab, "failed to setup rx srngs\n"); 1798 return ret; 1799 } 1800 1801 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { 1802 ring_id = dp->rxdma_mon_dst_ring[i].ring_id; 1803 ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, 1804 mac_id + i, 1805 HAL_RXDMA_MONITOR_DST); 1806 if (ret) { 1807 ath12k_warn(ab, 1808 "failed to configure rxdma_mon_dst_ring %d %d\n", 1809 i, ret); 1810 return ret; 1811 } 1812 } 1813 out: 1814 return 0; 1815 } 1816 1817 static int ath12k_dp_rx_pdev_mon_status_attach(struct ath12k *ar) 1818 { 1819 struct ath12k_pdev_dp *dp = &ar->dp; 1820 struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&dp->mon_data; 1821 1822 skb_queue_head_init(&pmon->rx_status_q); 1823 1824 pmon->mon_ppdu_status = DP_PPDU_STATUS_START; 1825 1826 memset(&pmon->rx_mon_stats, 0, 1827 sizeof(pmon->rx_mon_stats)); 1828 return 0; 1829 } 1830 1831 int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar) 1832 { 1833 struct ath12k_pdev_dp *dp = &ar->dp; 1834 struct ath12k_mon_data *pmon = &dp->mon_data; 1835 int ret = 0; 1836 1837 ret = ath12k_dp_rx_pdev_mon_status_attach(ar); 1838 if (ret) { 1839 ath12k_warn(ar->ab, "pdev_mon_status_attach() failed"); 1840 return ret; 1841 } 1842 1843 pmon->mon_last_linkdesc_paddr = 0; 1844 pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1; 1845 spin_lock_init(&pmon->mon_lock); 1846 1847 if (!ar->ab->hw_params->rxdma1_enable) 1848 return 0; 1849 1850 INIT_LIST_HEAD(&pmon->dp_rx_mon_mpdu_list); 1851 pmon->mon_mpdu = NULL; 1852 1853 return 0; 1854 } 1855