1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 /* 3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 */ 6 7 #include <linux/ieee80211.h> 8 #include <linux/kernel.h> 9 #include <linux/skbuff.h> 10 #include <crypto/hash.h> 11 #include "core.h" 12 #include "debug.h" 13 #include "hal_desc.h" 14 #include "hw.h" 15 #include "dp_rx.h" 16 #include "hal_rx.h" 17 #include "dp_tx.h" 18 #include "peer.h" 19 #include "dp_mon.h" 20 #include "debugfs_htt_stats.h" 21 22 #define ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ) 23 24 static int ath12k_dp_rx_tid_delete_handler(struct ath12k_base *ab, 25 struct ath12k_dp_rx_tid_rxq *rx_tid); 26 27 static enum hal_encrypt_type ath12k_dp_rx_h_enctype(struct ath12k_base *ab, 28 struct hal_rx_desc *desc) 29 { 30 if (!ab->hal_rx_ops->rx_desc_encrypt_valid(desc)) 31 return HAL_ENCRYPT_TYPE_OPEN; 32 33 return ab->hal_rx_ops->rx_desc_get_encrypt_type(desc); 34 } 35 36 u8 ath12k_dp_rx_h_decap_type(struct ath12k_base *ab, 37 struct hal_rx_desc *desc) 38 { 39 return ab->hal_rx_ops->rx_desc_get_decap_type(desc); 40 } 41 42 static u8 ath12k_dp_rx_h_mesh_ctl_present(struct ath12k_base *ab, 43 struct hal_rx_desc *desc) 44 { 45 return ab->hal_rx_ops->rx_desc_get_mesh_ctl(desc); 46 } 47 48 static bool ath12k_dp_rx_h_seq_ctrl_valid(struct ath12k_base *ab, 49 struct hal_rx_desc *desc) 50 { 51 return ab->hal_rx_ops->rx_desc_get_mpdu_seq_ctl_vld(desc); 52 } 53 54 static bool ath12k_dp_rx_h_fc_valid(struct ath12k_base *ab, 55 struct hal_rx_desc *desc) 56 { 57 return ab->hal_rx_ops->rx_desc_get_mpdu_fc_valid(desc); 58 } 59 60 static bool ath12k_dp_rx_h_more_frags(struct ath12k_base *ab, 61 struct sk_buff *skb) 62 { 63 struct ieee80211_hdr *hdr; 64 65 hdr = (struct ieee80211_hdr *)(skb->data + ab->hal.hal_desc_sz); 66 return ieee80211_has_morefrags(hdr->frame_control); 67 } 68 69 static u16 ath12k_dp_rx_h_frag_no(struct ath12k_base *ab, 70 struct sk_buff *skb) 71 { 72 struct ieee80211_hdr *hdr; 73 74 hdr = (struct ieee80211_hdr *)(skb->data + ab->hal.hal_desc_sz); 75 return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; 76 } 77 78 static u16 ath12k_dp_rx_h_seq_no(struct ath12k_base *ab, 79 struct hal_rx_desc *desc) 80 { 81 return ab->hal_rx_ops->rx_desc_get_mpdu_start_seq_no(desc); 82 } 83 84 static bool ath12k_dp_rx_h_msdu_done(struct ath12k_base *ab, 85 struct hal_rx_desc *desc) 86 { 87 return ab->hal_rx_ops->dp_rx_h_msdu_done(desc); 88 } 89 90 static bool ath12k_dp_rx_h_l4_cksum_fail(struct ath12k_base *ab, 91 struct hal_rx_desc *desc) 92 { 93 return ab->hal_rx_ops->dp_rx_h_l4_cksum_fail(desc); 94 } 95 96 static bool ath12k_dp_rx_h_ip_cksum_fail(struct ath12k_base *ab, 97 struct hal_rx_desc *desc) 98 { 99 return ab->hal_rx_ops->dp_rx_h_ip_cksum_fail(desc); 100 } 101 102 static bool ath12k_dp_rx_h_is_decrypted(struct ath12k_base *ab, 103 struct hal_rx_desc *desc) 104 { 105 return ab->hal_rx_ops->dp_rx_h_is_decrypted(desc); 106 } 107 108 u32 ath12k_dp_rx_h_mpdu_err(struct ath12k_base *ab, 109 struct hal_rx_desc *desc) 110 { 111 return ab->hal_rx_ops->dp_rx_h_mpdu_err(desc); 112 } 113 114 static u16 ath12k_dp_rx_h_msdu_len(struct ath12k_base *ab, 115 struct hal_rx_desc *desc) 116 { 117 return ab->hal_rx_ops->rx_desc_get_msdu_len(desc); 118 } 119 120 static u8 ath12k_dp_rx_h_sgi(struct ath12k_base *ab, 121 struct hal_rx_desc *desc) 122 { 123 return ab->hal_rx_ops->rx_desc_get_msdu_sgi(desc); 124 } 125 126 static u8 ath12k_dp_rx_h_rate_mcs(struct ath12k_base *ab, 127 struct 
hal_rx_desc *desc) 128 { 129 return ab->hal_rx_ops->rx_desc_get_msdu_rate_mcs(desc); 130 } 131 132 static u8 ath12k_dp_rx_h_rx_bw(struct ath12k_base *ab, 133 struct hal_rx_desc *desc) 134 { 135 return ab->hal_rx_ops->rx_desc_get_msdu_rx_bw(desc); 136 } 137 138 static u32 ath12k_dp_rx_h_freq(struct ath12k_base *ab, 139 struct hal_rx_desc *desc) 140 { 141 return ab->hal_rx_ops->rx_desc_get_msdu_freq(desc); 142 } 143 144 static u8 ath12k_dp_rx_h_pkt_type(struct ath12k_base *ab, 145 struct hal_rx_desc *desc) 146 { 147 return ab->hal_rx_ops->rx_desc_get_msdu_pkt_type(desc); 148 } 149 150 static u8 ath12k_dp_rx_h_nss(struct ath12k_base *ab, 151 struct hal_rx_desc *desc) 152 { 153 return hweight8(ab->hal_rx_ops->rx_desc_get_msdu_nss(desc)); 154 } 155 156 static u8 ath12k_dp_rx_h_tid(struct ath12k_base *ab, 157 struct hal_rx_desc *desc) 158 { 159 return ab->hal_rx_ops->rx_desc_get_mpdu_tid(desc); 160 } 161 162 static u16 ath12k_dp_rx_h_peer_id(struct ath12k_base *ab, 163 struct hal_rx_desc *desc) 164 { 165 return ab->hal_rx_ops->rx_desc_get_mpdu_peer_id(desc); 166 } 167 168 u8 ath12k_dp_rx_h_l3pad(struct ath12k_base *ab, 169 struct hal_rx_desc *desc) 170 { 171 return ab->hal_rx_ops->rx_desc_get_l3_pad_bytes(desc); 172 } 173 174 static bool ath12k_dp_rx_h_first_msdu(struct ath12k_base *ab, 175 struct hal_rx_desc *desc) 176 { 177 return ab->hal_rx_ops->rx_desc_get_first_msdu(desc); 178 } 179 180 static bool ath12k_dp_rx_h_last_msdu(struct ath12k_base *ab, 181 struct hal_rx_desc *desc) 182 { 183 return ab->hal_rx_ops->rx_desc_get_last_msdu(desc); 184 } 185 186 static void ath12k_dp_rx_desc_end_tlv_copy(struct ath12k_base *ab, 187 struct hal_rx_desc *fdesc, 188 struct hal_rx_desc *ldesc) 189 { 190 ab->hal_rx_ops->rx_desc_copy_end_tlv(fdesc, ldesc); 191 } 192 193 static void ath12k_dp_rxdesc_set_msdu_len(struct ath12k_base *ab, 194 struct hal_rx_desc *desc, 195 u16 len) 196 { 197 ab->hal_rx_ops->rx_desc_set_msdu_len(desc, len); 198 } 199 200 u32 ath12k_dp_rxdesc_get_ppduid(struct ath12k_base *ab, 201 struct hal_rx_desc *rx_desc) 202 { 203 return ab->hal_rx_ops->rx_desc_get_mpdu_ppdu_id(rx_desc); 204 } 205 206 bool ath12k_dp_rxdesc_mpdu_valid(struct ath12k_base *ab, 207 struct hal_rx_desc *rx_desc) 208 { 209 u32 tlv_tag; 210 211 tlv_tag = ab->hal_rx_ops->rx_desc_get_mpdu_start_tag(rx_desc); 212 213 return tlv_tag == HAL_RX_MPDU_START; 214 } 215 216 static bool ath12k_dp_rx_h_is_da_mcbc(struct ath12k_base *ab, 217 struct hal_rx_desc *desc) 218 { 219 return (ath12k_dp_rx_h_first_msdu(ab, desc) && 220 ab->hal_rx_ops->rx_desc_is_da_mcbc(desc)); 221 } 222 223 static bool ath12k_dp_rxdesc_mac_addr2_valid(struct ath12k_base *ab, 224 struct hal_rx_desc *desc) 225 { 226 return ab->hal_rx_ops->rx_desc_mac_addr2_valid(desc); 227 } 228 229 static u8 *ath12k_dp_rxdesc_get_mpdu_start_addr2(struct ath12k_base *ab, 230 struct hal_rx_desc *desc) 231 { 232 return ab->hal_rx_ops->rx_desc_mpdu_start_addr2(desc); 233 } 234 235 static void ath12k_dp_rx_desc_get_dot11_hdr(struct ath12k_base *ab, 236 struct hal_rx_desc *desc, 237 struct ieee80211_hdr *hdr) 238 { 239 ab->hal_rx_ops->rx_desc_get_dot11_hdr(desc, hdr); 240 } 241 242 static void ath12k_dp_rx_desc_get_crypto_header(struct ath12k_base *ab, 243 struct hal_rx_desc *desc, 244 u8 *crypto_hdr, 245 enum hal_encrypt_type enctype) 246 { 247 ab->hal_rx_ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype); 248 } 249 250 static inline u8 ath12k_dp_rx_get_msdu_src_link(struct ath12k_base *ab, 251 struct hal_rx_desc *desc) 252 { 253 return 
ab->hal_rx_ops->rx_desc_get_msdu_src_link_id(desc); 254 } 255 256 static void ath12k_dp_clean_up_skb_list(struct sk_buff_head *skb_list) 257 { 258 struct sk_buff *skb; 259 260 while ((skb = __skb_dequeue(skb_list))) 261 dev_kfree_skb_any(skb); 262 } 263 264 static size_t ath12k_dp_list_cut_nodes(struct list_head *list, 265 struct list_head *head, 266 size_t count) 267 { 268 struct list_head *cur; 269 struct ath12k_rx_desc_info *rx_desc; 270 size_t nodes = 0; 271 272 if (!count) { 273 INIT_LIST_HEAD(list); 274 goto out; 275 } 276 277 list_for_each(cur, head) { 278 if (!count) 279 break; 280 281 rx_desc = list_entry(cur, struct ath12k_rx_desc_info, list); 282 rx_desc->in_use = true; 283 284 count--; 285 nodes++; 286 } 287 288 list_cut_before(list, head, cur); 289 out: 290 return nodes; 291 } 292 293 static void ath12k_dp_rx_enqueue_free(struct ath12k_dp *dp, 294 struct list_head *used_list) 295 { 296 struct ath12k_rx_desc_info *rx_desc, *safe; 297 298 /* Reset the use flag */ 299 list_for_each_entry_safe(rx_desc, safe, used_list, list) 300 rx_desc->in_use = false; 301 302 spin_lock_bh(&dp->rx_desc_lock); 303 list_splice_tail(used_list, &dp->rx_desc_free_list); 304 spin_unlock_bh(&dp->rx_desc_lock); 305 } 306 307 /* Returns number of Rx buffers replenished */ 308 int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab, 309 struct dp_rxdma_ring *rx_ring, 310 struct list_head *used_list, 311 int req_entries) 312 { 313 struct ath12k_buffer_addr *desc; 314 struct hal_srng *srng; 315 struct sk_buff *skb; 316 int num_free; 317 int num_remain; 318 u32 cookie; 319 dma_addr_t paddr; 320 struct ath12k_dp *dp = &ab->dp; 321 struct ath12k_rx_desc_info *rx_desc; 322 enum hal_rx_buf_return_buf_manager mgr = ab->hw_params->hal_params->rx_buf_rbm; 323 324 req_entries = min(req_entries, rx_ring->bufs_max); 325 326 srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; 327 328 spin_lock_bh(&srng->lock); 329 330 ath12k_hal_srng_access_begin(ab, srng); 331 332 num_free = ath12k_hal_srng_src_num_free(ab, srng, true); 333 if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4)) 334 req_entries = num_free; 335 336 req_entries = min(num_free, req_entries); 337 num_remain = req_entries; 338 339 if (!num_remain) 340 goto out; 341 342 /* Get the descriptor from free list */ 343 if (list_empty(used_list)) { 344 spin_lock_bh(&dp->rx_desc_lock); 345 req_entries = ath12k_dp_list_cut_nodes(used_list, 346 &dp->rx_desc_free_list, 347 num_remain); 348 spin_unlock_bh(&dp->rx_desc_lock); 349 num_remain = req_entries; 350 } 351 352 while (num_remain > 0) { 353 skb = dev_alloc_skb(DP_RX_BUFFER_SIZE + 354 DP_RX_BUFFER_ALIGN_SIZE); 355 if (!skb) 356 break; 357 358 if (!IS_ALIGNED((unsigned long)skb->data, 359 DP_RX_BUFFER_ALIGN_SIZE)) { 360 skb_pull(skb, 361 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) - 362 skb->data); 363 } 364 365 paddr = dma_map_single(ab->dev, skb->data, 366 skb->len + skb_tailroom(skb), 367 DMA_FROM_DEVICE); 368 if (dma_mapping_error(ab->dev, paddr)) 369 goto fail_free_skb; 370 371 rx_desc = list_first_entry_or_null(used_list, 372 struct ath12k_rx_desc_info, 373 list); 374 if (!rx_desc) 375 goto fail_dma_unmap; 376 377 rx_desc->skb = skb; 378 cookie = rx_desc->cookie; 379 380 desc = ath12k_hal_srng_src_get_next_entry(ab, srng); 381 if (!desc) 382 goto fail_dma_unmap; 383 384 list_del(&rx_desc->list); 385 ATH12K_SKB_RXCB(skb)->paddr = paddr; 386 387 num_remain--; 388 389 ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr); 390 } 391 392 goto out; 393 394 fail_dma_unmap: 395 
dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), 396 DMA_FROM_DEVICE); 397 fail_free_skb: 398 dev_kfree_skb_any(skb); 399 out: 400 ath12k_hal_srng_access_end(ab, srng); 401 402 if (!list_empty(used_list)) 403 ath12k_dp_rx_enqueue_free(dp, used_list); 404 405 spin_unlock_bh(&srng->lock); 406 407 return req_entries - num_remain; 408 } 409 410 static int ath12k_dp_rxdma_mon_buf_ring_free(struct ath12k_base *ab, 411 struct dp_rxdma_mon_ring *rx_ring) 412 { 413 struct sk_buff *skb; 414 int buf_id; 415 416 spin_lock_bh(&rx_ring->idr_lock); 417 idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) { 418 idr_remove(&rx_ring->bufs_idr, buf_id); 419 /* TODO: Understand where internal driver does this dma_unmap 420 * of rxdma_buffer. 421 */ 422 dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr, 423 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); 424 dev_kfree_skb_any(skb); 425 } 426 427 idr_destroy(&rx_ring->bufs_idr); 428 spin_unlock_bh(&rx_ring->idr_lock); 429 430 return 0; 431 } 432 433 static int ath12k_dp_rxdma_buf_free(struct ath12k_base *ab) 434 { 435 struct ath12k_dp *dp = &ab->dp; 436 int i; 437 438 ath12k_dp_rxdma_mon_buf_ring_free(ab, &dp->rxdma_mon_buf_ring); 439 440 if (ab->hw_params->rxdma1_enable) 441 return 0; 442 443 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) 444 ath12k_dp_rxdma_mon_buf_ring_free(ab, 445 &dp->rx_mon_status_refill_ring[i]); 446 447 return 0; 448 } 449 450 static int ath12k_dp_rxdma_mon_ring_buf_setup(struct ath12k_base *ab, 451 struct dp_rxdma_mon_ring *rx_ring, 452 u32 ringtype) 453 { 454 int num_entries; 455 456 num_entries = rx_ring->refill_buf_ring.size / 457 ath12k_hal_srng_get_entrysize(ab, ringtype); 458 459 rx_ring->bufs_max = num_entries; 460 461 if (ringtype == HAL_RXDMA_MONITOR_STATUS) 462 ath12k_dp_mon_status_bufs_replenish(ab, rx_ring, 463 num_entries); 464 else 465 ath12k_dp_mon_buf_replenish(ab, rx_ring, num_entries); 466 467 return 0; 468 } 469 470 static int ath12k_dp_rxdma_ring_buf_setup(struct ath12k_base *ab, 471 struct dp_rxdma_ring *rx_ring) 472 { 473 LIST_HEAD(list); 474 475 rx_ring->bufs_max = rx_ring->refill_buf_ring.size / 476 ath12k_hal_srng_get_entrysize(ab, HAL_RXDMA_BUF); 477 478 ath12k_dp_rx_bufs_replenish(ab, rx_ring, &list, 0); 479 480 return 0; 481 } 482 483 static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab) 484 { 485 struct ath12k_dp *dp = &ab->dp; 486 struct dp_rxdma_mon_ring *mon_ring; 487 int ret, i; 488 489 ret = ath12k_dp_rxdma_ring_buf_setup(ab, &dp->rx_refill_buf_ring); 490 if (ret) { 491 ath12k_warn(ab, 492 "failed to setup HAL_RXDMA_BUF\n"); 493 return ret; 494 } 495 496 if (ab->hw_params->rxdma1_enable) { 497 ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab, 498 &dp->rxdma_mon_buf_ring, 499 HAL_RXDMA_MONITOR_BUF); 500 if (ret) 501 ath12k_warn(ab, 502 "failed to setup HAL_RXDMA_MONITOR_BUF\n"); 503 return ret; 504 } 505 506 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { 507 mon_ring = &dp->rx_mon_status_refill_ring[i]; 508 ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab, mon_ring, 509 HAL_RXDMA_MONITOR_STATUS); 510 if (ret) { 511 ath12k_warn(ab, 512 "failed to setup HAL_RXDMA_MONITOR_STATUS\n"); 513 return ret; 514 } 515 } 516 517 return 0; 518 } 519 520 static void ath12k_dp_rx_pdev_srng_free(struct ath12k *ar) 521 { 522 struct ath12k_pdev_dp *dp = &ar->dp; 523 struct ath12k_base *ab = ar->ab; 524 int i; 525 526 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) 527 ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_dst_ring[i]); 528 } 529 530 void ath12k_dp_rx_pdev_reo_cleanup(struct 
ath12k_base *ab) 531 { 532 struct ath12k_dp *dp = &ab->dp; 533 int i; 534 535 for (i = 0; i < DP_REO_DST_RING_MAX; i++) 536 ath12k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]); 537 } 538 539 int ath12k_dp_rx_pdev_reo_setup(struct ath12k_base *ab) 540 { 541 struct ath12k_dp *dp = &ab->dp; 542 int ret; 543 int i; 544 545 for (i = 0; i < DP_REO_DST_RING_MAX; i++) { 546 ret = ath12k_dp_srng_setup(ab, &dp->reo_dst_ring[i], 547 HAL_REO_DST, i, 0, 548 DP_REO_DST_RING_SIZE); 549 if (ret) { 550 ath12k_warn(ab, "failed to setup reo_dst_ring\n"); 551 goto err_reo_cleanup; 552 } 553 } 554 555 return 0; 556 557 err_reo_cleanup: 558 ath12k_dp_rx_pdev_reo_cleanup(ab); 559 560 return ret; 561 } 562 563 static int ath12k_dp_rx_pdev_srng_alloc(struct ath12k *ar) 564 { 565 struct ath12k_pdev_dp *dp = &ar->dp; 566 struct ath12k_base *ab = ar->ab; 567 int i; 568 int ret; 569 u32 mac_id = dp->mac_id; 570 571 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { 572 ret = ath12k_dp_srng_setup(ar->ab, 573 &dp->rxdma_mon_dst_ring[i], 574 HAL_RXDMA_MONITOR_DST, 575 0, mac_id + i, 576 DP_RXDMA_MONITOR_DST_RING_SIZE(ab)); 577 if (ret) { 578 ath12k_warn(ar->ab, 579 "failed to setup HAL_RXDMA_MONITOR_DST\n"); 580 return ret; 581 } 582 } 583 584 return 0; 585 } 586 587 static void ath12k_dp_init_rx_tid_rxq(struct ath12k_dp_rx_tid_rxq *rx_tid_rxq, 588 struct ath12k_dp_rx_tid *rx_tid) 589 { 590 rx_tid_rxq->tid = rx_tid->tid; 591 rx_tid_rxq->active = rx_tid->active; 592 rx_tid_rxq->qbuf = rx_tid->qbuf; 593 } 594 595 static void ath12k_dp_rx_tid_cleanup(struct ath12k_base *ab, 596 struct ath12k_reoq_buf *tid_qbuf) 597 { 598 if (tid_qbuf->vaddr) { 599 dma_unmap_single(ab->dev, tid_qbuf->paddr_aligned, 600 tid_qbuf->size, DMA_BIDIRECTIONAL); 601 kfree(tid_qbuf->vaddr); 602 tid_qbuf->vaddr = NULL; 603 } 604 } 605 606 void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab) 607 { 608 struct ath12k_dp *dp = &ab->dp; 609 struct ath12k_dp_rx_reo_cmd *cmd, *tmp; 610 struct ath12k_dp_rx_reo_cache_flush_elem *cmd_cache, *tmp_cache; 611 struct dp_reo_update_rx_queue_elem *cmd_queue, *tmp_queue; 612 613 spin_lock_bh(&dp->reo_rxq_flush_lock); 614 list_for_each_entry_safe(cmd_queue, tmp_queue, &dp->reo_cmd_update_rx_queue_list, 615 list) { 616 list_del(&cmd_queue->list); 617 ath12k_dp_rx_tid_cleanup(ab, &cmd_queue->rx_tid.qbuf); 618 kfree(cmd_queue); 619 } 620 list_for_each_entry_safe(cmd_cache, tmp_cache, 621 &dp->reo_cmd_cache_flush_list, list) { 622 list_del(&cmd_cache->list); 623 dp->reo_cmd_cache_flush_count--; 624 ath12k_dp_rx_tid_cleanup(ab, &cmd_cache->data.qbuf); 625 kfree(cmd_cache); 626 } 627 spin_unlock_bh(&dp->reo_rxq_flush_lock); 628 629 spin_lock_bh(&dp->reo_cmd_lock); 630 list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) { 631 list_del(&cmd->list); 632 ath12k_dp_rx_tid_cleanup(ab, &cmd->data.qbuf); 633 kfree(cmd); 634 } 635 spin_unlock_bh(&dp->reo_cmd_lock); 636 } 637 638 static void ath12k_dp_reo_cmd_free(struct ath12k_dp *dp, void *ctx, 639 enum hal_reo_cmd_status status) 640 { 641 struct ath12k_dp_rx_tid_rxq *rx_tid = ctx; 642 643 if (status != HAL_REO_CMD_SUCCESS) 644 ath12k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n", 645 rx_tid->tid, status); 646 647 ath12k_dp_rx_tid_cleanup(dp->ab, &rx_tid->qbuf); 648 } 649 650 static int ath12k_dp_reo_cmd_send(struct ath12k_base *ab, 651 struct ath12k_dp_rx_tid_rxq *rx_tid, 652 enum hal_reo_cmd_type type, 653 struct ath12k_hal_reo_cmd *cmd, 654 void (*cb)(struct ath12k_dp *dp, void *ctx, 655 enum hal_reo_cmd_status status)) 656 { 657 struct 
ath12k_dp *dp = &ab->dp;
	struct ath12k_dp_rx_reo_cmd *dp_cmd;
	struct hal_srng *cmd_ring;
	int cmd_num;

	cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
	cmd_num = ath12k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);

	/* cmd_num starts from 1; on failure return the error code */
	if (cmd_num < 0)
		return cmd_num;

	/* REO cmd ring descriptors have cmd_num starting from 1 */
	if (cmd_num == 0)
		return -EINVAL;

	if (!cb)
		return 0;

	/* Can this be optimized so that we keep the pending command list only
	 * for tid delete command to free up the resource on the command status
	 * indication?
	 */
	dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC);

	if (!dp_cmd)
		return -ENOMEM;

	memcpy(&dp_cmd->data, rx_tid, sizeof(*rx_tid));
	dp_cmd->cmd_num = cmd_num;
	dp_cmd->handler = cb;

	spin_lock_bh(&dp->reo_cmd_lock);
	list_add_tail(&dp_cmd->list, &dp->reo_cmd_list);
	spin_unlock_bh(&dp->reo_cmd_lock);

	return 0;
}

static int ath12k_dp_reo_cache_flush(struct ath12k_base *ab,
				     struct ath12k_dp_rx_tid_rxq *rx_tid)
{
	struct ath12k_hal_reo_cmd cmd = {};
	int ret;

	cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
	cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
	/* HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS - all pending MPDUs
	 * in the bitmap will be forwarded/flushed to REO output rings
	 */
	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS |
		   HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS;

	/* For all QoS TIDs (except NON_QOS), the driver allocates a maximum
	 * window size of 1024. In such cases, the driver can issue a single
	 * 1KB descriptor flush command instead of sending multiple 128-byte
	 * flush commands for each QoS TID, improving efficiency.
	 */

	if (rx_tid->tid != HAL_DESC_REO_NON_QOS_TID)
		cmd.flag |= HAL_REO_CMD_FLG_FLUSH_QUEUE_1K_DESC;

	ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
				     HAL_REO_CMD_FLUSH_CACHE,
				     &cmd, ath12k_dp_reo_cmd_free);
	return ret;
}

static void ath12k_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u16 tid)
{
	struct ath12k_reo_queue_ref *qref;
	struct ath12k_dp *dp = &ab->dp;
	bool ml_peer = false;

	if (!ab->hw_params->reoq_lut_support)
		return;

	if (peer_id & ATH12K_PEER_ML_ID_VALID) {
		peer_id &= ~ATH12K_PEER_ML_ID_VALID;
		ml_peer = true;
	}

	if (ml_peer)
		qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr +
		       (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
	else
		qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
		       (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);

	qref->info0 = u32_encode_bits(0, BUFFER_ADDR_INFO0_ADDR);
	qref->info1 = u32_encode_bits(0, BUFFER_ADDR_INFO1_ADDR) |
		      u32_encode_bits(tid, DP_REO_QREF_NUM);
}

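/* Walk the pending UPDATE_RX_QUEUE list and retry the delete for every
 * entry whose TID has been marked inactive; once the REO command is
 * queued successfully the queue reference is reset and the entry freed.
 */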
static void ath12k_dp_rx_process_reo_cmd_update_rx_queue_list(struct ath12k_dp *dp)
{
	struct ath12k_base *ab = dp->ab;
	struct dp_reo_update_rx_queue_elem *elem, *tmp;

	spin_lock_bh(&dp->reo_rxq_flush_lock);

	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_update_rx_queue_list, list) {
		if (elem->rx_tid.active)
			continue;

		if (ath12k_dp_rx_tid_delete_handler(ab, &elem->rx_tid))
			break;

		ath12k_peer_rx_tid_qref_reset(ab,
					      elem->is_ml_peer ? elem->ml_peer_id :
					      elem->peer_id,
					      elem->rx_tid.tid);

		if (ab->hw_params->reoq_lut_support)
			ath12k_hal_reo_shared_qaddr_cache_clear(ab);

		list_del(&elem->list);
		kfree(elem);
	}

	spin_unlock_bh(&dp->reo_rxq_flush_lock);
}

static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
				      enum hal_reo_cmd_status status)
{
	struct ath12k_base *ab = dp->ab;
	struct ath12k_dp_rx_tid_rxq *rx_tid = ctx;
	struct ath12k_dp_rx_reo_cache_flush_elem *elem, *tmp;

	if (status == HAL_REO_CMD_DRAIN) {
		goto free_desc;
	} else if (status != HAL_REO_CMD_SUCCESS) {
		/* Shouldn't happen! Cleanup in case of other failure? */
		ath12k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
			    rx_tid->tid, status);
		return;
	}

	/* Retry the HAL_REO_CMD_UPDATE_RX_QUEUE command for entries in the
	 * pending queue list whose TID has been marked inactive
	 */
	spin_lock_bh(&dp->ab->base_lock);
	ath12k_dp_rx_process_reo_cmd_update_rx_queue_list(dp);
	spin_unlock_bh(&dp->ab->base_lock);

	elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
	if (!elem)
		goto free_desc;

	elem->ts = jiffies;
	memcpy(&elem->data, rx_tid, sizeof(*rx_tid));

	spin_lock_bh(&dp->reo_rxq_flush_lock);
	list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
	dp->reo_cmd_cache_flush_count++;

	/* Flush and invalidate aged REO desc from HW cache */
	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
				 list) {
		if (dp->reo_cmd_cache_flush_count > ATH12K_DP_RX_REO_DESC_FREE_THRES ||
		    time_after(jiffies, elem->ts +
			       msecs_to_jiffies(ATH12K_DP_RX_REO_DESC_FREE_TIMEOUT_MS))) {
			/* The reo_cmd_cache_flush_list is used in only two
			 * contexts: here, called from napi, and in
			 * ath12k_dp_free() during core destroy.
			 * If sending the cache flush command succeeds, delete
			 * the element from the cache list;
			 * ath12k_dp_rx_reo_cmd_list_cleanup() will handle the
			 * rest during core destroy.
			 */

			if (ath12k_dp_reo_cache_flush(ab, &elem->data))
				break;

			list_del(&elem->list);
			dp->reo_cmd_cache_flush_count--;
			kfree(elem);
		}
	}
	spin_unlock_bh(&dp->reo_rxq_flush_lock);

	return;
free_desc:
	ath12k_dp_rx_tid_cleanup(ab, &rx_tid->qbuf);
}

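/* Issue HAL_REO_CMD_UPDATE_RX_QUEUE to invalidate the TID queue;
 * completion is handled in ath12k_dp_rx_tid_del_func().
 */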
static int ath12k_dp_rx_tid_delete_handler(struct ath12k_base *ab,
					   struct ath12k_dp_rx_tid_rxq *rx_tid)
{
	struct ath12k_hal_reo_cmd cmd = {};

	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
	cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
	cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
	/* A flush cache failure has been observed; to avoid it, set the VLD bit during delete */
	cmd.upd1 |= HAL_REO_CMD_UPD1_VLD;

	return ath12k_dp_reo_cmd_send(ab, rx_tid,
				      HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
				      ath12k_dp_rx_tid_del_func);
}

static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u16 tid,
					  dma_addr_t paddr)
{
	struct ath12k_reo_queue_ref *qref;
	struct ath12k_dp *dp = &ab->dp;
	bool ml_peer = false;

	if (!ab->hw_params->reoq_lut_support)
		return;

	if (peer_id & ATH12K_PEER_ML_ID_VALID) {
		peer_id &= ~ATH12K_PEER_ML_ID_VALID;
		ml_peer = true;
	}

	if (ml_peer)
		qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr +
		       (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
	else
		qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
		       (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);

	qref->info0 = u32_encode_bits(lower_32_bits(paddr),
				      BUFFER_ADDR_INFO0_ADDR);
	qref->info1 = u32_encode_bits(upper_32_bits(paddr),
				      BUFFER_ADDR_INFO1_ADDR) |
		      u32_encode_bits(tid, DP_REO_QREF_NUM);
	ath12k_hal_reo_shared_qaddr_cache_clear(ab);
}

static void ath12k_dp_mark_tid_as_inactive(struct ath12k_dp *dp, int peer_id, u8 tid)
{
	struct dp_reo_update_rx_queue_elem *elem;
	struct ath12k_dp_rx_tid_rxq *rx_tid;

	spin_lock_bh(&dp->reo_rxq_flush_lock);
	list_for_each_entry(elem, &dp->reo_cmd_update_rx_queue_list, list) {
		if (elem->peer_id == peer_id) {
			rx_tid = &elem->rx_tid;
			if (rx_tid->tid == tid) {
				rx_tid->active = false;
				break;
			}
		}
	}
	spin_unlock_bh(&dp->reo_rxq_flush_lock);
}

void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
				  struct ath12k_peer *peer, u8 tid)
{
	struct ath12k_dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	struct ath12k_base *ab = ar->ab;
	struct ath12k_dp *dp = &ab->dp;

	if (!rx_tid->active)
		return;

	rx_tid->active = false;

	ath12k_dp_mark_tid_as_inactive(dp, peer->peer_id, tid);
	ath12k_dp_rx_process_reo_cmd_update_rx_queue_list(dp);
}

int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab,
				  struct ath12k_buffer_addr *buf_addr_info,
				  enum hal_wbm_rel_bm_act action)
{
	struct hal_wbm_release_ring *desc;
	struct ath12k_dp *dp = &ab->dp;
	struct hal_srng *srng;
	int ret = 0;

	srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOBUFS;
		goto exit;
	}

	ath12k_hal_rx_msdu_link_desc_set(ab, desc, buf_addr_info, action);

exit:
	ath12k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return ret;
}

static void
ath12k_dp_rx_frags_cleanup(struct ath12k_dp_rx_tid *rx_tid, 956 bool rel_link_desc) 957 { 958 struct ath12k_buffer_addr *buf_addr_info; 959 struct ath12k_base *ab = rx_tid->ab; 960 961 lockdep_assert_held(&ab->base_lock); 962 963 if (rx_tid->dst_ring_desc) { 964 if (rel_link_desc) { 965 buf_addr_info = &rx_tid->dst_ring_desc->buf_addr_info; 966 ath12k_dp_rx_link_desc_return(ab, buf_addr_info, 967 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 968 } 969 kfree(rx_tid->dst_ring_desc); 970 rx_tid->dst_ring_desc = NULL; 971 } 972 973 rx_tid->cur_sn = 0; 974 rx_tid->last_frag_no = 0; 975 rx_tid->rx_frag_bitmap = 0; 976 __skb_queue_purge(&rx_tid->rx_frags); 977 } 978 979 void ath12k_dp_rx_peer_tid_cleanup(struct ath12k *ar, struct ath12k_peer *peer) 980 { 981 struct ath12k_dp_rx_tid *rx_tid; 982 int i; 983 984 lockdep_assert_held(&ar->ab->base_lock); 985 986 for (i = 0; i <= IEEE80211_NUM_TIDS; i++) { 987 rx_tid = &peer->rx_tid[i]; 988 989 ath12k_dp_rx_peer_tid_delete(ar, peer, i); 990 ath12k_dp_rx_frags_cleanup(rx_tid, true); 991 992 spin_unlock_bh(&ar->ab->base_lock); 993 timer_delete_sync(&rx_tid->frag_timer); 994 spin_lock_bh(&ar->ab->base_lock); 995 } 996 } 997 998 static int ath12k_peer_rx_tid_reo_update(struct ath12k *ar, 999 struct ath12k_peer *peer, 1000 struct ath12k_dp_rx_tid *rx_tid, 1001 u32 ba_win_sz, u16 ssn, 1002 bool update_ssn) 1003 { 1004 struct ath12k_hal_reo_cmd cmd = {}; 1005 int ret; 1006 struct ath12k_dp_rx_tid_rxq rx_tid_rxq; 1007 1008 ath12k_dp_init_rx_tid_rxq(&rx_tid_rxq, rx_tid); 1009 1010 cmd.addr_lo = lower_32_bits(rx_tid_rxq.qbuf.paddr_aligned); 1011 cmd.addr_hi = upper_32_bits(rx_tid_rxq.qbuf.paddr_aligned); 1012 cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS; 1013 cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE; 1014 cmd.ba_window_size = ba_win_sz; 1015 1016 if (update_ssn) { 1017 cmd.upd0 |= HAL_REO_CMD_UPD0_SSN; 1018 cmd.upd2 = u32_encode_bits(ssn, HAL_REO_CMD_UPD2_SSN); 1019 } 1020 1021 ret = ath12k_dp_reo_cmd_send(ar->ab, &rx_tid_rxq, 1022 HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd, 1023 NULL); 1024 if (ret) { 1025 ath12k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n", 1026 rx_tid_rxq.tid, ret); 1027 return ret; 1028 } 1029 1030 rx_tid->ba_win_sz = ba_win_sz; 1031 1032 return 0; 1033 } 1034 1035 static int ath12k_dp_rx_assign_reoq(struct ath12k_base *ab, 1036 struct ath12k_sta *ahsta, 1037 struct ath12k_dp_rx_tid *rx_tid, 1038 u16 ssn, enum hal_pn_type pn_type) 1039 { 1040 u32 ba_win_sz = rx_tid->ba_win_sz; 1041 struct ath12k_reoq_buf *buf; 1042 void *vaddr, *vaddr_aligned; 1043 dma_addr_t paddr_aligned; 1044 u8 tid = rx_tid->tid; 1045 u32 hw_desc_sz; 1046 int ret; 1047 1048 buf = &ahsta->reoq_bufs[tid]; 1049 if (!buf->vaddr) { 1050 /* TODO: Optimize the memory allocation for qos tid based on 1051 * the actual BA window size in REO tid update path. 
		 */
		if (tid == HAL_DESC_REO_NON_QOS_TID)
			hw_desc_sz = ath12k_hal_reo_qdesc_size(ba_win_sz, tid);
		else
			hw_desc_sz = ath12k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);

		vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
		if (!vaddr)
			return -ENOMEM;

		vaddr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);

		ath12k_hal_reo_qdesc_setup(vaddr_aligned, tid, ba_win_sz,
					   ssn, pn_type);

		paddr_aligned = dma_map_single(ab->dev, vaddr_aligned, hw_desc_sz,
					       DMA_BIDIRECTIONAL);
		ret = dma_mapping_error(ab->dev, paddr_aligned);
		if (ret) {
			kfree(vaddr);
			return ret;
		}

		buf->vaddr = vaddr;
		buf->paddr_aligned = paddr_aligned;
		buf->size = hw_desc_sz;
	}

	rx_tid->qbuf = *buf;
	rx_tid->active = true;

	return 0;
}

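/* Pre-allocate and queue an UPDATE_RX_QUEUE element for this peer/tid so
 * that the TID delete path never has to allocate memory.
 */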
static int ath12k_dp_prepare_reo_update_elem(struct ath12k_dp *dp,
					     struct ath12k_peer *peer,
					     struct ath12k_dp_rx_tid *rx_tid)
{
	struct dp_reo_update_rx_queue_elem *elem;

	elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
	if (!elem)
		return -ENOMEM;

	elem->peer_id = peer->peer_id;
	elem->is_ml_peer = peer->mlo;
	elem->ml_peer_id = peer->ml_id;

	ath12k_dp_init_rx_tid_rxq(&elem->rx_tid, rx_tid);

	spin_lock_bh(&dp->reo_rxq_flush_lock);
	list_add_tail(&elem->list, &dp->reo_cmd_update_rx_queue_list);
	spin_unlock_bh(&dp->reo_rxq_flush_lock);

	return 0;
}

int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id,
				u8 tid, u32 ba_win_sz, u16 ssn,
				enum hal_pn_type pn_type)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_dp *dp = &ab->dp;
	struct ath12k_peer *peer;
	struct ath12k_sta *ahsta;
	struct ath12k_dp_rx_tid *rx_tid;
	dma_addr_t paddr_aligned;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath12k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		spin_unlock_bh(&ab->base_lock);
		ath12k_warn(ab, "failed to find the peer to set up rx tid\n");
		return -ENOENT;
	}

	if (ab->hw_params->dp_primary_link_only &&
	    !peer->primary_link) {
		spin_unlock_bh(&ab->base_lock);
		return 0;
	}

	if (ab->hw_params->reoq_lut_support &&
	    (!dp->reoq_lut.vaddr || !dp->ml_reoq_lut.vaddr)) {
		spin_unlock_bh(&ab->base_lock);
		ath12k_warn(ab, "reo qref table is not setup\n");
		return -EINVAL;
	}

	if (peer->peer_id > DP_MAX_PEER_ID || tid > IEEE80211_NUM_TIDS) {
		ath12k_warn(ab, "peer id of peer %d or tid %d doesn't allow reoq setup\n",
			    peer->peer_id, tid);
		spin_unlock_bh(&ab->base_lock);
		return -EINVAL;
	}

	rx_tid = &peer->rx_tid[tid];
	/* Update the tid queue if it is already setup */
	if (rx_tid->active) {
		ret = ath12k_peer_rx_tid_reo_update(ar, peer, rx_tid,
						    ba_win_sz, ssn, true);
		spin_unlock_bh(&ab->base_lock);
		if (ret) {
			ath12k_warn(ab, "failed to update reo for rx tid %d\n", tid);
			return ret;
		}

		if (!ab->hw_params->reoq_lut_support) {
			paddr_aligned = rx_tid->qbuf.paddr_aligned;
			ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
								     peer_mac,
								     paddr_aligned, tid,
								     1, ba_win_sz);
			if (ret) {
				ath12k_warn(ab, "failed to setup peer rx reorder queue for tid %d: %d\n",
					    tid, ret);
				return ret;
			}
		}

		return 0;
	}

	rx_tid->tid = tid;

	rx_tid->ba_win_sz = ba_win_sz;

	ahsta = ath12k_sta_to_ahsta(peer->sta);
	ret = ath12k_dp_rx_assign_reoq(ab, ahsta, rx_tid, ssn, pn_type);
	if (ret) {
		spin_unlock_bh(&ab->base_lock);
		ath12k_warn(ab, "failed to assign reoq buf for rx tid %u\n", tid);
		return ret;
	}

	/* Pre-allocate the update_rxq_list entry for this tid. It will be
	 * used during tid delete. The reason for not allocating it at delete
	 * time is that an allocation failure there would prevent freeing the
	 * tid vaddr/paddr and could lead to a memory leak.
	 */
	ret = ath12k_dp_prepare_reo_update_elem(dp, peer, rx_tid);
	if (ret) {
		ath12k_warn(ab, "failed to alloc update_rxq_list for rx tid %u\n", tid);
		ath12k_dp_rx_tid_cleanup(ab, &rx_tid->qbuf);
		spin_unlock_bh(&ab->base_lock);
		return ret;
	}

	paddr_aligned = rx_tid->qbuf.paddr_aligned;
	if (ab->hw_params->reoq_lut_support) {
		/* Update the REO queue LUT at the corresponding peer id
		 * and tid with qaddr.
		 */
		if (peer->mlo)
			ath12k_peer_rx_tid_qref_setup(ab, peer->ml_id, tid,
						      paddr_aligned);
		else
			ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid,
						      paddr_aligned);

		spin_unlock_bh(&ab->base_lock);
	} else {
		spin_unlock_bh(&ab->base_lock);
		ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
							     paddr_aligned, tid, 1,
							     ba_win_sz);
	}

	return ret;
}

int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
			     struct ieee80211_ampdu_params *params,
			     u8 link_id)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta);
	struct ath12k_link_sta *arsta;
	int vdev_id;
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
				  ahsta->link[link_id]);
	if (!arsta)
		return -ENOLINK;

	vdev_id = arsta->arvif->vdev_id;

	ret = ath12k_dp_rx_peer_tid_setup(ar, arsta->addr, vdev_id,
					  params->tid, params->buf_size,
					  params->ssn, arsta->ahsta->pn_type);
	if (ret)
		ath12k_warn(ab, "failed to setup rx tid %d\n", ret);

	return ret;
}

int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,
			    struct ieee80211_ampdu_params *params,
			    u8 link_id)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_peer *peer;
	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta);
	struct ath12k_link_sta *arsta;
	int vdev_id;
	bool active;
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
				  ahsta->link[link_id]);
	if (!arsta)
		return -ENOLINK;

	vdev_id = arsta->arvif->vdev_id;

	spin_lock_bh(&ab->base_lock);

	peer = ath12k_peer_find(ab, vdev_id, arsta->addr);
	if (!peer) {
		spin_unlock_bh(&ab->base_lock);
		ath12k_warn(ab, "failed to find the peer to stop rx aggregation\n");
		return -ENOENT;
	}

	active = peer->rx_tid[params->tid].active;

	if (!active) {
		spin_unlock_bh(&ab->base_lock);
		return 0;
	}

	ret = ath12k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
	spin_unlock_bh(&ab->base_lock);
	if (ret) {
		ath12k_warn(ab, "failed to update reo for rx tid %d: %d\n",
			    params->tid, ret);
		return ret;
	}

	return ret;
}

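/* Configure hardware PN/TSC replay check on all active rx TID queues of
 * the peer; only pairwise keys are offloaded (see the NOTE below).
 */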
int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif, 1302 const u8 *peer_addr, 1303 enum set_key_cmd key_cmd, 1304 struct ieee80211_key_conf *key) 1305 { 1306 struct ath12k *ar = arvif->ar; 1307 struct ath12k_base *ab = ar->ab; 1308 struct ath12k_hal_reo_cmd cmd = {}; 1309 struct ath12k_peer *peer; 1310 struct ath12k_dp_rx_tid *rx_tid; 1311 struct ath12k_dp_rx_tid_rxq rx_tid_rxq; 1312 u8 tid; 1313 int ret = 0; 1314 1315 /* NOTE: Enable PN/TSC replay check offload only for unicast frames. 1316 * We use mac80211 PN/TSC replay check functionality for bcast/mcast 1317 * for now. 1318 */ 1319 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) 1320 return 0; 1321 1322 cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS; 1323 cmd.upd0 = HAL_REO_CMD_UPD0_PN | 1324 HAL_REO_CMD_UPD0_PN_SIZE | 1325 HAL_REO_CMD_UPD0_PN_VALID | 1326 HAL_REO_CMD_UPD0_PN_CHECK | 1327 HAL_REO_CMD_UPD0_SVLD; 1328 1329 switch (key->cipher) { 1330 case WLAN_CIPHER_SUITE_TKIP: 1331 case WLAN_CIPHER_SUITE_CCMP: 1332 case WLAN_CIPHER_SUITE_CCMP_256: 1333 case WLAN_CIPHER_SUITE_GCMP: 1334 case WLAN_CIPHER_SUITE_GCMP_256: 1335 if (key_cmd == SET_KEY) { 1336 cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK; 1337 cmd.pn_size = 48; 1338 } 1339 break; 1340 default: 1341 break; 1342 } 1343 1344 spin_lock_bh(&ab->base_lock); 1345 1346 peer = ath12k_peer_find(ab, arvif->vdev_id, peer_addr); 1347 if (!peer) { 1348 spin_unlock_bh(&ab->base_lock); 1349 ath12k_warn(ab, "failed to find the peer %pM to configure pn replay detection\n", 1350 peer_addr); 1351 return -ENOENT; 1352 } 1353 1354 for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) { 1355 rx_tid = &peer->rx_tid[tid]; 1356 if (!rx_tid->active) 1357 continue; 1358 1359 ath12k_dp_init_rx_tid_rxq(&rx_tid_rxq, rx_tid); 1360 cmd.addr_lo = lower_32_bits(rx_tid_rxq.qbuf.paddr_aligned); 1361 cmd.addr_hi = upper_32_bits(rx_tid_rxq.qbuf.paddr_aligned); 1362 ret = ath12k_dp_reo_cmd_send(ab, &rx_tid_rxq, 1363 HAL_REO_CMD_UPDATE_RX_QUEUE, 1364 &cmd, NULL); 1365 if (ret) { 1366 ath12k_warn(ab, "failed to configure rx tid %d queue of peer %pM for pn replay detection %d\n", 1367 tid, peer_addr, ret); 1368 break; 1369 } 1370 } 1371 1372 spin_unlock_bh(&ab->base_lock); 1373 1374 return ret; 1375 } 1376 1377 static int ath12k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats, 1378 u16 peer_id) 1379 { 1380 int i; 1381 1382 for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) { 1383 if (ppdu_stats->user_stats[i].is_valid_peer_id) { 1384 if (peer_id == ppdu_stats->user_stats[i].peer_id) 1385 return i; 1386 } else { 1387 return i; 1388 } 1389 } 1390 1391 return -EINVAL; 1392 } 1393 1394 static int ath12k_htt_tlv_ppdu_stats_parse(struct ath12k_base *ab, 1395 u16 tag, u16 len, const void *ptr, 1396 void *data) 1397 { 1398 const struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *ba_status; 1399 const struct htt_ppdu_stats_usr_cmpltn_cmn *cmplt_cmn; 1400 const struct htt_ppdu_stats_user_rate *user_rate; 1401 struct htt_ppdu_stats_info *ppdu_info; 1402 struct htt_ppdu_user_stats *user_stats; 1403 int cur_user; 1404 u16 peer_id; 1405 1406 ppdu_info = data; 1407 1408 switch (tag) { 1409 case HTT_PPDU_STATS_TAG_COMMON: 1410 if (len < sizeof(struct htt_ppdu_stats_common)) { 1411 ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n", 1412 len, tag); 1413 return -EINVAL; 1414 } 1415 memcpy(&ppdu_info->ppdu_stats.common, ptr, 1416 sizeof(struct htt_ppdu_stats_common)); 1417 break; 1418 case HTT_PPDU_STATS_TAG_USR_RATE: 1419 if (len < sizeof(struct htt_ppdu_stats_user_rate)) { 1420 ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n", 
1421 len, tag); 1422 return -EINVAL; 1423 } 1424 user_rate = ptr; 1425 peer_id = le16_to_cpu(user_rate->sw_peer_id); 1426 cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats, 1427 peer_id); 1428 if (cur_user < 0) 1429 return -EINVAL; 1430 user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; 1431 user_stats->peer_id = peer_id; 1432 user_stats->is_valid_peer_id = true; 1433 memcpy(&user_stats->rate, ptr, 1434 sizeof(struct htt_ppdu_stats_user_rate)); 1435 user_stats->tlv_flags |= BIT(tag); 1436 break; 1437 case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON: 1438 if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) { 1439 ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n", 1440 len, tag); 1441 return -EINVAL; 1442 } 1443 1444 cmplt_cmn = ptr; 1445 peer_id = le16_to_cpu(cmplt_cmn->sw_peer_id); 1446 cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats, 1447 peer_id); 1448 if (cur_user < 0) 1449 return -EINVAL; 1450 user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; 1451 user_stats->peer_id = peer_id; 1452 user_stats->is_valid_peer_id = true; 1453 memcpy(&user_stats->cmpltn_cmn, ptr, 1454 sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)); 1455 user_stats->tlv_flags |= BIT(tag); 1456 break; 1457 case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS: 1458 if (len < 1459 sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) { 1460 ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n", 1461 len, tag); 1462 return -EINVAL; 1463 } 1464 1465 ba_status = ptr; 1466 peer_id = le16_to_cpu(ba_status->sw_peer_id); 1467 cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats, 1468 peer_id); 1469 if (cur_user < 0) 1470 return -EINVAL; 1471 user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; 1472 user_stats->peer_id = peer_id; 1473 user_stats->is_valid_peer_id = true; 1474 memcpy(&user_stats->ack_ba, ptr, 1475 sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)); 1476 user_stats->tlv_flags |= BIT(tag); 1477 break; 1478 } 1479 return 0; 1480 } 1481 1482 int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len, 1483 int (*iter)(struct ath12k_base *ar, u16 tag, u16 len, 1484 const void *ptr, void *data), 1485 void *data) 1486 { 1487 const struct htt_tlv *tlv; 1488 const void *begin = ptr; 1489 u16 tlv_tag, tlv_len; 1490 int ret = -EINVAL; 1491 1492 while (len > 0) { 1493 if (len < sizeof(*tlv)) { 1494 ath12k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n", 1495 ptr - begin, len, sizeof(*tlv)); 1496 return -EINVAL; 1497 } 1498 tlv = (struct htt_tlv *)ptr; 1499 tlv_tag = le32_get_bits(tlv->header, HTT_TLV_TAG); 1500 tlv_len = le32_get_bits(tlv->header, HTT_TLV_LEN); 1501 ptr += sizeof(*tlv); 1502 len -= sizeof(*tlv); 1503 1504 if (tlv_len > len) { 1505 ath12k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n", 1506 tlv_tag, ptr - begin, len, tlv_len); 1507 return -EINVAL; 1508 } 1509 ret = iter(ab, tlv_tag, tlv_len, ptr, data); 1510 if (ret == -ENOMEM) 1511 return ret; 1512 1513 ptr += tlv_len; 1514 len -= tlv_len; 1515 } 1516 return 0; 1517 } 1518 1519 static void 1520 ath12k_update_per_peer_tx_stats(struct ath12k *ar, 1521 struct htt_ppdu_stats *ppdu_stats, u8 user) 1522 { 1523 struct ath12k_base *ab = ar->ab; 1524 struct ath12k_peer *peer; 1525 struct ath12k_link_sta *arsta; 1526 struct htt_ppdu_stats_user_rate *user_rate; 1527 struct ath12k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats; 1528 struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user]; 1529 struct 
htt_ppdu_stats_common *common = &ppdu_stats->common; 1530 int ret; 1531 u8 flags, mcs, nss, bw, sgi, dcm, ppdu_type, rate_idx = 0; 1532 u32 v, succ_bytes = 0; 1533 u16 tones, rate = 0, succ_pkts = 0; 1534 u32 tx_duration = 0; 1535 u8 tid = HTT_PPDU_STATS_NON_QOS_TID; 1536 u16 tx_retry_failed = 0, tx_retry_count = 0; 1537 bool is_ampdu = false, is_ofdma; 1538 1539 if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE))) 1540 return; 1541 1542 if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON)) { 1543 is_ampdu = 1544 HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags); 1545 tx_retry_failed = 1546 __le16_to_cpu(usr_stats->cmpltn_cmn.mpdu_tried) - 1547 __le16_to_cpu(usr_stats->cmpltn_cmn.mpdu_success); 1548 tx_retry_count = 1549 HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) + 1550 HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags); 1551 } 1552 1553 if (usr_stats->tlv_flags & 1554 BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) { 1555 succ_bytes = le32_to_cpu(usr_stats->ack_ba.success_bytes); 1556 succ_pkts = le32_get_bits(usr_stats->ack_ba.info, 1557 HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M); 1558 tid = le32_get_bits(usr_stats->ack_ba.info, 1559 HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM); 1560 } 1561 1562 if (common->fes_duration_us) 1563 tx_duration = le32_to_cpu(common->fes_duration_us); 1564 1565 user_rate = &usr_stats->rate; 1566 flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags); 1567 bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2; 1568 nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1; 1569 mcs = HTT_USR_RATE_MCS(user_rate->rate_flags); 1570 sgi = HTT_USR_RATE_GI(user_rate->rate_flags); 1571 dcm = HTT_USR_RATE_DCM(user_rate->rate_flags); 1572 1573 ppdu_type = HTT_USR_RATE_PPDU_TYPE(user_rate->info1); 1574 is_ofdma = (ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA) || 1575 (ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA); 1576 1577 /* Note: If host configured fixed rates and in some other special 1578 * cases, the broadcast/management frames are sent in different rates. 1579 * Firmware rate's control to be skipped for this? 
1580 */ 1581 1582 if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH12K_HE_MCS_MAX) { 1583 ath12k_warn(ab, "Invalid HE mcs %d peer stats", mcs); 1584 return; 1585 } 1586 1587 if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH12K_VHT_MCS_MAX) { 1588 ath12k_warn(ab, "Invalid VHT mcs %d peer stats", mcs); 1589 return; 1590 } 1591 1592 if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH12K_HT_MCS_MAX || nss < 1)) { 1593 ath12k_warn(ab, "Invalid HT mcs %d nss %d peer stats", 1594 mcs, nss); 1595 return; 1596 } 1597 1598 if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) { 1599 ret = ath12k_mac_hw_ratecode_to_legacy_rate(mcs, 1600 flags, 1601 &rate_idx, 1602 &rate); 1603 if (ret < 0) 1604 return; 1605 } 1606 1607 rcu_read_lock(); 1608 spin_lock_bh(&ab->base_lock); 1609 peer = ath12k_peer_find_by_id(ab, usr_stats->peer_id); 1610 1611 if (!peer || !peer->sta) { 1612 spin_unlock_bh(&ab->base_lock); 1613 rcu_read_unlock(); 1614 return; 1615 } 1616 1617 arsta = ath12k_peer_get_link_sta(ab, peer); 1618 if (!arsta) { 1619 spin_unlock_bh(&ab->base_lock); 1620 rcu_read_unlock(); 1621 return; 1622 } 1623 1624 memset(&arsta->txrate, 0, sizeof(arsta->txrate)); 1625 1626 arsta->txrate.bw = ath12k_mac_bw_to_mac80211_bw(bw); 1627 1628 switch (flags) { 1629 case WMI_RATE_PREAMBLE_OFDM: 1630 arsta->txrate.legacy = rate; 1631 break; 1632 case WMI_RATE_PREAMBLE_CCK: 1633 arsta->txrate.legacy = rate; 1634 break; 1635 case WMI_RATE_PREAMBLE_HT: 1636 arsta->txrate.mcs = mcs + 8 * (nss - 1); 1637 arsta->txrate.flags = RATE_INFO_FLAGS_MCS; 1638 if (sgi) 1639 arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; 1640 break; 1641 case WMI_RATE_PREAMBLE_VHT: 1642 arsta->txrate.mcs = mcs; 1643 arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS; 1644 if (sgi) 1645 arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; 1646 break; 1647 case WMI_RATE_PREAMBLE_HE: 1648 arsta->txrate.mcs = mcs; 1649 arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS; 1650 arsta->txrate.he_dcm = dcm; 1651 arsta->txrate.he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi); 1652 tones = le16_to_cpu(user_rate->ru_end) - 1653 le16_to_cpu(user_rate->ru_start) + 1; 1654 v = ath12k_he_ru_tones_to_nl80211_he_ru_alloc(tones); 1655 arsta->txrate.he_ru_alloc = v; 1656 if (is_ofdma) 1657 arsta->txrate.bw = RATE_INFO_BW_HE_RU; 1658 break; 1659 case WMI_RATE_PREAMBLE_EHT: 1660 arsta->txrate.mcs = mcs; 1661 arsta->txrate.flags = RATE_INFO_FLAGS_EHT_MCS; 1662 arsta->txrate.he_dcm = dcm; 1663 arsta->txrate.eht_gi = ath12k_mac_eht_gi_to_nl80211_eht_gi(sgi); 1664 tones = le16_to_cpu(user_rate->ru_end) - 1665 le16_to_cpu(user_rate->ru_start) + 1; 1666 v = ath12k_mac_eht_ru_tones_to_nl80211_eht_ru_alloc(tones); 1667 arsta->txrate.eht_ru_alloc = v; 1668 if (is_ofdma) 1669 arsta->txrate.bw = RATE_INFO_BW_EHT_RU; 1670 break; 1671 } 1672 1673 arsta->tx_retry_failed += tx_retry_failed; 1674 arsta->tx_retry_count += tx_retry_count; 1675 arsta->txrate.nss = nss; 1676 arsta->tx_duration += tx_duration; 1677 memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info)); 1678 1679 /* PPDU stats reported for mgmt packet doesn't have valid tx bytes. 1680 * So skip peer stats update for mgmt packets. 
1681 */ 1682 if (tid < HTT_PPDU_STATS_NON_QOS_TID) { 1683 memset(peer_stats, 0, sizeof(*peer_stats)); 1684 peer_stats->succ_pkts = succ_pkts; 1685 peer_stats->succ_bytes = succ_bytes; 1686 peer_stats->is_ampdu = is_ampdu; 1687 peer_stats->duration = tx_duration; 1688 peer_stats->ba_fails = 1689 HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) + 1690 HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags); 1691 } 1692 1693 spin_unlock_bh(&ab->base_lock); 1694 rcu_read_unlock(); 1695 } 1696 1697 static void ath12k_htt_update_ppdu_stats(struct ath12k *ar, 1698 struct htt_ppdu_stats *ppdu_stats) 1699 { 1700 u8 user; 1701 1702 for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++) 1703 ath12k_update_per_peer_tx_stats(ar, ppdu_stats, user); 1704 } 1705 1706 static 1707 struct htt_ppdu_stats_info *ath12k_dp_htt_get_ppdu_desc(struct ath12k *ar, 1708 u32 ppdu_id) 1709 { 1710 struct htt_ppdu_stats_info *ppdu_info; 1711 1712 lockdep_assert_held(&ar->data_lock); 1713 if (!list_empty(&ar->ppdu_stats_info)) { 1714 list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) { 1715 if (ppdu_info->ppdu_id == ppdu_id) 1716 return ppdu_info; 1717 } 1718 1719 if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) { 1720 ppdu_info = list_first_entry(&ar->ppdu_stats_info, 1721 typeof(*ppdu_info), list); 1722 list_del(&ppdu_info->list); 1723 ar->ppdu_stat_list_depth--; 1724 ath12k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats); 1725 kfree(ppdu_info); 1726 } 1727 } 1728 1729 ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC); 1730 if (!ppdu_info) 1731 return NULL; 1732 1733 list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info); 1734 ar->ppdu_stat_list_depth++; 1735 1736 return ppdu_info; 1737 } 1738 1739 static void ath12k_copy_to_delay_stats(struct ath12k_peer *peer, 1740 struct htt_ppdu_user_stats *usr_stats) 1741 { 1742 peer->ppdu_stats_delayba.sw_peer_id = le16_to_cpu(usr_stats->rate.sw_peer_id); 1743 peer->ppdu_stats_delayba.info0 = le32_to_cpu(usr_stats->rate.info0); 1744 peer->ppdu_stats_delayba.ru_end = le16_to_cpu(usr_stats->rate.ru_end); 1745 peer->ppdu_stats_delayba.ru_start = le16_to_cpu(usr_stats->rate.ru_start); 1746 peer->ppdu_stats_delayba.info1 = le32_to_cpu(usr_stats->rate.info1); 1747 peer->ppdu_stats_delayba.rate_flags = le32_to_cpu(usr_stats->rate.rate_flags); 1748 peer->ppdu_stats_delayba.resp_rate_flags = 1749 le32_to_cpu(usr_stats->rate.resp_rate_flags); 1750 1751 peer->delayba_flag = true; 1752 } 1753 1754 static void ath12k_copy_to_bar(struct ath12k_peer *peer, 1755 struct htt_ppdu_user_stats *usr_stats) 1756 { 1757 usr_stats->rate.sw_peer_id = cpu_to_le16(peer->ppdu_stats_delayba.sw_peer_id); 1758 usr_stats->rate.info0 = cpu_to_le32(peer->ppdu_stats_delayba.info0); 1759 usr_stats->rate.ru_end = cpu_to_le16(peer->ppdu_stats_delayba.ru_end); 1760 usr_stats->rate.ru_start = cpu_to_le16(peer->ppdu_stats_delayba.ru_start); 1761 usr_stats->rate.info1 = cpu_to_le32(peer->ppdu_stats_delayba.info1); 1762 usr_stats->rate.rate_flags = cpu_to_le32(peer->ppdu_stats_delayba.rate_flags); 1763 usr_stats->rate.resp_rate_flags = 1764 cpu_to_le32(peer->ppdu_stats_delayba.resp_rate_flags); 1765 1766 peer->delayba_flag = false; 1767 } 1768 1769 static int ath12k_htt_pull_ppdu_stats(struct ath12k_base *ab, 1770 struct sk_buff *skb) 1771 { 1772 struct ath12k_htt_ppdu_stats_msg *msg; 1773 struct htt_ppdu_stats_info *ppdu_info; 1774 struct ath12k_peer *peer = NULL; 1775 struct htt_ppdu_user_stats *usr_stats = NULL; 1776 u32 peer_id = 0; 1777 struct ath12k *ar; 1778 int ret, i; 1779 u8 pdev_id; 
1780 u32 ppdu_id, len; 1781 1782 msg = (struct ath12k_htt_ppdu_stats_msg *)skb->data; 1783 len = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE); 1784 if (len > (skb->len - struct_size(msg, data, 0))) { 1785 ath12k_warn(ab, 1786 "HTT PPDU STATS event has unexpected payload size %u, should be smaller than %u\n", 1787 len, skb->len); 1788 return -EINVAL; 1789 } 1790 1791 pdev_id = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PDEV_ID); 1792 ppdu_id = le32_to_cpu(msg->ppdu_id); 1793 1794 rcu_read_lock(); 1795 ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id); 1796 if (!ar) { 1797 ret = -EINVAL; 1798 goto exit; 1799 } 1800 1801 spin_lock_bh(&ar->data_lock); 1802 ppdu_info = ath12k_dp_htt_get_ppdu_desc(ar, ppdu_id); 1803 if (!ppdu_info) { 1804 spin_unlock_bh(&ar->data_lock); 1805 ret = -EINVAL; 1806 goto exit; 1807 } 1808 1809 ppdu_info->ppdu_id = ppdu_id; 1810 ret = ath12k_dp_htt_tlv_iter(ab, msg->data, len, 1811 ath12k_htt_tlv_ppdu_stats_parse, 1812 (void *)ppdu_info); 1813 if (ret) { 1814 spin_unlock_bh(&ar->data_lock); 1815 ath12k_warn(ab, "Failed to parse tlv %d\n", ret); 1816 goto exit; 1817 } 1818 1819 if (ppdu_info->ppdu_stats.common.num_users >= HTT_PPDU_STATS_MAX_USERS) { 1820 spin_unlock_bh(&ar->data_lock); 1821 ath12k_warn(ab, 1822 "HTT PPDU STATS event has unexpected num_users %u, should be smaller than %u\n", 1823 ppdu_info->ppdu_stats.common.num_users, 1824 HTT_PPDU_STATS_MAX_USERS); 1825 ret = -EINVAL; 1826 goto exit; 1827 } 1828 1829 /* back up data rate tlv for all peers */ 1830 if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_DATA && 1831 (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON)) && 1832 ppdu_info->delay_ba) { 1833 for (i = 0; i < ppdu_info->ppdu_stats.common.num_users; i++) { 1834 peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id; 1835 spin_lock_bh(&ab->base_lock); 1836 peer = ath12k_peer_find_by_id(ab, peer_id); 1837 if (!peer) { 1838 spin_unlock_bh(&ab->base_lock); 1839 continue; 1840 } 1841 1842 usr_stats = &ppdu_info->ppdu_stats.user_stats[i]; 1843 if (usr_stats->delay_ba) 1844 ath12k_copy_to_delay_stats(peer, usr_stats); 1845 spin_unlock_bh(&ab->base_lock); 1846 } 1847 } 1848 1849 /* restore all peers' data rate tlv to mu-bar tlv */ 1850 if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_BAR && 1851 (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON))) { 1852 for (i = 0; i < ppdu_info->bar_num_users; i++) { 1853 peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id; 1854 spin_lock_bh(&ab->base_lock); 1855 peer = ath12k_peer_find_by_id(ab, peer_id); 1856 if (!peer) { 1857 spin_unlock_bh(&ab->base_lock); 1858 continue; 1859 } 1860 1861 usr_stats = &ppdu_info->ppdu_stats.user_stats[i]; 1862 if (peer->delayba_flag) 1863 ath12k_copy_to_bar(peer, usr_stats); 1864 spin_unlock_bh(&ab->base_lock); 1865 } 1866 } 1867 1868 spin_unlock_bh(&ar->data_lock); 1869 1870 exit: 1871 rcu_read_unlock(); 1872 1873 return ret; 1874 } 1875 1876 static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab, 1877 struct sk_buff *skb) 1878 { 1879 struct ath12k_htt_mlo_offset_msg *msg; 1880 struct ath12k_pdev *pdev; 1881 struct ath12k *ar; 1882 u8 pdev_id; 1883 1884 msg = (struct ath12k_htt_mlo_offset_msg *)skb->data; 1885 pdev_id = u32_get_bits(__le32_to_cpu(msg->info), 1886 HTT_T2H_MLO_OFFSET_INFO_PDEV_ID); 1887 1888 rcu_read_lock(); 1889 ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id); 1890 if (!ar) { 1891 /* It is possible that the ar is not yet active (started). 
1892 * The above function will only look for the active pdev 1893 * and hence %NULL return is possible. Just silently 1894 * discard this message 1895 */ 1896 goto exit; 1897 } 1898 1899 spin_lock_bh(&ar->data_lock); 1900 pdev = ar->pdev; 1901 1902 pdev->timestamp.info = __le32_to_cpu(msg->info); 1903 pdev->timestamp.sync_timestamp_lo_us = __le32_to_cpu(msg->sync_timestamp_lo_us); 1904 pdev->timestamp.sync_timestamp_hi_us = __le32_to_cpu(msg->sync_timestamp_hi_us); 1905 pdev->timestamp.mlo_offset_lo = __le32_to_cpu(msg->mlo_offset_lo); 1906 pdev->timestamp.mlo_offset_hi = __le32_to_cpu(msg->mlo_offset_hi); 1907 pdev->timestamp.mlo_offset_clks = __le32_to_cpu(msg->mlo_offset_clks); 1908 pdev->timestamp.mlo_comp_clks = __le32_to_cpu(msg->mlo_comp_clks); 1909 pdev->timestamp.mlo_comp_timer = __le32_to_cpu(msg->mlo_comp_timer); 1910 1911 spin_unlock_bh(&ar->data_lock); 1912 exit: 1913 rcu_read_unlock(); 1914 } 1915 1916 void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab, 1917 struct sk_buff *skb) 1918 { 1919 struct ath12k_dp *dp = &ab->dp; 1920 struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data; 1921 enum htt_t2h_msg_type type; 1922 u16 peer_id; 1923 u8 vdev_id; 1924 u8 mac_addr[ETH_ALEN]; 1925 u16 peer_mac_h16; 1926 u16 ast_hash = 0; 1927 u16 hw_peer_id; 1928 1929 type = le32_get_bits(resp->version_msg.version, HTT_T2H_MSG_TYPE); 1930 1931 ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type); 1932 1933 switch (type) { 1934 case HTT_T2H_MSG_TYPE_VERSION_CONF: 1935 dp->htt_tgt_ver_major = le32_get_bits(resp->version_msg.version, 1936 HTT_T2H_VERSION_CONF_MAJOR); 1937 dp->htt_tgt_ver_minor = le32_get_bits(resp->version_msg.version, 1938 HTT_T2H_VERSION_CONF_MINOR); 1939 complete(&dp->htt_tgt_version_received); 1940 break; 1941 /* TODO: remove unused peer map versions after testing */ 1942 case HTT_T2H_MSG_TYPE_PEER_MAP: 1943 vdev_id = le32_get_bits(resp->peer_map_ev.info, 1944 HTT_T2H_PEER_MAP_INFO_VDEV_ID); 1945 peer_id = le32_get_bits(resp->peer_map_ev.info, 1946 HTT_T2H_PEER_MAP_INFO_PEER_ID); 1947 peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1, 1948 HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16); 1949 ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32), 1950 peer_mac_h16, mac_addr); 1951 ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0); 1952 break; 1953 case HTT_T2H_MSG_TYPE_PEER_MAP2: 1954 vdev_id = le32_get_bits(resp->peer_map_ev.info, 1955 HTT_T2H_PEER_MAP_INFO_VDEV_ID); 1956 peer_id = le32_get_bits(resp->peer_map_ev.info, 1957 HTT_T2H_PEER_MAP_INFO_PEER_ID); 1958 peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1, 1959 HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16); 1960 ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32), 1961 peer_mac_h16, mac_addr); 1962 ast_hash = le32_get_bits(resp->peer_map_ev.info2, 1963 HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL); 1964 hw_peer_id = le32_get_bits(resp->peer_map_ev.info1, 1965 HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID); 1966 ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash, 1967 hw_peer_id); 1968 break; 1969 case HTT_T2H_MSG_TYPE_PEER_MAP3: 1970 vdev_id = le32_get_bits(resp->peer_map_ev.info, 1971 HTT_T2H_PEER_MAP_INFO_VDEV_ID); 1972 peer_id = le32_get_bits(resp->peer_map_ev.info, 1973 HTT_T2H_PEER_MAP_INFO_PEER_ID); 1974 peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1, 1975 HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16); 1976 ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32), 1977 peer_mac_h16, mac_addr); 1978 ast_hash = le32_get_bits(resp->peer_map_ev.info2, 1979 
HTT_T2H_PEER_MAP3_INFO2_AST_HASH_VAL); 1980 hw_peer_id = le32_get_bits(resp->peer_map_ev.info2, 1981 HTT_T2H_PEER_MAP3_INFO2_HW_PEER_ID); 1982 ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash, 1983 hw_peer_id); 1984 break; 1985 case HTT_T2H_MSG_TYPE_PEER_UNMAP: 1986 case HTT_T2H_MSG_TYPE_PEER_UNMAP2: 1987 peer_id = le32_get_bits(resp->peer_unmap_ev.info, 1988 HTT_T2H_PEER_UNMAP_INFO_PEER_ID); 1989 ath12k_peer_unmap_event(ab, peer_id); 1990 break; 1991 case HTT_T2H_MSG_TYPE_PPDU_STATS_IND: 1992 ath12k_htt_pull_ppdu_stats(ab, skb); 1993 break; 1994 case HTT_T2H_MSG_TYPE_EXT_STATS_CONF: 1995 ath12k_debugfs_htt_ext_stats_handler(ab, skb); 1996 break; 1997 case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND: 1998 ath12k_htt_mlo_offset_event_handler(ab, skb); 1999 break; 2000 default: 2001 ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt event %d not handled\n", 2002 type); 2003 break; 2004 } 2005 2006 dev_kfree_skb_any(skb); 2007 } 2008 2009 static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar, 2010 struct sk_buff_head *msdu_list, 2011 struct sk_buff *first, struct sk_buff *last, 2012 u8 l3pad_bytes, int msdu_len) 2013 { 2014 struct ath12k_base *ab = ar->ab; 2015 struct sk_buff *skb; 2016 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first); 2017 int buf_first_hdr_len, buf_first_len; 2018 struct hal_rx_desc *ldesc; 2019 int space_extra, rem_len, buf_len; 2020 u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; 2021 bool is_continuation; 2022 2023 /* As the msdu is spread across multiple rx buffers, 2024 * find the offset to the start of msdu for computing 2025 * the length of the msdu in the first buffer. 2026 */ 2027 buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes; 2028 buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len; 2029 2030 if (WARN_ON_ONCE(msdu_len <= buf_first_len)) { 2031 skb_put(first, buf_first_hdr_len + msdu_len); 2032 skb_pull(first, buf_first_hdr_len); 2033 return 0; 2034 } 2035 2036 ldesc = (struct hal_rx_desc *)last->data; 2037 rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, ldesc); 2038 rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, ldesc); 2039 2040 /* MSDU spans over multiple buffers because the length of the MSDU 2041 * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data 2042 * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. 2043 */ 2044 skb_put(first, DP_RX_BUFFER_SIZE); 2045 skb_pull(first, buf_first_hdr_len); 2046 2047 /* When an MSDU spread over multiple buffers MSDU_END 2048 * tlvs are valid only in the last buffer. Copy those tlvs. 
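 * Later per-MSDU handling only keeps a pointer to the first
 * buffer's descriptor (rxcb->rx_desc), so the end TLV fields it
 * needs are copied into that descriptor from the last buffer's
 * descriptor here.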
2049 */ 2050 ath12k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc); 2051 2052 space_extra = msdu_len - (buf_first_len + skb_tailroom(first)); 2053 if (space_extra > 0 && 2054 (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) { 2055 /* Free up all buffers of the MSDU */ 2056 while ((skb = __skb_dequeue(msdu_list)) != NULL) { 2057 rxcb = ATH12K_SKB_RXCB(skb); 2058 if (!rxcb->is_continuation) { 2059 dev_kfree_skb_any(skb); 2060 break; 2061 } 2062 dev_kfree_skb_any(skb); 2063 } 2064 return -ENOMEM; 2065 } 2066 2067 rem_len = msdu_len - buf_first_len; 2068 while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) { 2069 rxcb = ATH12K_SKB_RXCB(skb); 2070 is_continuation = rxcb->is_continuation; 2071 if (is_continuation) 2072 buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz; 2073 else 2074 buf_len = rem_len; 2075 2076 if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) { 2077 WARN_ON_ONCE(1); 2078 dev_kfree_skb_any(skb); 2079 return -EINVAL; 2080 } 2081 2082 skb_put(skb, buf_len + hal_rx_desc_sz); 2083 skb_pull(skb, hal_rx_desc_sz); 2084 skb_copy_from_linear_data(skb, skb_put(first, buf_len), 2085 buf_len); 2086 dev_kfree_skb_any(skb); 2087 2088 rem_len -= buf_len; 2089 if (!is_continuation) 2090 break; 2091 } 2092 2093 return 0; 2094 } 2095 2096 static struct sk_buff *ath12k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list, 2097 struct sk_buff *first) 2098 { 2099 struct sk_buff *skb; 2100 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first); 2101 2102 if (!rxcb->is_continuation) 2103 return first; 2104 2105 skb_queue_walk(msdu_list, skb) { 2106 rxcb = ATH12K_SKB_RXCB(skb); 2107 if (!rxcb->is_continuation) 2108 return skb; 2109 } 2110 2111 return NULL; 2112 } 2113 2114 static void ath12k_dp_rx_h_csum_offload(struct sk_buff *msdu, 2115 struct ath12k_dp_rx_info *rx_info) 2116 { 2117 msdu->ip_summed = (rx_info->ip_csum_fail || rx_info->l4_csum_fail) ? 
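/* The skb is marked CHECKSUM_UNNECESSARY only when neither the
 * hardware IP nor L4 checksum check failed in the rx descriptor;
 * otherwise the stack is left to verify the checksums in software.
 */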
2118 CHECKSUM_NONE : CHECKSUM_UNNECESSARY; 2119 } 2120 2121 int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar, enum hal_encrypt_type enctype) 2122 { 2123 switch (enctype) { 2124 case HAL_ENCRYPT_TYPE_OPEN: 2125 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 2126 case HAL_ENCRYPT_TYPE_TKIP_MIC: 2127 return 0; 2128 case HAL_ENCRYPT_TYPE_CCMP_128: 2129 return IEEE80211_CCMP_MIC_LEN; 2130 case HAL_ENCRYPT_TYPE_CCMP_256: 2131 return IEEE80211_CCMP_256_MIC_LEN; 2132 case HAL_ENCRYPT_TYPE_GCMP_128: 2133 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 2134 return IEEE80211_GCMP_MIC_LEN; 2135 case HAL_ENCRYPT_TYPE_WEP_40: 2136 case HAL_ENCRYPT_TYPE_WEP_104: 2137 case HAL_ENCRYPT_TYPE_WEP_128: 2138 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 2139 case HAL_ENCRYPT_TYPE_WAPI: 2140 break; 2141 } 2142 2143 ath12k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype); 2144 return 0; 2145 } 2146 2147 static int ath12k_dp_rx_crypto_param_len(struct ath12k *ar, 2148 enum hal_encrypt_type enctype) 2149 { 2150 switch (enctype) { 2151 case HAL_ENCRYPT_TYPE_OPEN: 2152 return 0; 2153 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 2154 case HAL_ENCRYPT_TYPE_TKIP_MIC: 2155 return IEEE80211_TKIP_IV_LEN; 2156 case HAL_ENCRYPT_TYPE_CCMP_128: 2157 return IEEE80211_CCMP_HDR_LEN; 2158 case HAL_ENCRYPT_TYPE_CCMP_256: 2159 return IEEE80211_CCMP_256_HDR_LEN; 2160 case HAL_ENCRYPT_TYPE_GCMP_128: 2161 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 2162 return IEEE80211_GCMP_HDR_LEN; 2163 case HAL_ENCRYPT_TYPE_WEP_40: 2164 case HAL_ENCRYPT_TYPE_WEP_104: 2165 case HAL_ENCRYPT_TYPE_WEP_128: 2166 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 2167 case HAL_ENCRYPT_TYPE_WAPI: 2168 break; 2169 } 2170 2171 ath12k_warn(ar->ab, "unsupported encryption type %d\n", enctype); 2172 return 0; 2173 } 2174 2175 static int ath12k_dp_rx_crypto_icv_len(struct ath12k *ar, 2176 enum hal_encrypt_type enctype) 2177 { 2178 switch (enctype) { 2179 case HAL_ENCRYPT_TYPE_OPEN: 2180 case HAL_ENCRYPT_TYPE_CCMP_128: 2181 case HAL_ENCRYPT_TYPE_CCMP_256: 2182 case HAL_ENCRYPT_TYPE_GCMP_128: 2183 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 2184 return 0; 2185 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 2186 case HAL_ENCRYPT_TYPE_TKIP_MIC: 2187 return IEEE80211_TKIP_ICV_LEN; 2188 case HAL_ENCRYPT_TYPE_WEP_40: 2189 case HAL_ENCRYPT_TYPE_WEP_104: 2190 case HAL_ENCRYPT_TYPE_WEP_128: 2191 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 2192 case HAL_ENCRYPT_TYPE_WAPI: 2193 break; 2194 } 2195 2196 ath12k_warn(ar->ab, "unsupported encryption type %d\n", enctype); 2197 return 0; 2198 } 2199 2200 static void ath12k_dp_rx_h_undecap_nwifi(struct ath12k *ar, 2201 struct sk_buff *msdu, 2202 enum hal_encrypt_type enctype, 2203 struct ieee80211_rx_status *status) 2204 { 2205 struct ath12k_base *ab = ar->ab; 2206 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 2207 u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN]; 2208 struct ieee80211_hdr *hdr; 2209 size_t hdr_len; 2210 u8 *crypto_hdr; 2211 u16 qos_ctl; 2212 2213 /* pull decapped header */ 2214 hdr = (struct ieee80211_hdr *)msdu->data; 2215 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2216 skb_pull(msdu, hdr_len); 2217 2218 /* Rebuild qos header */ 2219 hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA); 2220 2221 /* Reset the order bit as the HT_Control header is stripped */ 2222 hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER)); 2223 2224 qos_ctl = rxcb->tid; 2225 2226 if (ath12k_dp_rx_h_mesh_ctl_present(ab, rxcb->rx_desc)) 2227 qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT; 2228 2229 /* TODO: Add other QoS ctl fields when required */ 2230 2231 /* copy decap header before 
overwriting for reuse below */ 2232 memcpy(decap_hdr, hdr, hdr_len); 2233 2234 /* Rebuild crypto header for mac80211 use */ 2235 if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 2236 crypto_hdr = skb_push(msdu, ath12k_dp_rx_crypto_param_len(ar, enctype)); 2237 ath12k_dp_rx_desc_get_crypto_header(ar->ab, 2238 rxcb->rx_desc, crypto_hdr, 2239 enctype); 2240 } 2241 2242 memcpy(skb_push(msdu, 2243 IEEE80211_QOS_CTL_LEN), &qos_ctl, 2244 IEEE80211_QOS_CTL_LEN); 2245 memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len); 2246 } 2247 2248 static void ath12k_dp_rx_h_undecap_raw(struct ath12k *ar, struct sk_buff *msdu, 2249 enum hal_encrypt_type enctype, 2250 struct ieee80211_rx_status *status, 2251 bool decrypted) 2252 { 2253 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 2254 struct ieee80211_hdr *hdr; 2255 size_t hdr_len; 2256 size_t crypto_len; 2257 2258 if (!rxcb->is_first_msdu || 2259 !(rxcb->is_first_msdu && rxcb->is_last_msdu)) { 2260 WARN_ON_ONCE(1); 2261 return; 2262 } 2263 2264 skb_trim(msdu, msdu->len - FCS_LEN); 2265 2266 if (!decrypted) 2267 return; 2268 2269 hdr = (void *)msdu->data; 2270 2271 /* Tail */ 2272 if (status->flag & RX_FLAG_IV_STRIPPED) { 2273 skb_trim(msdu, msdu->len - 2274 ath12k_dp_rx_crypto_mic_len(ar, enctype)); 2275 2276 skb_trim(msdu, msdu->len - 2277 ath12k_dp_rx_crypto_icv_len(ar, enctype)); 2278 } else { 2279 /* MIC */ 2280 if (status->flag & RX_FLAG_MIC_STRIPPED) 2281 skb_trim(msdu, msdu->len - 2282 ath12k_dp_rx_crypto_mic_len(ar, enctype)); 2283 2284 /* ICV */ 2285 if (status->flag & RX_FLAG_ICV_STRIPPED) 2286 skb_trim(msdu, msdu->len - 2287 ath12k_dp_rx_crypto_icv_len(ar, enctype)); 2288 } 2289 2290 /* MMIC */ 2291 if ((status->flag & RX_FLAG_MMIC_STRIPPED) && 2292 !ieee80211_has_morefrags(hdr->frame_control) && 2293 enctype == HAL_ENCRYPT_TYPE_TKIP_MIC) 2294 skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN); 2295 2296 /* Head */ 2297 if (status->flag & RX_FLAG_IV_STRIPPED) { 2298 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2299 crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype); 2300 2301 memmove(msdu->data + crypto_len, msdu->data, hdr_len); 2302 skb_pull(msdu, crypto_len); 2303 } 2304 } 2305 2306 static void ath12k_get_dot11_hdr_from_rx_desc(struct ath12k *ar, 2307 struct sk_buff *msdu, 2308 struct ath12k_skb_rxcb *rxcb, 2309 struct ieee80211_rx_status *status, 2310 enum hal_encrypt_type enctype) 2311 { 2312 struct hal_rx_desc *rx_desc = rxcb->rx_desc; 2313 struct ath12k_base *ab = ar->ab; 2314 size_t hdr_len, crypto_len; 2315 struct ieee80211_hdr hdr; 2316 __le16 qos_ctl; 2317 u8 *crypto_hdr, mesh_ctrl; 2318 2319 ath12k_dp_rx_desc_get_dot11_hdr(ab, rx_desc, &hdr); 2320 hdr_len = ieee80211_hdrlen(hdr.frame_control); 2321 mesh_ctrl = ath12k_dp_rx_h_mesh_ctl_present(ab, rx_desc); 2322 2323 if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 2324 crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype); 2325 crypto_hdr = skb_push(msdu, crypto_len); 2326 ath12k_dp_rx_desc_get_crypto_header(ab, rx_desc, crypto_hdr, enctype); 2327 } 2328 2329 skb_push(msdu, hdr_len); 2330 memcpy(msdu->data, &hdr, min(hdr_len, sizeof(hdr))); 2331 2332 if (rxcb->is_mcbc) 2333 status->flag &= ~RX_FLAG_PN_VALIDATED; 2334 2335 /* Add QOS header */ 2336 if (ieee80211_is_data_qos(hdr.frame_control)) { 2337 struct ieee80211_hdr *qos_ptr = (struct ieee80211_hdr *)msdu->data; 2338 2339 qos_ctl = cpu_to_le16(rxcb->tid & IEEE80211_QOS_CTL_TID_MASK); 2340 if (mesh_ctrl) 2341 qos_ctl |= cpu_to_le16(IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT); 2342 2343 memcpy(ieee80211_get_qos_ctl(qos_ptr), 
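/* The decapped frame no longer carries its original QoS control
 * field, so it is rebuilt here from the TID in the rx descriptor
 * (plus the mesh control present bit) and written into the
 * freshly pushed 802.11 header.
 */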
&qos_ctl, IEEE80211_QOS_CTL_LEN); 2344 } 2345 } 2346 2347 static void ath12k_dp_rx_h_undecap_eth(struct ath12k *ar, 2348 struct sk_buff *msdu, 2349 enum hal_encrypt_type enctype, 2350 struct ieee80211_rx_status *status) 2351 { 2352 struct ieee80211_hdr *hdr; 2353 struct ethhdr *eth; 2354 u8 da[ETH_ALEN]; 2355 u8 sa[ETH_ALEN]; 2356 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 2357 struct ath12k_dp_rx_rfc1042_hdr rfc = {0xaa, 0xaa, 0x03, {0x00, 0x00, 0x00}}; 2358 2359 eth = (struct ethhdr *)msdu->data; 2360 ether_addr_copy(da, eth->h_dest); 2361 ether_addr_copy(sa, eth->h_source); 2362 rfc.snap_type = eth->h_proto; 2363 skb_pull(msdu, sizeof(*eth)); 2364 memcpy(skb_push(msdu, sizeof(rfc)), &rfc, 2365 sizeof(rfc)); 2366 ath12k_get_dot11_hdr_from_rx_desc(ar, msdu, rxcb, status, enctype); 2367 2368 /* original 802.11 header has a different DA and in 2369 * case of 4addr it may also have different SA 2370 */ 2371 hdr = (struct ieee80211_hdr *)msdu->data; 2372 ether_addr_copy(ieee80211_get_DA(hdr), da); 2373 ether_addr_copy(ieee80211_get_SA(hdr), sa); 2374 } 2375 2376 static void ath12k_dp_rx_h_undecap(struct ath12k *ar, struct sk_buff *msdu, 2377 struct hal_rx_desc *rx_desc, 2378 enum hal_encrypt_type enctype, 2379 struct ieee80211_rx_status *status, 2380 bool decrypted) 2381 { 2382 struct ath12k_base *ab = ar->ab; 2383 u8 decap; 2384 struct ethhdr *ehdr; 2385 2386 decap = ath12k_dp_rx_h_decap_type(ab, rx_desc); 2387 2388 switch (decap) { 2389 case DP_RX_DECAP_TYPE_NATIVE_WIFI: 2390 ath12k_dp_rx_h_undecap_nwifi(ar, msdu, enctype, status); 2391 break; 2392 case DP_RX_DECAP_TYPE_RAW: 2393 ath12k_dp_rx_h_undecap_raw(ar, msdu, enctype, status, 2394 decrypted); 2395 break; 2396 case DP_RX_DECAP_TYPE_ETHERNET2_DIX: 2397 ehdr = (struct ethhdr *)msdu->data; 2398 2399 /* mac80211 allows fast path only for authorized STA */ 2400 if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) { 2401 ATH12K_SKB_RXCB(msdu)->is_eapol = true; 2402 ath12k_dp_rx_h_undecap_eth(ar, msdu, enctype, status); 2403 break; 2404 } 2405 2406 /* PN for mcast packets will be validated in mac80211; 2407 * remove eth header and add 802.11 header. 
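 * mac80211 performs that PN replay check on the 802.11 frame,
 * which is why the dot11 header is restored here; note that
 * RX_FLAG_PN_VALIDATED is also cleared for mcbc frames when the
 * header is rebuilt from the rx descriptor.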
2408 */ 2409 if (ATH12K_SKB_RXCB(msdu)->is_mcbc && decrypted) 2410 ath12k_dp_rx_h_undecap_eth(ar, msdu, enctype, status); 2411 break; 2412 case DP_RX_DECAP_TYPE_8023: 2413 /* TODO: Handle undecap for these formats */ 2414 break; 2415 } 2416 } 2417 2418 struct ath12k_peer * 2419 ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu, 2420 struct ath12k_dp_rx_info *rx_info) 2421 { 2422 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 2423 struct ath12k_peer *peer = NULL; 2424 2425 lockdep_assert_held(&ab->base_lock); 2426 2427 if (rxcb->peer_id) 2428 peer = ath12k_peer_find_by_id(ab, rxcb->peer_id); 2429 2430 if (peer) 2431 return peer; 2432 2433 if (rx_info->addr2_present) 2434 peer = ath12k_peer_find_by_addr(ab, rx_info->addr2); 2435 2436 return peer; 2437 } 2438 2439 static void ath12k_dp_rx_h_mpdu(struct ath12k *ar, 2440 struct sk_buff *msdu, 2441 struct hal_rx_desc *rx_desc, 2442 struct ath12k_dp_rx_info *rx_info) 2443 { 2444 struct ath12k_base *ab = ar->ab; 2445 struct ath12k_skb_rxcb *rxcb; 2446 enum hal_encrypt_type enctype; 2447 bool is_decrypted = false; 2448 struct ieee80211_hdr *hdr; 2449 struct ath12k_peer *peer; 2450 struct ieee80211_rx_status *rx_status = rx_info->rx_status; 2451 u32 err_bitmap; 2452 2453 /* PN for multicast packets will be checked in mac80211 */ 2454 rxcb = ATH12K_SKB_RXCB(msdu); 2455 rxcb->is_mcbc = rx_info->is_mcbc; 2456 2457 if (rxcb->is_mcbc) 2458 rxcb->peer_id = rx_info->peer_id; 2459 2460 spin_lock_bh(&ar->ab->base_lock); 2461 peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu, rx_info); 2462 if (peer) { 2463 /* resetting mcbc bit because mcbc packets are unicast 2464 * packets only for AP as STA sends unicast packets. 2465 */ 2466 rxcb->is_mcbc = rxcb->is_mcbc && !peer->ucast_ra_only; 2467 2468 if (rxcb->is_mcbc) 2469 enctype = peer->sec_type_grp; 2470 else 2471 enctype = peer->sec_type; 2472 } else { 2473 enctype = HAL_ENCRYPT_TYPE_OPEN; 2474 } 2475 spin_unlock_bh(&ar->ab->base_lock); 2476 2477 err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc); 2478 if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap) 2479 is_decrypted = ath12k_dp_rx_h_is_decrypted(ab, rx_desc); 2480 2481 /* Clear per-MPDU flags while leaving per-PPDU flags intact */ 2482 rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC | 2483 RX_FLAG_MMIC_ERROR | 2484 RX_FLAG_DECRYPTED | 2485 RX_FLAG_IV_STRIPPED | 2486 RX_FLAG_MMIC_STRIPPED); 2487 2488 if (err_bitmap & HAL_RX_MPDU_ERR_FCS) 2489 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; 2490 if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) 2491 rx_status->flag |= RX_FLAG_MMIC_ERROR; 2492 2493 if (is_decrypted) { 2494 rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED; 2495 2496 if (rx_info->is_mcbc) 2497 rx_status->flag |= RX_FLAG_MIC_STRIPPED | 2498 RX_FLAG_ICV_STRIPPED; 2499 else 2500 rx_status->flag |= RX_FLAG_IV_STRIPPED | 2501 RX_FLAG_PN_VALIDATED; 2502 } 2503 2504 ath12k_dp_rx_h_csum_offload(msdu, rx_info); 2505 ath12k_dp_rx_h_undecap(ar, msdu, rx_desc, 2506 enctype, rx_status, is_decrypted); 2507 2508 if (!is_decrypted || rx_info->is_mcbc) 2509 return; 2510 2511 if (rx_info->decap_type != DP_RX_DECAP_TYPE_ETHERNET2_DIX) { 2512 hdr = (void *)msdu->data; 2513 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); 2514 } 2515 } 2516 2517 static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info) 2518 { 2519 struct ieee80211_supported_band *sband; 2520 struct ieee80211_rx_status *rx_status = rx_info->rx_status; 2521 enum rx_msdu_start_pkt_type pkt_type = rx_info->pkt_type; 2522 u8 bw = rx_info->bw, 
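/* pkt_type from the rx descriptor decides how the raw
 * rate_mcs/nss/bw/sgi values below are translated into mac80211
 * rx_status fields: a rate table lookup for 11a/11b, otherwise an
 * MCS index plus encoding (HT/VHT/HE/EHT) and guard interval.
 */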
sgi = rx_info->sgi; 2523 u8 rate_mcs = rx_info->rate_mcs, nss = rx_info->nss; 2524 bool is_cck; 2525 2526 switch (pkt_type) { 2527 case RX_MSDU_START_PKT_TYPE_11A: 2528 case RX_MSDU_START_PKT_TYPE_11B: 2529 is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B); 2530 sband = &ar->mac.sbands[rx_status->band]; 2531 rx_status->rate_idx = ath12k_mac_hw_rate_to_idx(sband, rate_mcs, 2532 is_cck); 2533 break; 2534 case RX_MSDU_START_PKT_TYPE_11N: 2535 rx_status->encoding = RX_ENC_HT; 2536 if (rate_mcs > ATH12K_HT_MCS_MAX) { 2537 ath12k_warn(ar->ab, 2538 "Received with invalid mcs in HT mode %d\n", 2539 rate_mcs); 2540 break; 2541 } 2542 rx_status->rate_idx = rate_mcs + (8 * (nss - 1)); 2543 if (sgi) 2544 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 2545 rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw); 2546 break; 2547 case RX_MSDU_START_PKT_TYPE_11AC: 2548 rx_status->encoding = RX_ENC_VHT; 2549 rx_status->rate_idx = rate_mcs; 2550 if (rate_mcs > ATH12K_VHT_MCS_MAX) { 2551 ath12k_warn(ar->ab, 2552 "Received with invalid mcs in VHT mode %d\n", 2553 rate_mcs); 2554 break; 2555 } 2556 rx_status->nss = nss; 2557 if (sgi) 2558 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 2559 rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw); 2560 break; 2561 case RX_MSDU_START_PKT_TYPE_11AX: 2562 rx_status->rate_idx = rate_mcs; 2563 if (rate_mcs > ATH12K_HE_MCS_MAX) { 2564 ath12k_warn(ar->ab, 2565 "Received with invalid mcs in HE mode %d\n", 2566 rate_mcs); 2567 break; 2568 } 2569 rx_status->encoding = RX_ENC_HE; 2570 rx_status->nss = nss; 2571 rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi); 2572 rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw); 2573 break; 2574 case RX_MSDU_START_PKT_TYPE_11BE: 2575 rx_status->rate_idx = rate_mcs; 2576 2577 if (rate_mcs > ATH12K_EHT_MCS_MAX) { 2578 ath12k_warn(ar->ab, 2579 "Received with invalid mcs in EHT mode %d\n", 2580 rate_mcs); 2581 break; 2582 } 2583 2584 rx_status->encoding = RX_ENC_EHT; 2585 rx_status->nss = nss; 2586 rx_status->eht.gi = ath12k_mac_eht_gi_to_nl80211_eht_gi(sgi); 2587 rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw); 2588 break; 2589 default: 2590 break; 2591 } 2592 } 2593 2594 void ath12k_dp_rx_h_fetch_info(struct ath12k_base *ab, struct hal_rx_desc *rx_desc, 2595 struct ath12k_dp_rx_info *rx_info) 2596 { 2597 rx_info->ip_csum_fail = ath12k_dp_rx_h_ip_cksum_fail(ab, rx_desc); 2598 rx_info->l4_csum_fail = ath12k_dp_rx_h_l4_cksum_fail(ab, rx_desc); 2599 rx_info->is_mcbc = ath12k_dp_rx_h_is_da_mcbc(ab, rx_desc); 2600 rx_info->decap_type = ath12k_dp_rx_h_decap_type(ab, rx_desc); 2601 rx_info->pkt_type = ath12k_dp_rx_h_pkt_type(ab, rx_desc); 2602 rx_info->sgi = ath12k_dp_rx_h_sgi(ab, rx_desc); 2603 rx_info->rate_mcs = ath12k_dp_rx_h_rate_mcs(ab, rx_desc); 2604 rx_info->bw = ath12k_dp_rx_h_rx_bw(ab, rx_desc); 2605 rx_info->nss = ath12k_dp_rx_h_nss(ab, rx_desc); 2606 rx_info->tid = ath12k_dp_rx_h_tid(ab, rx_desc); 2607 rx_info->peer_id = ath12k_dp_rx_h_peer_id(ab, rx_desc); 2608 rx_info->phy_meta_data = ath12k_dp_rx_h_freq(ab, rx_desc); 2609 2610 if (ath12k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)) { 2611 ether_addr_copy(rx_info->addr2, 2612 ath12k_dp_rxdesc_get_mpdu_start_addr2(ab, rx_desc)); 2613 rx_info->addr2_present = true; 2614 } 2615 2616 ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "rx_desc: ", 2617 rx_desc, sizeof(*rx_desc)); 2618 } 2619 2620 void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info) 2621 { 2622 struct ieee80211_rx_status *rx_status = rx_info->rx_status; 2623 u8 channel_num; 2624 u32 center_freq, meta_data; 2625 struct 
ieee80211_channel *channel; 2626 2627 rx_status->freq = 0; 2628 rx_status->rate_idx = 0; 2629 rx_status->nss = 0; 2630 rx_status->encoding = RX_ENC_LEGACY; 2631 rx_status->bw = RATE_INFO_BW_20; 2632 rx_status->enc_flags = 0; 2633 2634 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; 2635 2636 meta_data = rx_info->phy_meta_data; 2637 channel_num = meta_data; 2638 center_freq = meta_data >> 16; 2639 2640 rx_status->band = NUM_NL80211_BANDS; 2641 2642 if (center_freq >= ATH12K_MIN_6GHZ_FREQ && 2643 center_freq <= ATH12K_MAX_6GHZ_FREQ) { 2644 rx_status->band = NL80211_BAND_6GHZ; 2645 rx_status->freq = center_freq; 2646 } else if (channel_num >= 1 && channel_num <= 14) { 2647 rx_status->band = NL80211_BAND_2GHZ; 2648 } else if (channel_num >= 36 && channel_num <= 173) { 2649 rx_status->band = NL80211_BAND_5GHZ; 2650 } 2651 2652 if (unlikely(rx_status->band == NUM_NL80211_BANDS || 2653 !ath12k_ar_to_hw(ar)->wiphy->bands[rx_status->band])) { 2654 ath12k_warn(ar->ab, "sband is NULL for status band %d channel_num %d center_freq %d pdev_id %d\n", 2655 rx_status->band, channel_num, center_freq, ar->pdev_idx); 2656 2657 spin_lock_bh(&ar->data_lock); 2658 channel = ar->rx_channel; 2659 if (channel) { 2660 rx_status->band = channel->band; 2661 channel_num = 2662 ieee80211_frequency_to_channel(channel->center_freq); 2663 rx_status->freq = ieee80211_channel_to_frequency(channel_num, 2664 rx_status->band); 2665 } else { 2666 ath12k_err(ar->ab, "unable to determine channel, band for rx packet"); 2667 } 2668 spin_unlock_bh(&ar->data_lock); 2669 goto h_rate; 2670 } 2671 2672 if (rx_status->band != NL80211_BAND_6GHZ) 2673 rx_status->freq = ieee80211_channel_to_frequency(channel_num, 2674 rx_status->band); 2675 2676 h_rate: 2677 ath12k_dp_rx_h_rate(ar, rx_info); 2678 } 2679 2680 static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *napi, 2681 struct sk_buff *msdu, 2682 struct ath12k_dp_rx_info *rx_info) 2683 { 2684 struct ath12k_base *ab = ar->ab; 2685 struct ieee80211_rx_status *rx_status; 2686 struct ieee80211_sta *pubsta; 2687 struct ath12k_peer *peer; 2688 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 2689 struct ieee80211_rx_status *status = rx_info->rx_status; 2690 u8 decap = rx_info->decap_type; 2691 bool is_mcbc = rxcb->is_mcbc; 2692 bool is_eapol = rxcb->is_eapol; 2693 2694 spin_lock_bh(&ab->base_lock); 2695 peer = ath12k_dp_rx_h_find_peer(ab, msdu, rx_info); 2696 2697 pubsta = peer ? peer->sta : NULL; 2698 2699 if (pubsta && pubsta->valid_links) { 2700 status->link_valid = 1; 2701 status->link_id = peer->link_id; 2702 } 2703 2704 spin_unlock_bh(&ab->base_lock); 2705 2706 ath12k_dbg(ab, ATH12K_DBG_DATA, 2707 "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", 2708 msdu, 2709 msdu->len, 2710 peer ? peer->addr : NULL, 2711 rxcb->tid, 2712 is_mcbc ? "mcast" : "ucast", 2713 ath12k_dp_rx_h_seq_no(ab, rxcb->rx_desc), 2714 (status->encoding == RX_ENC_LEGACY) ? "legacy" : "", 2715 (status->encoding == RX_ENC_HT) ? "ht" : "", 2716 (status->encoding == RX_ENC_VHT) ? "vht" : "", 2717 (status->encoding == RX_ENC_HE) ? "he" : "", 2718 (status->encoding == RX_ENC_EHT) ? "eht" : "", 2719 (status->bw == RATE_INFO_BW_40) ? "40" : "", 2720 (status->bw == RATE_INFO_BW_80) ? "80" : "", 2721 (status->bw == RATE_INFO_BW_160) ? "160" : "", 2722 (status->bw == RATE_INFO_BW_320) ? "320" : "", 2723 status->enc_flags & RX_ENC_FLAG_SHORT_GI ? 
"sgi " : "", 2724 status->rate_idx, 2725 status->nss, 2726 status->freq, 2727 status->band, status->flag, 2728 !!(status->flag & RX_FLAG_FAILED_FCS_CRC), 2729 !!(status->flag & RX_FLAG_MMIC_ERROR), 2730 !!(status->flag & RX_FLAG_AMSDU_MORE)); 2731 2732 ath12k_dbg_dump(ab, ATH12K_DBG_DP_RX, NULL, "dp rx msdu: ", 2733 msdu->data, msdu->len); 2734 2735 rx_status = IEEE80211_SKB_RXCB(msdu); 2736 *rx_status = *status; 2737 2738 /* TODO: trace rx packet */ 2739 2740 /* PN for multicast packets are not validate in HW, 2741 * so skip 802.3 rx path 2742 * Also, fast_rx expects the STA to be authorized, hence 2743 * eapol packets are sent in slow path. 2744 */ 2745 if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol && 2746 !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED)) 2747 rx_status->flag |= RX_FLAG_8023; 2748 2749 ieee80211_rx_napi(ath12k_ar_to_hw(ar), pubsta, msdu, napi); 2750 } 2751 2752 static bool ath12k_dp_rx_check_nwifi_hdr_len_valid(struct ath12k_base *ab, 2753 struct hal_rx_desc *rx_desc, 2754 struct sk_buff *msdu) 2755 { 2756 struct ieee80211_hdr *hdr; 2757 u8 decap_type; 2758 u32 hdr_len; 2759 2760 decap_type = ath12k_dp_rx_h_decap_type(ab, rx_desc); 2761 if (decap_type != DP_RX_DECAP_TYPE_NATIVE_WIFI) 2762 return true; 2763 2764 hdr = (struct ieee80211_hdr *)msdu->data; 2765 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2766 2767 if ((likely(hdr_len <= DP_MAX_NWIFI_HDR_LEN))) 2768 return true; 2769 2770 ab->device_stats.invalid_rbm++; 2771 WARN_ON_ONCE(1); 2772 return false; 2773 } 2774 2775 static int ath12k_dp_rx_process_msdu(struct ath12k *ar, 2776 struct sk_buff *msdu, 2777 struct sk_buff_head *msdu_list, 2778 struct ath12k_dp_rx_info *rx_info) 2779 { 2780 struct ath12k_base *ab = ar->ab; 2781 struct hal_rx_desc *rx_desc, *lrx_desc; 2782 struct ath12k_skb_rxcb *rxcb; 2783 struct sk_buff *last_buf; 2784 u8 l3_pad_bytes; 2785 u16 msdu_len; 2786 int ret; 2787 u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; 2788 2789 last_buf = ath12k_dp_rx_get_msdu_last_buf(msdu_list, msdu); 2790 if (!last_buf) { 2791 ath12k_warn(ab, 2792 "No valid Rx buffer to access MSDU_END tlv\n"); 2793 ret = -EIO; 2794 goto free_out; 2795 } 2796 2797 rx_desc = (struct hal_rx_desc *)msdu->data; 2798 lrx_desc = (struct hal_rx_desc *)last_buf->data; 2799 if (!ath12k_dp_rx_h_msdu_done(ab, lrx_desc)) { 2800 ath12k_warn(ab, "msdu_done bit in msdu_end is not set\n"); 2801 ret = -EIO; 2802 goto free_out; 2803 } 2804 2805 rxcb = ATH12K_SKB_RXCB(msdu); 2806 rxcb->rx_desc = rx_desc; 2807 msdu_len = ath12k_dp_rx_h_msdu_len(ab, lrx_desc); 2808 l3_pad_bytes = ath12k_dp_rx_h_l3pad(ab, lrx_desc); 2809 2810 if (rxcb->is_frag) { 2811 skb_pull(msdu, hal_rx_desc_sz); 2812 } else if (!rxcb->is_continuation) { 2813 if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) { 2814 ret = -EINVAL; 2815 ath12k_warn(ab, "invalid msdu len %u\n", msdu_len); 2816 ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", rx_desc, 2817 sizeof(*rx_desc)); 2818 goto free_out; 2819 } 2820 skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len); 2821 skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes); 2822 } else { 2823 ret = ath12k_dp_rx_msdu_coalesce(ar, msdu_list, 2824 msdu, last_buf, 2825 l3_pad_bytes, msdu_len); 2826 if (ret) { 2827 ath12k_warn(ab, 2828 "failed to coalesce msdu rx buffer%d\n", ret); 2829 goto free_out; 2830 } 2831 } 2832 2833 if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, rx_desc, msdu))) { 2834 ret = -EINVAL; 2835 goto free_out; 2836 } 2837 2838 ath12k_dp_rx_h_fetch_info(ab, rx_desc, rx_info); 2839 ath12k_dp_rx_h_ppdu(ar, 
rx_info); 2840 ath12k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_info); 2841 2842 rx_info->rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED; 2843 2844 return 0; 2845 2846 free_out: 2847 return ret; 2848 } 2849 2850 static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab, 2851 struct napi_struct *napi, 2852 struct sk_buff_head *msdu_list, 2853 int ring_id) 2854 { 2855 struct ath12k_hw_group *ag = ab->ag; 2856 struct ieee80211_rx_status rx_status = {}; 2857 struct ath12k_skb_rxcb *rxcb; 2858 struct sk_buff *msdu; 2859 struct ath12k *ar; 2860 struct ath12k_hw_link *hw_links = ag->hw_links; 2861 struct ath12k_base *partner_ab; 2862 struct ath12k_dp_rx_info rx_info; 2863 u8 hw_link_id, pdev_id; 2864 int ret; 2865 2866 if (skb_queue_empty(msdu_list)) 2867 return; 2868 2869 rx_info.addr2_present = false; 2870 rx_info.rx_status = &rx_status; 2871 2872 rcu_read_lock(); 2873 2874 while ((msdu = __skb_dequeue(msdu_list))) { 2875 rxcb = ATH12K_SKB_RXCB(msdu); 2876 hw_link_id = rxcb->hw_link_id; 2877 partner_ab = ath12k_ag_to_ab(ag, 2878 hw_links[hw_link_id].device_id); 2879 pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params, 2880 hw_links[hw_link_id].pdev_idx); 2881 ar = partner_ab->pdevs[pdev_id].ar; 2882 if (!rcu_dereference(partner_ab->pdevs_active[pdev_id])) { 2883 dev_kfree_skb_any(msdu); 2884 continue; 2885 } 2886 2887 if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) { 2888 dev_kfree_skb_any(msdu); 2889 continue; 2890 } 2891 2892 ret = ath12k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_info); 2893 if (ret) { 2894 ath12k_dbg(ab, ATH12K_DBG_DATA, 2895 "Unable to process msdu %d", ret); 2896 dev_kfree_skb_any(msdu); 2897 continue; 2898 } 2899 2900 ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_info); 2901 } 2902 2903 rcu_read_unlock(); 2904 } 2905 2906 static u16 ath12k_dp_rx_get_peer_id(struct ath12k_base *ab, 2907 enum ath12k_peer_metadata_version ver, 2908 __le32 peer_metadata) 2909 { 2910 switch (ver) { 2911 default: 2912 ath12k_warn(ab, "Unknown peer metadata version: %d", ver); 2913 fallthrough; 2914 case ATH12K_PEER_METADATA_V0: 2915 return le32_get_bits(peer_metadata, 2916 RX_MPDU_DESC_META_DATA_V0_PEER_ID); 2917 case ATH12K_PEER_METADATA_V1: 2918 return le32_get_bits(peer_metadata, 2919 RX_MPDU_DESC_META_DATA_V1_PEER_ID); 2920 case ATH12K_PEER_METADATA_V1A: 2921 return le32_get_bits(peer_metadata, 2922 RX_MPDU_DESC_META_DATA_V1A_PEER_ID); 2923 case ATH12K_PEER_METADATA_V1B: 2924 return le32_get_bits(peer_metadata, 2925 RX_MPDU_DESC_META_DATA_V1B_PEER_ID); 2926 } 2927 } 2928 2929 int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id, 2930 struct napi_struct *napi, int budget) 2931 { 2932 struct ath12k_hw_group *ag = ab->ag; 2933 struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES]; 2934 struct ath12k_hw_link *hw_links = ag->hw_links; 2935 int num_buffs_reaped[ATH12K_MAX_DEVICES] = {}; 2936 struct ath12k_rx_desc_info *desc_info; 2937 struct ath12k_dp *dp = &ab->dp; 2938 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; 2939 struct hal_reo_dest_ring *desc; 2940 struct ath12k_base *partner_ab; 2941 struct sk_buff_head msdu_list; 2942 struct ath12k_skb_rxcb *rxcb; 2943 int total_msdu_reaped = 0; 2944 u8 hw_link_id, device_id; 2945 struct hal_srng *srng; 2946 struct sk_buff *msdu; 2947 bool done = false; 2948 u64 desc_va; 2949 2950 __skb_queue_head_init(&msdu_list); 2951 2952 for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) 2953 INIT_LIST_HEAD(&rx_desc_used_list[device_id]); 2954 2955 srng = 
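/* Each REO destination ring is backed by a HAL SRNG; entries are
 * reaped under srng->lock and the collected MSDU list is only
 * handed to the rx path after the ring access window is closed.
 */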
&ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id]; 2956 2957 spin_lock_bh(&srng->lock); 2958 2959 try_again: 2960 ath12k_hal_srng_access_begin(ab, srng); 2961 2962 while ((desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) { 2963 struct rx_mpdu_desc *mpdu_info; 2964 struct rx_msdu_desc *msdu_info; 2965 enum hal_reo_dest_ring_push_reason push_reason; 2966 u32 cookie; 2967 2968 cookie = le32_get_bits(desc->buf_addr_info.info1, 2969 BUFFER_ADDR_INFO1_SW_COOKIE); 2970 2971 hw_link_id = le32_get_bits(desc->info0, 2972 HAL_REO_DEST_RING_INFO0_SRC_LINK_ID); 2973 2974 desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 | 2975 le32_to_cpu(desc->buf_va_lo)); 2976 desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va); 2977 2978 device_id = hw_links[hw_link_id].device_id; 2979 partner_ab = ath12k_ag_to_ab(ag, device_id); 2980 if (unlikely(!partner_ab)) { 2981 if (desc_info->skb) { 2982 dev_kfree_skb_any(desc_info->skb); 2983 desc_info->skb = NULL; 2984 } 2985 2986 continue; 2987 } 2988 2989 /* retry manual desc retrieval */ 2990 if (!desc_info) { 2991 desc_info = ath12k_dp_get_rx_desc(partner_ab, cookie); 2992 if (!desc_info) { 2993 ath12k_warn(partner_ab, "Invalid cookie in manual descriptor retrieval: 0x%x\n", 2994 cookie); 2995 continue; 2996 } 2997 } 2998 2999 if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC) 3000 ath12k_warn(ab, "Check HW CC implementation"); 3001 3002 msdu = desc_info->skb; 3003 desc_info->skb = NULL; 3004 3005 list_add_tail(&desc_info->list, &rx_desc_used_list[device_id]); 3006 3007 rxcb = ATH12K_SKB_RXCB(msdu); 3008 dma_unmap_single(partner_ab->dev, rxcb->paddr, 3009 msdu->len + skb_tailroom(msdu), 3010 DMA_FROM_DEVICE); 3011 3012 num_buffs_reaped[device_id]++; 3013 ab->device_stats.reo_rx[ring_id][ab->device_id]++; 3014 3015 push_reason = le32_get_bits(desc->info0, 3016 HAL_REO_DEST_RING_INFO0_PUSH_REASON); 3017 if (push_reason != 3018 HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) { 3019 dev_kfree_skb_any(msdu); 3020 ab->device_stats.hal_reo_error[ring_id]++; 3021 continue; 3022 } 3023 3024 msdu_info = &desc->rx_msdu_info; 3025 mpdu_info = &desc->rx_mpdu_info; 3026 3027 rxcb->is_first_msdu = !!(le32_to_cpu(msdu_info->info0) & 3028 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU); 3029 rxcb->is_last_msdu = !!(le32_to_cpu(msdu_info->info0) & 3030 RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU); 3031 rxcb->is_continuation = !!(le32_to_cpu(msdu_info->info0) & 3032 RX_MSDU_DESC_INFO0_MSDU_CONTINUATION); 3033 rxcb->hw_link_id = hw_link_id; 3034 rxcb->peer_id = ath12k_dp_rx_get_peer_id(ab, dp->peer_metadata_ver, 3035 mpdu_info->peer_meta_data); 3036 rxcb->tid = le32_get_bits(mpdu_info->info0, 3037 RX_MPDU_DESC_INFO0_TID); 3038 3039 __skb_queue_tail(&msdu_list, msdu); 3040 3041 if (!rxcb->is_continuation) { 3042 total_msdu_reaped++; 3043 done = true; 3044 } else { 3045 done = false; 3046 } 3047 3048 if (total_msdu_reaped >= budget) 3049 break; 3050 } 3051 3052 /* Hw might have updated the head pointer after we cached it. 3053 * In this case, even though there are entries in the ring we'll 3054 * get rx_desc NULL. Give the read another try with updated cached 3055 * head pointer so that we can reap complete MPDU in the current 3056 * rx processing. 
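 * (done is left false while the last reaped entry still had the
 * MSDU continuation bit set, so this retry also covers an MPDU
 * that was only partially reaped.)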
3057 */ 3058 if (!done && ath12k_hal_srng_dst_num_free(ab, srng, true)) { 3059 ath12k_hal_srng_access_end(ab, srng); 3060 goto try_again; 3061 } 3062 3063 ath12k_hal_srng_access_end(ab, srng); 3064 3065 spin_unlock_bh(&srng->lock); 3066 3067 if (!total_msdu_reaped) 3068 goto exit; 3069 3070 for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) { 3071 if (!num_buffs_reaped[device_id]) 3072 continue; 3073 3074 partner_ab = ath12k_ag_to_ab(ag, device_id); 3075 rx_ring = &partner_ab->dp.rx_refill_buf_ring; 3076 3077 ath12k_dp_rx_bufs_replenish(partner_ab, rx_ring, 3078 &rx_desc_used_list[device_id], 3079 num_buffs_reaped[device_id]); 3080 } 3081 3082 ath12k_dp_rx_process_received_packets(ab, napi, &msdu_list, 3083 ring_id); 3084 3085 exit: 3086 return total_msdu_reaped; 3087 } 3088 3089 static void ath12k_dp_rx_frag_timer(struct timer_list *timer) 3090 { 3091 struct ath12k_dp_rx_tid *rx_tid = timer_container_of(rx_tid, timer, 3092 frag_timer); 3093 3094 spin_lock_bh(&rx_tid->ab->base_lock); 3095 if (rx_tid->last_frag_no && 3096 rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) { 3097 spin_unlock_bh(&rx_tid->ab->base_lock); 3098 return; 3099 } 3100 ath12k_dp_rx_frags_cleanup(rx_tid, true); 3101 spin_unlock_bh(&rx_tid->ab->base_lock); 3102 } 3103 3104 int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id) 3105 { 3106 struct ath12k_base *ab = ar->ab; 3107 struct crypto_shash *tfm; 3108 struct ath12k_peer *peer; 3109 struct ath12k_dp_rx_tid *rx_tid; 3110 int i; 3111 3112 tfm = crypto_alloc_shash("michael_mic", 0, 0); 3113 if (IS_ERR(tfm)) 3114 return PTR_ERR(tfm); 3115 3116 spin_lock_bh(&ab->base_lock); 3117 3118 peer = ath12k_peer_find(ab, vdev_id, peer_mac); 3119 if (!peer) { 3120 spin_unlock_bh(&ab->base_lock); 3121 crypto_free_shash(tfm); 3122 ath12k_warn(ab, "failed to find the peer to set up fragment info\n"); 3123 return -ENOENT; 3124 } 3125 3126 if (!peer->primary_link) { 3127 spin_unlock_bh(&ab->base_lock); 3128 crypto_free_shash(tfm); 3129 return 0; 3130 } 3131 3132 for (i = 0; i <= IEEE80211_NUM_TIDS; i++) { 3133 rx_tid = &peer->rx_tid[i]; 3134 rx_tid->ab = ab; 3135 timer_setup(&rx_tid->frag_timer, ath12k_dp_rx_frag_timer, 0); 3136 skb_queue_head_init(&rx_tid->rx_frags); 3137 } 3138 3139 peer->tfm_mmic = tfm; 3140 peer->dp_setup_done = true; 3141 spin_unlock_bh(&ab->base_lock); 3142 3143 return 0; 3144 } 3145 3146 static int ath12k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key, 3147 struct ieee80211_hdr *hdr, u8 *data, 3148 size_t data_len, u8 *mic) 3149 { 3150 SHASH_DESC_ON_STACK(desc, tfm); 3151 u8 mic_hdr[16] = {}; 3152 u8 tid = 0; 3153 int ret; 3154 3155 if (!tfm) 3156 return -EINVAL; 3157 3158 desc->tfm = tfm; 3159 3160 ret = crypto_shash_setkey(tfm, key, 8); 3161 if (ret) 3162 goto out; 3163 3164 ret = crypto_shash_init(desc); 3165 if (ret) 3166 goto out; 3167 3168 /* TKIP MIC header */ 3169 memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN); 3170 memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN); 3171 if (ieee80211_is_data_qos(hdr->frame_control)) 3172 tid = ieee80211_get_tid(hdr); 3173 mic_hdr[12] = tid; 3174 3175 ret = crypto_shash_update(desc, mic_hdr, 16); 3176 if (ret) 3177 goto out; 3178 ret = crypto_shash_update(desc, data, data_len); 3179 if (ret) 3180 goto out; 3181 ret = crypto_shash_final(desc, mic); 3182 out: 3183 shash_desc_zero(desc); 3184 return ret; 3185 } 3186 3187 static int ath12k_dp_rx_h_verify_tkip_mic(struct ath12k *ar, struct ath12k_peer *peer, 3188 struct sk_buff *msdu) 3189 { 3190 struct 
ath12k_base *ab = ar->ab; 3191 struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data; 3192 struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu); 3193 struct ieee80211_key_conf *key_conf; 3194 struct ieee80211_hdr *hdr; 3195 struct ath12k_dp_rx_info rx_info; 3196 u8 mic[IEEE80211_CCMP_MIC_LEN]; 3197 int head_len, tail_len, ret; 3198 size_t data_len; 3199 u32 hdr_len, hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; 3200 u8 *key, *data; 3201 u8 key_idx; 3202 3203 if (ath12k_dp_rx_h_enctype(ab, rx_desc) != HAL_ENCRYPT_TYPE_TKIP_MIC) 3204 return 0; 3205 3206 rx_info.addr2_present = false; 3207 rx_info.rx_status = rxs; 3208 3209 hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz); 3210 hdr_len = ieee80211_hdrlen(hdr->frame_control); 3211 head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN; 3212 tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN; 3213 3214 if (!is_multicast_ether_addr(hdr->addr1)) 3215 key_idx = peer->ucast_keyidx; 3216 else 3217 key_idx = peer->mcast_keyidx; 3218 3219 key_conf = peer->keys[key_idx]; 3220 3221 data = msdu->data + head_len; 3222 data_len = msdu->len - head_len - tail_len; 3223 key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY]; 3224 3225 ret = ath12k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic); 3226 if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN)) 3227 goto mic_fail; 3228 3229 return 0; 3230 3231 mic_fail: 3232 (ATH12K_SKB_RXCB(msdu))->is_first_msdu = true; 3233 (ATH12K_SKB_RXCB(msdu))->is_last_msdu = true; 3234 3235 ath12k_dp_rx_h_fetch_info(ab, rx_desc, &rx_info); 3236 3237 rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED | 3238 RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED; 3239 skb_pull(msdu, hal_rx_desc_sz); 3240 3241 if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, rx_desc, msdu))) 3242 return -EINVAL; 3243 3244 ath12k_dp_rx_h_ppdu(ar, &rx_info); 3245 ath12k_dp_rx_h_undecap(ar, msdu, rx_desc, 3246 HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true); 3247 ieee80211_rx(ath12k_ar_to_hw(ar), msdu); 3248 return -EINVAL; 3249 } 3250 3251 static void ath12k_dp_rx_h_undecap_frag(struct ath12k *ar, struct sk_buff *msdu, 3252 enum hal_encrypt_type enctype, u32 flags) 3253 { 3254 struct ieee80211_hdr *hdr; 3255 size_t hdr_len; 3256 size_t crypto_len; 3257 u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; 3258 3259 if (!flags) 3260 return; 3261 3262 hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz); 3263 3264 if (flags & RX_FLAG_MIC_STRIPPED) 3265 skb_trim(msdu, msdu->len - 3266 ath12k_dp_rx_crypto_mic_len(ar, enctype)); 3267 3268 if (flags & RX_FLAG_ICV_STRIPPED) 3269 skb_trim(msdu, msdu->len - 3270 ath12k_dp_rx_crypto_icv_len(ar, enctype)); 3271 3272 if (flags & RX_FLAG_IV_STRIPPED) { 3273 hdr_len = ieee80211_hdrlen(hdr->frame_control); 3274 crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype); 3275 3276 memmove(msdu->data + hal_rx_desc_sz + crypto_len, 3277 msdu->data + hal_rx_desc_sz, hdr_len); 3278 skb_pull(msdu, crypto_len); 3279 } 3280 } 3281 3282 static int ath12k_dp_rx_h_defrag(struct ath12k *ar, 3283 struct ath12k_peer *peer, 3284 struct ath12k_dp_rx_tid *rx_tid, 3285 struct sk_buff **defrag_skb) 3286 { 3287 struct ath12k_base *ab = ar->ab; 3288 struct hal_rx_desc *rx_desc; 3289 struct sk_buff *skb, *first_frag, *last_frag; 3290 struct ieee80211_hdr *hdr; 3291 enum hal_encrypt_type enctype; 3292 bool is_decrypted = false; 3293 int msdu_len = 0; 3294 int extra_space; 3295 u32 flags, hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; 3296 3297 first_frag = 
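/* Fragments in rx_tid->rx_frags are already ordered by fragment
 * number, so the head of the queue is fragment 0 and the tail is
 * the final fragment; every later fragment is stripped of its rx
 * descriptor and 802.11 header before being appended to the head
 * below.
 */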
skb_peek(&rx_tid->rx_frags); 3298 last_frag = skb_peek_tail(&rx_tid->rx_frags); 3299 3300 skb_queue_walk(&rx_tid->rx_frags, skb) { 3301 flags = 0; 3302 rx_desc = (struct hal_rx_desc *)skb->data; 3303 hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz); 3304 3305 enctype = ath12k_dp_rx_h_enctype(ab, rx_desc); 3306 if (enctype != HAL_ENCRYPT_TYPE_OPEN) 3307 is_decrypted = ath12k_dp_rx_h_is_decrypted(ab, 3308 rx_desc); 3309 3310 if (is_decrypted) { 3311 if (skb != first_frag) 3312 flags |= RX_FLAG_IV_STRIPPED; 3313 if (skb != last_frag) 3314 flags |= RX_FLAG_ICV_STRIPPED | 3315 RX_FLAG_MIC_STRIPPED; 3316 } 3317 3318 /* RX fragments are always raw packets */ 3319 if (skb != last_frag) 3320 skb_trim(skb, skb->len - FCS_LEN); 3321 ath12k_dp_rx_h_undecap_frag(ar, skb, enctype, flags); 3322 3323 if (skb != first_frag) 3324 skb_pull(skb, hal_rx_desc_sz + 3325 ieee80211_hdrlen(hdr->frame_control)); 3326 msdu_len += skb->len; 3327 } 3328 3329 extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag)); 3330 if (extra_space > 0 && 3331 (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0)) 3332 return -ENOMEM; 3333 3334 __skb_unlink(first_frag, &rx_tid->rx_frags); 3335 while ((skb = __skb_dequeue(&rx_tid->rx_frags))) { 3336 skb_put_data(first_frag, skb->data, skb->len); 3337 dev_kfree_skb_any(skb); 3338 } 3339 3340 hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz); 3341 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS); 3342 ATH12K_SKB_RXCB(first_frag)->is_frag = 1; 3343 3344 if (ath12k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag)) 3345 first_frag = NULL; 3346 3347 *defrag_skb = first_frag; 3348 return 0; 3349 } 3350 3351 static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar, 3352 struct ath12k_dp_rx_tid *rx_tid, 3353 struct sk_buff *defrag_skb) 3354 { 3355 struct ath12k_base *ab = ar->ab; 3356 struct ath12k_dp *dp = &ab->dp; 3357 struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data; 3358 struct hal_reo_entrance_ring *reo_ent_ring; 3359 struct hal_reo_dest_ring *reo_dest_ring; 3360 struct dp_link_desc_bank *link_desc_banks; 3361 struct hal_rx_msdu_link *msdu_link; 3362 struct hal_rx_msdu_details *msdu0; 3363 struct hal_srng *srng; 3364 dma_addr_t link_paddr, buf_paddr; 3365 u32 desc_bank, msdu_info, msdu_ext_info, mpdu_info; 3366 u32 cookie, hal_rx_desc_sz, dest_ring_info0, queue_addr_hi; 3367 int ret; 3368 struct ath12k_rx_desc_info *desc_info; 3369 enum hal_rx_buf_return_buf_manager idle_link_rbm = dp->idle_link_rbm; 3370 u8 dst_ind; 3371 3372 hal_rx_desc_sz = ab->hal.hal_desc_sz; 3373 link_desc_banks = dp->link_desc_banks; 3374 reo_dest_ring = rx_tid->dst_ring_desc; 3375 3376 ath12k_hal_rx_reo_ent_paddr_get(ab, &reo_dest_ring->buf_addr_info, 3377 &link_paddr, &cookie); 3378 desc_bank = u32_get_bits(cookie, DP_LINK_DESC_BANK_MASK); 3379 3380 msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr + 3381 (link_paddr - link_desc_banks[desc_bank].paddr)); 3382 msdu0 = &msdu_link->msdu_link[0]; 3383 msdu_ext_info = le32_to_cpu(msdu0->rx_msdu_ext_info.info0); 3384 dst_ind = u32_get_bits(msdu_ext_info, RX_MSDU_EXT_DESC_INFO0_REO_DEST_IND); 3385 3386 memset(msdu0, 0, sizeof(*msdu0)); 3387 3388 msdu_info = u32_encode_bits(1, RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU) | 3389 u32_encode_bits(1, RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU) | 3390 u32_encode_bits(0, RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) | 3391 u32_encode_bits(defrag_skb->len - hal_rx_desc_sz, 3392 RX_MSDU_DESC_INFO0_MSDU_LENGTH) | 3393 u32_encode_bits(1, 
RX_MSDU_DESC_INFO0_VALID_SA) | 3394 u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_DA); 3395 msdu0->rx_msdu_info.info0 = cpu_to_le32(msdu_info); 3396 msdu0->rx_msdu_ext_info.info0 = cpu_to_le32(msdu_ext_info); 3397 3398 /* change msdu len in hal rx desc */ 3399 ath12k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz); 3400 3401 buf_paddr = dma_map_single(ab->dev, defrag_skb->data, 3402 defrag_skb->len + skb_tailroom(defrag_skb), 3403 DMA_TO_DEVICE); 3404 if (dma_mapping_error(ab->dev, buf_paddr)) 3405 return -ENOMEM; 3406 3407 spin_lock_bh(&dp->rx_desc_lock); 3408 desc_info = list_first_entry_or_null(&dp->rx_desc_free_list, 3409 struct ath12k_rx_desc_info, 3410 list); 3411 if (!desc_info) { 3412 spin_unlock_bh(&dp->rx_desc_lock); 3413 ath12k_warn(ab, "failed to find rx desc for reinject\n"); 3414 ret = -ENOMEM; 3415 goto err_unmap_dma; 3416 } 3417 3418 desc_info->skb = defrag_skb; 3419 desc_info->in_use = true; 3420 3421 list_del(&desc_info->list); 3422 spin_unlock_bh(&dp->rx_desc_lock); 3423 3424 ATH12K_SKB_RXCB(defrag_skb)->paddr = buf_paddr; 3425 3426 ath12k_hal_rx_buf_addr_info_set(&msdu0->buf_addr_info, buf_paddr, 3427 desc_info->cookie, 3428 HAL_RX_BUF_RBM_SW3_BM); 3429 3430 /* Fill mpdu details into reo entrance ring */ 3431 srng = &ab->hal.srng_list[dp->reo_reinject_ring.ring_id]; 3432 3433 spin_lock_bh(&srng->lock); 3434 ath12k_hal_srng_access_begin(ab, srng); 3435 3436 reo_ent_ring = ath12k_hal_srng_src_get_next_entry(ab, srng); 3437 if (!reo_ent_ring) { 3438 ath12k_hal_srng_access_end(ab, srng); 3439 spin_unlock_bh(&srng->lock); 3440 ret = -ENOSPC; 3441 goto err_free_desc; 3442 } 3443 memset(reo_ent_ring, 0, sizeof(*reo_ent_ring)); 3444 3445 ath12k_hal_rx_buf_addr_info_set(&reo_ent_ring->buf_addr_info, link_paddr, 3446 cookie, 3447 idle_link_rbm); 3448 3449 mpdu_info = u32_encode_bits(1, RX_MPDU_DESC_INFO0_MSDU_COUNT) | 3450 u32_encode_bits(0, RX_MPDU_DESC_INFO0_FRAG_FLAG) | 3451 u32_encode_bits(1, RX_MPDU_DESC_INFO0_RAW_MPDU) | 3452 u32_encode_bits(1, RX_MPDU_DESC_INFO0_VALID_PN) | 3453 u32_encode_bits(rx_tid->tid, RX_MPDU_DESC_INFO0_TID); 3454 3455 reo_ent_ring->rx_mpdu_info.info0 = cpu_to_le32(mpdu_info); 3456 reo_ent_ring->rx_mpdu_info.peer_meta_data = 3457 reo_dest_ring->rx_mpdu_info.peer_meta_data; 3458 3459 if (ab->hw_params->reoq_lut_support) { 3460 reo_ent_ring->queue_addr_lo = reo_dest_ring->rx_mpdu_info.peer_meta_data; 3461 queue_addr_hi = 0; 3462 } else { 3463 reo_ent_ring->queue_addr_lo = 3464 cpu_to_le32(lower_32_bits(rx_tid->qbuf.paddr_aligned)); 3465 queue_addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned); 3466 } 3467 3468 reo_ent_ring->info0 = le32_encode_bits(queue_addr_hi, 3469 HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI) | 3470 le32_encode_bits(dst_ind, 3471 HAL_REO_ENTR_RING_INFO0_DEST_IND); 3472 3473 reo_ent_ring->info1 = le32_encode_bits(rx_tid->cur_sn, 3474 HAL_REO_ENTR_RING_INFO1_MPDU_SEQ_NUM); 3475 dest_ring_info0 = le32_get_bits(reo_dest_ring->info0, 3476 HAL_REO_DEST_RING_INFO0_SRC_LINK_ID); 3477 reo_ent_ring->info2 = 3478 cpu_to_le32(u32_get_bits(dest_ring_info0, 3479 HAL_REO_ENTR_RING_INFO2_SRC_LINK_ID)); 3480 3481 ath12k_hal_srng_access_end(ab, srng); 3482 spin_unlock_bh(&srng->lock); 3483 3484 return 0; 3485 3486 err_free_desc: 3487 spin_lock_bh(&dp->rx_desc_lock); 3488 desc_info->in_use = false; 3489 desc_info->skb = NULL; 3490 list_add_tail(&desc_info->list, &dp->rx_desc_free_list); 3491 spin_unlock_bh(&dp->rx_desc_lock); 3492 err_unmap_dma: 3493 dma_unmap_single(ab->dev, buf_paddr, defrag_skb->len + skb_tailroom(defrag_skb), 3494 
DMA_TO_DEVICE); 3495 return ret; 3496 } 3497 3498 static int ath12k_dp_rx_h_cmp_frags(struct ath12k_base *ab, 3499 struct sk_buff *a, struct sk_buff *b) 3500 { 3501 int frag1, frag2; 3502 3503 frag1 = ath12k_dp_rx_h_frag_no(ab, a); 3504 frag2 = ath12k_dp_rx_h_frag_no(ab, b); 3505 3506 return frag1 - frag2; 3507 } 3508 3509 static void ath12k_dp_rx_h_sort_frags(struct ath12k_base *ab, 3510 struct sk_buff_head *frag_list, 3511 struct sk_buff *cur_frag) 3512 { 3513 struct sk_buff *skb; 3514 int cmp; 3515 3516 skb_queue_walk(frag_list, skb) { 3517 cmp = ath12k_dp_rx_h_cmp_frags(ab, skb, cur_frag); 3518 if (cmp < 0) 3519 continue; 3520 __skb_queue_before(frag_list, skb, cur_frag); 3521 return; 3522 } 3523 __skb_queue_tail(frag_list, cur_frag); 3524 } 3525 3526 static u64 ath12k_dp_rx_h_get_pn(struct ath12k *ar, struct sk_buff *skb) 3527 { 3528 struct ieee80211_hdr *hdr; 3529 u64 pn = 0; 3530 u8 *ehdr; 3531 u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; 3532 3533 hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz); 3534 ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control); 3535 3536 pn = ehdr[0]; 3537 pn |= (u64)ehdr[1] << 8; 3538 pn |= (u64)ehdr[4] << 16; 3539 pn |= (u64)ehdr[5] << 24; 3540 pn |= (u64)ehdr[6] << 32; 3541 pn |= (u64)ehdr[7] << 40; 3542 3543 return pn; 3544 } 3545 3546 static bool 3547 ath12k_dp_rx_h_defrag_validate_incr_pn(struct ath12k *ar, struct ath12k_dp_rx_tid *rx_tid) 3548 { 3549 struct ath12k_base *ab = ar->ab; 3550 enum hal_encrypt_type encrypt_type; 3551 struct sk_buff *first_frag, *skb; 3552 struct hal_rx_desc *desc; 3553 u64 last_pn; 3554 u64 cur_pn; 3555 3556 first_frag = skb_peek(&rx_tid->rx_frags); 3557 desc = (struct hal_rx_desc *)first_frag->data; 3558 3559 encrypt_type = ath12k_dp_rx_h_enctype(ab, desc); 3560 if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 && 3561 encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 && 3562 encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 && 3563 encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256) 3564 return true; 3565 3566 last_pn = ath12k_dp_rx_h_get_pn(ar, first_frag); 3567 skb_queue_walk(&rx_tid->rx_frags, skb) { 3568 if (skb == first_frag) 3569 continue; 3570 3571 cur_pn = ath12k_dp_rx_h_get_pn(ar, skb); 3572 if (cur_pn != last_pn + 1) 3573 return false; 3574 last_pn = cur_pn; 3575 } 3576 return true; 3577 } 3578 3579 static int ath12k_dp_rx_frag_h_mpdu(struct ath12k *ar, 3580 struct sk_buff *msdu, 3581 struct hal_reo_dest_ring *ring_desc) 3582 { 3583 struct ath12k_base *ab = ar->ab; 3584 struct hal_rx_desc *rx_desc; 3585 struct ath12k_peer *peer; 3586 struct ath12k_dp_rx_tid *rx_tid; 3587 struct sk_buff *defrag_skb = NULL; 3588 u32 peer_id; 3589 u16 seqno, frag_no; 3590 u8 tid; 3591 int ret = 0; 3592 bool more_frags; 3593 3594 rx_desc = (struct hal_rx_desc *)msdu->data; 3595 peer_id = ath12k_dp_rx_h_peer_id(ab, rx_desc); 3596 tid = ath12k_dp_rx_h_tid(ab, rx_desc); 3597 seqno = ath12k_dp_rx_h_seq_no(ab, rx_desc); 3598 frag_no = ath12k_dp_rx_h_frag_no(ab, msdu); 3599 more_frags = ath12k_dp_rx_h_more_frags(ab, msdu); 3600 3601 if (!ath12k_dp_rx_h_seq_ctrl_valid(ab, rx_desc) || 3602 !ath12k_dp_rx_h_fc_valid(ab, rx_desc) || 3603 tid > IEEE80211_NUM_TIDS) 3604 return -EINVAL; 3605 3606 /* received unfragmented packet in reo 3607 * exception ring, this shouldn't happen 3608 * as these packets typically come from 3609 * reo2sw srngs. 
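 * (Fragment number 0 with the more-fragments bit clear means the
 * MPDU is not fragmented at all, hence the WARN below.)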
3610 */ 3611 if (WARN_ON_ONCE(!frag_no && !more_frags)) 3612 return -EINVAL; 3613 3614 spin_lock_bh(&ab->base_lock); 3615 peer = ath12k_peer_find_by_id(ab, peer_id); 3616 if (!peer) { 3617 ath12k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n", 3618 peer_id); 3619 ret = -ENOENT; 3620 goto out_unlock; 3621 } 3622 3623 if (!peer->dp_setup_done) { 3624 ath12k_warn(ab, "The peer %pM [%d] has uninitialized datapath\n", 3625 peer->addr, peer_id); 3626 ret = -ENOENT; 3627 goto out_unlock; 3628 } 3629 3630 rx_tid = &peer->rx_tid[tid]; 3631 3632 if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) || 3633 skb_queue_empty(&rx_tid->rx_frags)) { 3634 /* Flush stored fragments and start a new sequence */ 3635 ath12k_dp_rx_frags_cleanup(rx_tid, true); 3636 rx_tid->cur_sn = seqno; 3637 } 3638 3639 if (rx_tid->rx_frag_bitmap & BIT(frag_no)) { 3640 /* Fragment already present */ 3641 ret = -EINVAL; 3642 goto out_unlock; 3643 } 3644 3645 if ((!rx_tid->rx_frag_bitmap || frag_no > __fls(rx_tid->rx_frag_bitmap))) 3646 __skb_queue_tail(&rx_tid->rx_frags, msdu); 3647 else 3648 ath12k_dp_rx_h_sort_frags(ab, &rx_tid->rx_frags, msdu); 3649 3650 rx_tid->rx_frag_bitmap |= BIT(frag_no); 3651 if (!more_frags) 3652 rx_tid->last_frag_no = frag_no; 3653 3654 if (frag_no == 0) { 3655 rx_tid->dst_ring_desc = kmemdup(ring_desc, 3656 sizeof(*rx_tid->dst_ring_desc), 3657 GFP_ATOMIC); 3658 if (!rx_tid->dst_ring_desc) { 3659 ret = -ENOMEM; 3660 goto out_unlock; 3661 } 3662 } else { 3663 ath12k_dp_rx_link_desc_return(ab, &ring_desc->buf_addr_info, 3664 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 3665 } 3666 3667 if (!rx_tid->last_frag_no || 3668 rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) { 3669 mod_timer(&rx_tid->frag_timer, jiffies + 3670 ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS); 3671 goto out_unlock; 3672 } 3673 3674 spin_unlock_bh(&ab->base_lock); 3675 timer_delete_sync(&rx_tid->frag_timer); 3676 spin_lock_bh(&ab->base_lock); 3677 3678 peer = ath12k_peer_find_by_id(ab, peer_id); 3679 if (!peer) 3680 goto err_frags_cleanup; 3681 3682 if (!ath12k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid)) 3683 goto err_frags_cleanup; 3684 3685 if (ath12k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb)) 3686 goto err_frags_cleanup; 3687 3688 if (!defrag_skb) 3689 goto err_frags_cleanup; 3690 3691 if (ath12k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb)) 3692 goto err_frags_cleanup; 3693 3694 ath12k_dp_rx_frags_cleanup(rx_tid, false); 3695 goto out_unlock; 3696 3697 err_frags_cleanup: 3698 dev_kfree_skb_any(defrag_skb); 3699 ath12k_dp_rx_frags_cleanup(rx_tid, true); 3700 out_unlock: 3701 spin_unlock_bh(&ab->base_lock); 3702 return ret; 3703 } 3704 3705 static int 3706 ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc, 3707 struct list_head *used_list, 3708 bool drop, u32 cookie) 3709 { 3710 struct ath12k_base *ab = ar->ab; 3711 struct sk_buff *msdu; 3712 struct ath12k_skb_rxcb *rxcb; 3713 struct hal_rx_desc *rx_desc; 3714 u16 msdu_len; 3715 u32 hal_rx_desc_sz = ab->hal.hal_desc_sz; 3716 struct ath12k_rx_desc_info *desc_info; 3717 u64 desc_va; 3718 3719 desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 | 3720 le32_to_cpu(desc->buf_va_lo)); 3721 desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va); 3722 3723 /* retry manual desc retrieval */ 3724 if (!desc_info) { 3725 desc_info = ath12k_dp_get_rx_desc(ab, cookie); 3726 if (!desc_info) { 3727 ath12k_warn(ab, "Invalid cookie in DP rx error descriptor retrieval: 0x%x\n", 3728 cookie); 3729 return -EINVAL; 
3730 } 3731 } 3732 3733 if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC) 3734 ath12k_warn(ab, " RX Exception, Check HW CC implementation"); 3735 3736 msdu = desc_info->skb; 3737 desc_info->skb = NULL; 3738 3739 list_add_tail(&desc_info->list, used_list); 3740 3741 rxcb = ATH12K_SKB_RXCB(msdu); 3742 dma_unmap_single(ar->ab->dev, rxcb->paddr, 3743 msdu->len + skb_tailroom(msdu), 3744 DMA_FROM_DEVICE); 3745 3746 if (drop) { 3747 dev_kfree_skb_any(msdu); 3748 return 0; 3749 } 3750 3751 rcu_read_lock(); 3752 if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) { 3753 dev_kfree_skb_any(msdu); 3754 goto exit; 3755 } 3756 3757 if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) { 3758 dev_kfree_skb_any(msdu); 3759 goto exit; 3760 } 3761 3762 rx_desc = (struct hal_rx_desc *)msdu->data; 3763 msdu_len = ath12k_dp_rx_h_msdu_len(ar->ab, rx_desc); 3764 if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) { 3765 ath12k_warn(ar->ab, "invalid msdu leng %u", msdu_len); 3766 ath12k_dbg_dump(ar->ab, ATH12K_DBG_DATA, NULL, "", rx_desc, 3767 sizeof(*rx_desc)); 3768 dev_kfree_skb_any(msdu); 3769 goto exit; 3770 } 3771 3772 skb_put(msdu, hal_rx_desc_sz + msdu_len); 3773 3774 if (ath12k_dp_rx_frag_h_mpdu(ar, msdu, desc)) { 3775 dev_kfree_skb_any(msdu); 3776 ath12k_dp_rx_link_desc_return(ar->ab, &desc->buf_addr_info, 3777 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 3778 } 3779 exit: 3780 rcu_read_unlock(); 3781 return 0; 3782 } 3783 3784 int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi, 3785 int budget) 3786 { 3787 struct ath12k_hw_group *ag = ab->ag; 3788 struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES]; 3789 u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC]; 3790 int num_buffs_reaped[ATH12K_MAX_DEVICES] = {}; 3791 struct dp_link_desc_bank *link_desc_banks; 3792 enum hal_rx_buf_return_buf_manager rbm; 3793 struct hal_rx_msdu_link *link_desc_va; 3794 int tot_n_bufs_reaped, quota, ret, i; 3795 struct hal_reo_dest_ring *reo_desc; 3796 struct dp_rxdma_ring *rx_ring; 3797 struct dp_srng *reo_except; 3798 struct ath12k_hw_link *hw_links = ag->hw_links; 3799 struct ath12k_base *partner_ab; 3800 u8 hw_link_id, device_id; 3801 u32 desc_bank, num_msdus; 3802 struct hal_srng *srng; 3803 struct ath12k *ar; 3804 dma_addr_t paddr; 3805 bool is_frag; 3806 bool drop; 3807 int pdev_id; 3808 3809 tot_n_bufs_reaped = 0; 3810 quota = budget; 3811 3812 for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) 3813 INIT_LIST_HEAD(&rx_desc_used_list[device_id]); 3814 3815 reo_except = &ab->dp.reo_except_ring; 3816 3817 srng = &ab->hal.srng_list[reo_except->ring_id]; 3818 3819 spin_lock_bh(&srng->lock); 3820 3821 ath12k_hal_srng_access_begin(ab, srng); 3822 3823 while (budget && 3824 (reo_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) { 3825 drop = false; 3826 ab->device_stats.err_ring_pkts++; 3827 3828 ret = ath12k_hal_desc_reo_parse_err(ab, reo_desc, &paddr, 3829 &desc_bank); 3830 if (ret) { 3831 ath12k_warn(ab, "failed to parse error reo desc %d\n", 3832 ret); 3833 continue; 3834 } 3835 3836 hw_link_id = le32_get_bits(reo_desc->info0, 3837 HAL_REO_DEST_RING_INFO0_SRC_LINK_ID); 3838 device_id = hw_links[hw_link_id].device_id; 3839 partner_ab = ath12k_ag_to_ab(ag, device_id); 3840 3841 pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params, 3842 hw_links[hw_link_id].pdev_idx); 3843 ar = partner_ab->pdevs[pdev_id].ar; 3844 3845 link_desc_banks = partner_ab->dp.link_desc_banks; 3846 link_desc_va = link_desc_banks[desc_bank].vaddr + 3847 (paddr - link_desc_banks[desc_bank].paddr); 3848 
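/* The link descriptor resolved above lives in the partner
 * device's link descriptor bank and carries up to
 * HAL_NUM_RX_MSDUS_PER_LINK_DESC buffer cookies together with the
 * return buffer manager, which is sanity-checked below before the
 * MSDUs are processed.
 */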
3848 ath12k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies, 3849 &rbm); 3850 if (rbm != partner_ab->dp.idle_link_rbm && 3851 rbm != HAL_RX_BUF_RBM_SW3_BM && 3852 rbm != partner_ab->hw_params->hal_params->rx_buf_rbm) { 3853 ab->device_stats.invalid_rbm++; 3854 ath12k_warn(ab, "invalid return buffer manager %d\n", rbm); 3855 ath12k_dp_rx_link_desc_return(partner_ab, 3856 &reo_desc->buf_addr_info, 3857 HAL_WBM_REL_BM_ACT_REL_MSDU); 3858 continue; 3859 } 3860 3861 is_frag = !!(le32_to_cpu(reo_desc->rx_mpdu_info.info0) & 3862 RX_MPDU_DESC_INFO0_FRAG_FLAG); 3863 3864 /* Process only rx fragments with one msdu per link desc below, and drop 3865 * msdus indicated due to error reasons. 3866 * Dynamic fragmentation is not supported for multi-link clients, so drop 3867 * partner device buffers. 3868 */ 3869 if (!is_frag || num_msdus > 1 || 3870 partner_ab->device_id != ab->device_id) { 3871 drop = true; 3872 3873 /* Return the link desc back to the wbm idle list */ 3874 ath12k_dp_rx_link_desc_return(partner_ab, 3875 &reo_desc->buf_addr_info, 3876 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 3877 } 3878 3879 for (i = 0; i < num_msdus; i++) { 3880 if (!ath12k_dp_process_rx_err_buf(ar, reo_desc, 3881 &rx_desc_used_list[device_id], 3882 drop, 3883 msdu_cookies[i])) { 3884 num_buffs_reaped[device_id]++; 3885 tot_n_bufs_reaped++; 3886 } 3887 } 3888 3889 if (tot_n_bufs_reaped >= quota) { 3890 tot_n_bufs_reaped = quota; 3891 goto exit; 3892 } 3893 3894 budget = quota - tot_n_bufs_reaped; 3895 } 3896 3897 exit: 3898 ath12k_hal_srng_access_end(ab, srng); 3899 3900 spin_unlock_bh(&srng->lock); 3901 3902 for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) { 3903 if (!num_buffs_reaped[device_id]) 3904 continue; 3905 3906 partner_ab = ath12k_ag_to_ab(ag, device_id); 3907 rx_ring = &partner_ab->dp.rx_refill_buf_ring; 3908 3909 ath12k_dp_rx_bufs_replenish(partner_ab, rx_ring, 3910 &rx_desc_used_list[device_id], 3911 num_buffs_reaped[device_id]); 3912 } 3913 3914 return tot_n_bufs_reaped; 3915 } 3916 3917 static void ath12k_dp_rx_null_q_desc_sg_drop(struct ath12k *ar, 3918 int msdu_len, 3919 struct sk_buff_head *msdu_list) 3920 { 3921 struct sk_buff *skb, *tmp; 3922 struct ath12k_skb_rxcb *rxcb; 3923 int n_buffs; 3924 3925 n_buffs = DIV_ROUND_UP(msdu_len, 3926 (DP_RX_BUFFER_SIZE - ar->ab->hal.hal_desc_sz)); 3927 3928 skb_queue_walk_safe(msdu_list, skb, tmp) { 3929 rxcb = ATH12K_SKB_RXCB(skb); 3930 if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO && 3931 rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) { 3932 if (!n_buffs) 3933 break; 3934 __skb_unlink(skb, msdu_list); 3935 dev_kfree_skb_any(skb); 3936 n_buffs--; 3937 } 3938 } 3939 } 3940 3941 static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu, 3942 struct ath12k_dp_rx_info *rx_info, 3943 struct sk_buff_head *msdu_list) 3944 { 3945 struct ath12k_base *ab = ar->ab; 3946 u16 msdu_len; 3947 struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; 3948 u8 l3pad_bytes; 3949 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 3950 u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; 3951 3952 msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc); 3953 3954 if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) { 3955 /* First buffer will be freed by the caller, so deduct its length */ 3956 msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz); 3957 ath12k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list); 3958 return -EINVAL; 3959 } 3960
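/* Editor's note: at this point the msdu is either a fragment or fits within a single rx buffer; oversized non-fragmented msdus were rejected above after their scatter buffers were dropped from msdu_list, and the first buffer is freed by the caller on the error return. */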
3961 /* Even after cleaning up the sg buffers in the msdu list with the above check, 3962 * any msdu received with the continuation flag set needs to be dropped as invalid. 3963 * This protects against a stray error frame carrying the continuation flag. 3964 */ 3965 if (rxcb->is_continuation) 3966 return -EINVAL; 3967 3968 if (!ath12k_dp_rx_h_msdu_done(ab, desc)) { 3969 ath12k_warn(ar->ab, 3970 "msdu_done bit not set in null_q_desc processing\n"); 3971 __skb_queue_purge(msdu_list); 3972 return -EIO; 3973 } 3974 3975 /* Handle NULL queue descriptor violations arising out of a missing 3976 * REO queue for a given peer or TID. This typically happens if a 3977 * packet is received on a QoS-enabled TID before the ADDBA negotiation 3978 * for that TID completes, which is when the TID queue is set up. It may 3979 * also happen for MC/BC frames if they are not routed to the non-QoS 3980 * TID queue, in the absence of any other default TID queue. 3981 * This error can show up in both the REO destination and WBM release rings. 3982 */ 3983 3984 if (rxcb->is_frag) { 3985 skb_pull(msdu, hal_rx_desc_sz); 3986 } else { 3987 l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc); 3988 3989 if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE) 3990 return -EINVAL; 3991 3992 skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len); 3993 skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes); 3994 } 3995 if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, desc, msdu))) 3996 return -EINVAL; 3997 3998 ath12k_dp_rx_h_fetch_info(ab, desc, rx_info); 3999 ath12k_dp_rx_h_ppdu(ar, rx_info); 4000 ath12k_dp_rx_h_mpdu(ar, msdu, desc, rx_info); 4001 4002 rxcb->tid = rx_info->tid; 4003 4004 /* Note that the caller retains access to the msdu and completes 4005 * rx with mac80211, so there is no need to clean up amsdu_list here. 4006 */ 4007 4008 return 0; 4009 } 4010 4011 static bool ath12k_dp_rx_h_reo_err(struct ath12k *ar, struct sk_buff *msdu, 4012 struct ath12k_dp_rx_info *rx_info, 4013 struct sk_buff_head *msdu_list) 4014 { 4015 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 4016 bool drop = false; 4017 4018 ar->ab->device_stats.reo_error[rxcb->err_code]++; 4019 4020 switch (rxcb->err_code) { 4021 case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO: 4022 if (ath12k_dp_rx_h_null_q_desc(ar, msdu, rx_info, msdu_list)) 4023 drop = true; 4024 break; 4025 case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED: 4026 /* TODO: Do not drop PN failed packets in the driver; 4027 * instead, it is better to drop such packets in mac80211 4028 * after incrementing the replay counters. 4029 */ 4030 fallthrough; 4031 default: 4032 /* TODO: Review other errors and report them to mac80211 4033 * as appropriate.
4034 */ 4035 drop = true; 4036 break; 4037 } 4038 4039 return drop; 4040 } 4041 4042 static bool ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu, 4043 struct ath12k_dp_rx_info *rx_info) 4044 { 4045 struct ath12k_base *ab = ar->ab; 4046 u16 msdu_len; 4047 struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; 4048 u8 l3pad_bytes; 4049 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 4050 u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; 4051 4052 rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, desc); 4053 rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, desc); 4054 4055 l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc); 4056 msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc); 4057 4058 if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE) { 4059 ath12k_dbg(ab, ATH12K_DBG_DATA, 4060 "invalid msdu len in tkip mic err %u\n", msdu_len); 4061 ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", desc, 4062 sizeof(*desc)); 4063 return true; 4064 } 4065 4066 skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len); 4067 skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes); 4068 4069 if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, desc, msdu))) 4070 return true; 4071 4072 ath12k_dp_rx_h_ppdu(ar, rx_info); 4073 4074 rx_info->rx_status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR | 4075 RX_FLAG_DECRYPTED); 4076 4077 ath12k_dp_rx_h_undecap(ar, msdu, desc, 4078 HAL_ENCRYPT_TYPE_TKIP_MIC, rx_info->rx_status, false); 4079 return false; 4080 } 4081 4082 static bool ath12k_dp_rx_h_rxdma_err(struct ath12k *ar, struct sk_buff *msdu, 4083 struct ath12k_dp_rx_info *rx_info) 4084 { 4085 struct ath12k_base *ab = ar->ab; 4086 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 4087 struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data; 4088 bool drop = false; 4089 u32 err_bitmap; 4090 4091 ar->ab->device_stats.rxdma_error[rxcb->err_code]++; 4092 4093 switch (rxcb->err_code) { 4094 case HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR: 4095 case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR: 4096 err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc); 4097 if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) { 4098 ath12k_dp_rx_h_fetch_info(ab, rx_desc, rx_info); 4099 drop = ath12k_dp_rx_h_tkip_mic_err(ar, msdu, rx_info); 4100 break; 4101 } 4102 fallthrough; 4103 default: 4104 /* TODO: Review other rxdma error code to check if anything is 4105 * worth reporting to mac80211 4106 */ 4107 drop = true; 4108 break; 4109 } 4110 4111 return drop; 4112 } 4113 4114 static void ath12k_dp_rx_wbm_err(struct ath12k *ar, 4115 struct napi_struct *napi, 4116 struct sk_buff *msdu, 4117 struct sk_buff_head *msdu_list) 4118 { 4119 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 4120 struct ieee80211_rx_status rxs = {}; 4121 struct ath12k_dp_rx_info rx_info; 4122 bool drop = true; 4123 4124 rx_info.addr2_present = false; 4125 rx_info.rx_status = &rxs; 4126 4127 switch (rxcb->err_rel_src) { 4128 case HAL_WBM_REL_SRC_MODULE_REO: 4129 drop = ath12k_dp_rx_h_reo_err(ar, msdu, &rx_info, msdu_list); 4130 break; 4131 case HAL_WBM_REL_SRC_MODULE_RXDMA: 4132 drop = ath12k_dp_rx_h_rxdma_err(ar, msdu, &rx_info); 4133 break; 4134 default: 4135 /* msdu will get freed */ 4136 break; 4137 } 4138 4139 if (drop) { 4140 dev_kfree_skb_any(msdu); 4141 return; 4142 } 4143 4144 rx_info.rx_status->flag |= RX_FLAG_SKIP_MONITOR; 4145 4146 ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_info); 4147 } 4148 4149 int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab, 4150 struct napi_struct *napi, int budget) 4151 { 4152 struct list_head 
rx_desc_used_list[ATH12K_MAX_DEVICES]; 4153 struct ath12k_hw_group *ag = ab->ag; 4154 struct ath12k *ar; 4155 struct ath12k_dp *dp = &ab->dp; 4156 struct dp_rxdma_ring *rx_ring; 4157 struct hal_rx_wbm_rel_info err_info; 4158 struct hal_srng *srng; 4159 struct sk_buff *msdu; 4160 struct sk_buff_head msdu_list, scatter_msdu_list; 4161 struct ath12k_skb_rxcb *rxcb; 4162 void *rx_desc; 4163 int num_buffs_reaped[ATH12K_MAX_DEVICES] = {}; 4164 int total_num_buffs_reaped = 0; 4165 struct ath12k_rx_desc_info *desc_info; 4166 struct ath12k_device_dp_stats *device_stats = &ab->device_stats; 4167 struct ath12k_hw_link *hw_links = ag->hw_links; 4168 struct ath12k_base *partner_ab; 4169 u8 hw_link_id, device_id; 4170 int ret, pdev_id; 4171 struct hal_rx_desc *msdu_data; 4172 4173 __skb_queue_head_init(&msdu_list); 4174 __skb_queue_head_init(&scatter_msdu_list); 4175 4176 for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) 4177 INIT_LIST_HEAD(&rx_desc_used_list[device_id]); 4178 4179 srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id]; 4180 spin_lock_bh(&srng->lock); 4181 4182 ath12k_hal_srng_access_begin(ab, srng); 4183 4184 while (budget) { 4185 rx_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng); 4186 if (!rx_desc) 4187 break; 4188 4189 ret = ath12k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info); 4190 if (ret) { 4191 ath12k_warn(ab, 4192 "failed to parse rx error in wbm_rel ring desc %d\n", 4193 ret); 4194 continue; 4195 } 4196 4197 desc_info = err_info.rx_desc; 4198 4199 /* retry manual desc retrieval if hw cc is not done */ 4200 if (!desc_info) { 4201 desc_info = ath12k_dp_get_rx_desc(ab, err_info.cookie); 4202 if (!desc_info) { 4203 ath12k_warn(ab, "Invalid cookie in DP WBM rx error descriptor retrieval: 0x%x\n", 4204 err_info.cookie); 4205 continue; 4206 } 4207 } 4208 4209 if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC) 4210 ath12k_warn(ab, "WBM RX err, check HW CC implementation"); 4211 4212 msdu = desc_info->skb; 4213 desc_info->skb = NULL; 4214 4215 device_id = desc_info->device_id; 4216 partner_ab = ath12k_ag_to_ab(ag, device_id); 4217 if (unlikely(!partner_ab)) { 4218 dev_kfree_skb_any(msdu); 4219 4220 /* In case the continuation bit was set 4221 * in the previous record, clean up scatter_msdu_list 4222 */ 4223 ath12k_dp_clean_up_skb_list(&scatter_msdu_list); 4224 continue; 4225 } 4226 4227 list_add_tail(&desc_info->list, &rx_desc_used_list[device_id]); 4228 4229 rxcb = ATH12K_SKB_RXCB(msdu); 4230 dma_unmap_single(partner_ab->dev, rxcb->paddr, 4231 msdu->len + skb_tailroom(msdu), 4232 DMA_FROM_DEVICE); 4233 4234 num_buffs_reaped[device_id]++; 4235 total_num_buffs_reaped++; 4236 4237 if (!err_info.continuation) 4238 budget--; 4239 4240 if (err_info.push_reason != 4241 HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) { 4242 dev_kfree_skb_any(msdu); 4243 continue; 4244 } 4245 4246 msdu_data = (struct hal_rx_desc *)msdu->data; 4247 rxcb->err_rel_src = err_info.err_rel_src; 4248 rxcb->err_code = err_info.err_code; 4249 rxcb->is_first_msdu = err_info.first_msdu; 4250 rxcb->is_last_msdu = err_info.last_msdu; 4251 rxcb->is_continuation = err_info.continuation; 4252 rxcb->rx_desc = msdu_data; 4253 4254 if (err_info.continuation) { 4255 __skb_queue_tail(&scatter_msdu_list, msdu); 4256 continue; 4257 } 4258 4259 hw_link_id = ath12k_dp_rx_get_msdu_src_link(partner_ab, 4260 msdu_data); 4261 if (hw_link_id >= ATH12K_GROUP_MAX_RADIO) { 4262 dev_kfree_skb_any(msdu); 4263 4264 /* In case the continuation bit was set 4265 * in the previous record, clean up scatter_msdu_list 4266 */ 4267
ath12k_dp_clean_up_skb_list(&scatter_msdu_list); 4268 continue; 4269 } 4270 4271 if (!skb_queue_empty(&scatter_msdu_list)) { 4272 struct sk_buff *msdu; 4273 4274 skb_queue_walk(&scatter_msdu_list, msdu) { 4275 rxcb = ATH12K_SKB_RXCB(msdu); 4276 rxcb->hw_link_id = hw_link_id; 4277 } 4278 4279 skb_queue_splice_tail_init(&scatter_msdu_list, 4280 &msdu_list); 4281 } 4282 4283 rxcb = ATH12K_SKB_RXCB(msdu); 4284 rxcb->hw_link_id = hw_link_id; 4285 __skb_queue_tail(&msdu_list, msdu); 4286 } 4287 4288 /* In case the continuation bit was set in the 4289 * last record, clean up scatter_msdu_list 4290 */ 4291 ath12k_dp_clean_up_skb_list(&scatter_msdu_list); 4292 4293 ath12k_hal_srng_access_end(ab, srng); 4294 4295 spin_unlock_bh(&srng->lock); 4296 4297 if (!total_num_buffs_reaped) 4298 goto done; 4299 4300 for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) { 4301 if (!num_buffs_reaped[device_id]) 4302 continue; 4303 4304 partner_ab = ath12k_ag_to_ab(ag, device_id); 4305 rx_ring = &partner_ab->dp.rx_refill_buf_ring; 4306 4307 ath12k_dp_rx_bufs_replenish(ab, rx_ring, 4308 &rx_desc_used_list[device_id], 4309 num_buffs_reaped[device_id]); 4310 } 4311 4312 rcu_read_lock(); 4313 while ((msdu = __skb_dequeue(&msdu_list))) { 4314 rxcb = ATH12K_SKB_RXCB(msdu); 4315 hw_link_id = rxcb->hw_link_id; 4316 4317 device_id = hw_links[hw_link_id].device_id; 4318 partner_ab = ath12k_ag_to_ab(ag, device_id); 4319 if (unlikely(!partner_ab)) { 4320 ath12k_dbg(ab, ATH12K_DBG_DATA, 4321 "Unable to process WBM error msdu due to invalid hw link id %d device id %d\n", 4322 hw_link_id, device_id); 4323 dev_kfree_skb_any(msdu); 4324 continue; 4325 } 4326 4327 pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params, 4328 hw_links[hw_link_id].pdev_idx); 4329 ar = partner_ab->pdevs[pdev_id].ar; 4330 4331 if (!ar || !rcu_dereference(ar->ab->pdevs_active[pdev_id])) { 4332 dev_kfree_skb_any(msdu); 4333 continue; 4334 } 4335 4336 if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) { 4337 dev_kfree_skb_any(msdu); 4338 continue; 4339 } 4340 4341 if (rxcb->err_rel_src < HAL_WBM_REL_SRC_MODULE_MAX) { 4342 device_id = ar->ab->device_id; 4343 device_stats->rx_wbm_rel_source[rxcb->err_rel_src][device_id]++; 4344 } 4345 4346 ath12k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list); 4347 } 4348 rcu_read_unlock(); 4349 done: 4350 return total_num_buffs_reaped; 4351 } 4352 4353 void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab) 4354 { 4355 struct ath12k_dp *dp = &ab->dp; 4356 struct hal_tlv_64_hdr *hdr; 4357 struct hal_srng *srng; 4358 struct ath12k_dp_rx_reo_cmd *cmd, *tmp; 4359 bool found = false; 4360 u16 tag; 4361 struct hal_reo_status reo_status; 4362 4363 srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id]; 4364 4365 memset(&reo_status, 0, sizeof(reo_status)); 4366 4367 spin_lock_bh(&srng->lock); 4368 4369 ath12k_hal_srng_access_begin(ab, srng); 4370 4371 while ((hdr = ath12k_hal_srng_dst_get_next_entry(ab, srng))) { 4372 tag = le64_get_bits(hdr->tl, HAL_SRNG_TLV_HDR_TAG); 4373 4374 switch (tag) { 4375 case HAL_REO_GET_QUEUE_STATS_STATUS: 4376 ath12k_hal_reo_status_queue_stats(ab, hdr, 4377 &reo_status); 4378 break; 4379 case HAL_REO_FLUSH_QUEUE_STATUS: 4380 ath12k_hal_reo_flush_queue_status(ab, hdr, 4381 &reo_status); 4382 break; 4383 case HAL_REO_FLUSH_CACHE_STATUS: 4384 ath12k_hal_reo_flush_cache_status(ab, hdr, 4385 &reo_status); 4386 break; 4387 case HAL_REO_UNBLOCK_CACHE_STATUS: 4388 ath12k_hal_reo_unblk_cache_status(ab, hdr, 4389 &reo_status); 4390 break; 4391 case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS: 4392
ath12k_hal_reo_flush_timeout_list_status(ab, hdr, 4393 &reo_status); 4394 break; 4395 case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS: 4396 ath12k_hal_reo_desc_thresh_reached_status(ab, hdr, 4397 &reo_status); 4398 break; 4399 case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS: 4400 ath12k_hal_reo_update_rx_reo_queue_status(ab, hdr, 4401 &reo_status); 4402 break; 4403 default: 4404 ath12k_warn(ab, "Unknown reo status type %d\n", tag); 4405 continue; 4406 } 4407 4408 spin_lock_bh(&dp->reo_cmd_lock); 4409 list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) { 4410 if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) { 4411 found = true; 4412 list_del(&cmd->list); 4413 break; 4414 } 4415 } 4416 spin_unlock_bh(&dp->reo_cmd_lock); 4417 4418 if (found) { 4419 cmd->handler(dp, (void *)&cmd->data, 4420 reo_status.uniform_hdr.cmd_status); 4421 kfree(cmd); 4422 } 4423 4424 found = false; 4425 } 4426 4427 ath12k_hal_srng_access_end(ab, srng); 4428 4429 spin_unlock_bh(&srng->lock); 4430 } 4431 4432 void ath12k_dp_rx_free(struct ath12k_base *ab) 4433 { 4434 struct ath12k_dp *dp = &ab->dp; 4435 struct dp_srng *srng; 4436 int i; 4437 4438 ath12k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring); 4439 4440 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { 4441 if (ab->hw_params->rx_mac_buf_ring) 4442 ath12k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]); 4443 if (!ab->hw_params->rxdma1_enable) { 4444 srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring; 4445 ath12k_dp_srng_cleanup(ab, srng); 4446 } 4447 } 4448 4449 for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) 4450 ath12k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]); 4451 4452 ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring); 4453 4454 ath12k_dp_rxdma_buf_free(ab); 4455 } 4456 4457 void ath12k_dp_rx_pdev_free(struct ath12k_base *ab, int mac_id) 4458 { 4459 struct ath12k *ar = ab->pdevs[mac_id].ar; 4460 4461 ath12k_dp_rx_pdev_srng_free(ar); 4462 } 4463 4464 int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab) 4465 { 4466 struct ath12k_dp *dp = &ab->dp; 4467 struct htt_rx_ring_tlv_filter tlv_filter = {}; 4468 u32 ring_id; 4469 int ret; 4470 u32 hal_rx_desc_sz = ab->hal.hal_desc_sz; 4471 4472 ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id; 4473 4474 tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING; 4475 tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR; 4476 tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST | 4477 HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST | 4478 HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA; 4479 tlv_filter.offset_valid = true; 4480 tlv_filter.rx_packet_offset = hal_rx_desc_sz; 4481 4482 tlv_filter.rx_mpdu_start_offset = 4483 ab->hal_rx_ops->rx_desc_get_mpdu_start_offset(); 4484 tlv_filter.rx_msdu_end_offset = 4485 ab->hal_rx_ops->rx_desc_get_msdu_end_offset(); 4486 4487 if (ath12k_dp_wmask_compaction_rx_tlv_supported(ab)) { 4488 tlv_filter.rx_mpdu_start_wmask = 4489 ab->hw_params->hal_ops->rxdma_ring_wmask_rx_mpdu_start(); 4490 tlv_filter.rx_msdu_end_wmask = 4491 ab->hw_params->hal_ops->rxdma_ring_wmask_rx_msdu_end(); 4492 ath12k_dbg(ab, ATH12K_DBG_DATA, 4493 "Configuring compact tlv masks rx_mpdu_start_wmask 0x%x rx_msdu_end_wmask 0x%x\n", 4494 tlv_filter.rx_mpdu_start_wmask, tlv_filter.rx_msdu_end_wmask); 4495 } 4496 4497 ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, 0, 4498 HAL_RXDMA_BUF, 4499 DP_RXDMA_REFILL_RING_SIZE, 4500 &tlv_filter); 4501 4502 return ret; 4503 } 4504 4505 int 
ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab) 4506 { 4507 struct ath12k_dp *dp = &ab->dp; 4508 struct htt_rx_ring_tlv_filter tlv_filter = {}; 4509 u32 ring_id; 4510 int ret = 0; 4511 u32 hal_rx_desc_sz = ab->hal.hal_desc_sz; 4512 int i; 4513 4514 ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id; 4515 4516 tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING; 4517 tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR; 4518 tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST | 4519 HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST | 4520 HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA; 4521 tlv_filter.offset_valid = true; 4522 tlv_filter.rx_packet_offset = hal_rx_desc_sz; 4523 4524 tlv_filter.rx_header_offset = offsetof(struct hal_rx_desc_wcn7850, pkt_hdr_tlv); 4525 4526 tlv_filter.rx_mpdu_start_offset = 4527 ab->hal_rx_ops->rx_desc_get_mpdu_start_offset(); 4528 tlv_filter.rx_msdu_end_offset = 4529 ab->hal_rx_ops->rx_desc_get_msdu_end_offset(); 4530 4531 /* TODO: Selectively subscribe to required qwords within msdu_end 4532 * and mpdu_start and setup the mask in below msg 4533 * and modify the rx_desc struct 4534 */ 4535 4536 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { 4537 ring_id = dp->rx_mac_buf_ring[i].ring_id; 4538 ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, i, 4539 HAL_RXDMA_BUF, 4540 DP_RXDMA_REFILL_RING_SIZE, 4541 &tlv_filter); 4542 } 4543 4544 return ret; 4545 } 4546 4547 int ath12k_dp_rx_htt_setup(struct ath12k_base *ab) 4548 { 4549 struct ath12k_dp *dp = &ab->dp; 4550 u32 ring_id; 4551 int i, ret; 4552 4553 /* TODO: Need to verify the HTT setup for QCN9224 */ 4554 ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id; 4555 ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, 0, HAL_RXDMA_BUF); 4556 if (ret) { 4557 ath12k_warn(ab, "failed to configure rx_refill_buf_ring %d\n", 4558 ret); 4559 return ret; 4560 } 4561 4562 if (ab->hw_params->rx_mac_buf_ring) { 4563 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { 4564 ring_id = dp->rx_mac_buf_ring[i].ring_id; 4565 ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, 4566 i, HAL_RXDMA_BUF); 4567 if (ret) { 4568 ath12k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n", 4569 i, ret); 4570 return ret; 4571 } 4572 } 4573 } 4574 4575 for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) { 4576 ring_id = dp->rxdma_err_dst_ring[i].ring_id; 4577 ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, 4578 i, HAL_RXDMA_DST); 4579 if (ret) { 4580 ath12k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n", 4581 i, ret); 4582 return ret; 4583 } 4584 } 4585 4586 if (ab->hw_params->rxdma1_enable) { 4587 ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id; 4588 ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, 4589 0, HAL_RXDMA_MONITOR_BUF); 4590 if (ret) { 4591 ath12k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n", 4592 ret); 4593 return ret; 4594 } 4595 } else { 4596 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { 4597 ring_id = 4598 dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id; 4599 ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, i, 4600 HAL_RXDMA_MONITOR_STATUS); 4601 if (ret) { 4602 ath12k_warn(ab, 4603 "failed to configure mon_status_refill_ring%d %d\n", 4604 i, ret); 4605 return ret; 4606 } 4607 } 4608 } 4609 4610 ret = ab->hw_params->hw_ops->rxdma_ring_sel_config(ab); 4611 if (ret) { 4612 ath12k_warn(ab, "failed to setup rxdma ring selection config\n"); 4613 return ret; 4614 } 4615 4616 return 0; 4617 } 4618 4619 int 
ath12k_dp_rx_alloc(struct ath12k_base *ab) 4620 { 4621 struct ath12k_dp *dp = &ab->dp; 4622 struct dp_srng *srng; 4623 int i, ret; 4624 4625 idr_init(&dp->rxdma_mon_buf_ring.bufs_idr); 4626 spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock); 4627 4628 ret = ath12k_dp_srng_setup(ab, 4629 &dp->rx_refill_buf_ring.refill_buf_ring, 4630 HAL_RXDMA_BUF, 0, 0, 4631 DP_RXDMA_BUF_RING_SIZE); 4632 if (ret) { 4633 ath12k_warn(ab, "failed to setup rx_refill_buf_ring\n"); 4634 return ret; 4635 } 4636 4637 if (ab->hw_params->rx_mac_buf_ring) { 4638 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { 4639 ret = ath12k_dp_srng_setup(ab, 4640 &dp->rx_mac_buf_ring[i], 4641 HAL_RXDMA_BUF, 1, 4642 i, DP_RX_MAC_BUF_RING_SIZE); 4643 if (ret) { 4644 ath12k_warn(ab, "failed to setup rx_mac_buf_ring %d\n", 4645 i); 4646 return ret; 4647 } 4648 } 4649 } 4650 4651 for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) { 4652 ret = ath12k_dp_srng_setup(ab, &dp->rxdma_err_dst_ring[i], 4653 HAL_RXDMA_DST, 0, i, 4654 DP_RXDMA_ERR_DST_RING_SIZE); 4655 if (ret) { 4656 ath12k_warn(ab, "failed to setup rxdma_err_dst_ring %d\n", i); 4657 return ret; 4658 } 4659 } 4660 4661 if (ab->hw_params->rxdma1_enable) { 4662 ret = ath12k_dp_srng_setup(ab, 4663 &dp->rxdma_mon_buf_ring.refill_buf_ring, 4664 HAL_RXDMA_MONITOR_BUF, 0, 0, 4665 DP_RXDMA_MONITOR_BUF_RING_SIZE(ab)); 4666 if (ret) { 4667 ath12k_warn(ab, "failed to setup HAL_RXDMA_MONITOR_BUF\n"); 4668 return ret; 4669 } 4670 } else { 4671 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { 4672 idr_init(&dp->rx_mon_status_refill_ring[i].bufs_idr); 4673 spin_lock_init(&dp->rx_mon_status_refill_ring[i].idr_lock); 4674 } 4675 4676 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { 4677 srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring; 4678 ret = ath12k_dp_srng_setup(ab, srng, 4679 HAL_RXDMA_MONITOR_STATUS, 0, i, 4680 DP_RXDMA_MON_STATUS_RING_SIZE); 4681 if (ret) { 4682 ath12k_warn(ab, "failed to setup mon status ring %d\n", 4683 i); 4684 return ret; 4685 } 4686 } 4687 } 4688 4689 ret = ath12k_dp_rxdma_buf_setup(ab); 4690 if (ret) { 4691 ath12k_warn(ab, "failed to setup rxdma ring\n"); 4692 return ret; 4693 } 4694 4695 return 0; 4696 } 4697 4698 int ath12k_dp_rx_pdev_alloc(struct ath12k_base *ab, int mac_id) 4699 { 4700 struct ath12k *ar = ab->pdevs[mac_id].ar; 4701 struct ath12k_pdev_dp *dp = &ar->dp; 4702 u32 ring_id; 4703 int i; 4704 int ret; 4705 4706 if (!ab->hw_params->rxdma1_enable) 4707 goto out; 4708 4709 ret = ath12k_dp_rx_pdev_srng_alloc(ar); 4710 if (ret) { 4711 ath12k_warn(ab, "failed to setup rx srngs\n"); 4712 return ret; 4713 } 4714 4715 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { 4716 ring_id = dp->rxdma_mon_dst_ring[i].ring_id; 4717 ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, 4718 mac_id + i, 4719 HAL_RXDMA_MONITOR_DST); 4720 if (ret) { 4721 ath12k_warn(ab, 4722 "failed to configure rxdma_mon_dst_ring %d %d\n", 4723 i, ret); 4724 return ret; 4725 } 4726 } 4727 out: 4728 return 0; 4729 } 4730 4731 static int ath12k_dp_rx_pdev_mon_status_attach(struct ath12k *ar) 4732 { 4733 struct ath12k_pdev_dp *dp = &ar->dp; 4734 struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&dp->mon_data; 4735 4736 skb_queue_head_init(&pmon->rx_status_q); 4737 4738 pmon->mon_ppdu_status = DP_PPDU_STATUS_START; 4739 4740 memset(&pmon->rx_mon_stats, 0, 4741 sizeof(pmon->rx_mon_stats)); 4742 return 0; 4743 } 4744 4745 int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar) 4746 { 4747 struct ath12k_pdev_dp *dp = &ar->dp; 4748 struct ath12k_mon_data 
*pmon = &dp->mon_data; 4749 int ret = 0; 4750 4751 ret = ath12k_dp_rx_pdev_mon_status_attach(ar); 4752 if (ret) { 4753 ath12k_warn(ar->ab, "pdev_mon_status_attach() failed"); 4754 return ret; 4755 } 4756 4757 pmon->mon_last_linkdesc_paddr = 0; 4758 pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1; 4759 spin_lock_init(&pmon->mon_lock); 4760 4761 if (!ar->ab->hw_params->rxdma1_enable) 4762 return 0; 4763 4764 INIT_LIST_HEAD(&pmon->dp_rx_mon_mpdu_list); 4765 pmon->mon_mpdu = NULL; 4766 4767 return 0; 4768 } 4769