1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 /* 3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 */ 6 7 #include <linux/ieee80211.h> 8 #include <linux/kernel.h> 9 #include <linux/skbuff.h> 10 #include <crypto/hash.h> 11 #include "core.h" 12 #include "debug.h" 13 #include "hal_desc.h" 14 #include "hw.h" 15 #include "dp_rx.h" 16 #include "hal_rx.h" 17 #include "dp_tx.h" 18 #include "peer.h" 19 #include "dp_mon.h" 20 #include "debugfs_htt_stats.h" 21 22 #define ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ) 23 24 static enum hal_encrypt_type ath12k_dp_rx_h_enctype(struct ath12k_base *ab, 25 struct hal_rx_desc *desc) 26 { 27 if (!ab->hal_rx_ops->rx_desc_encrypt_valid(desc)) 28 return HAL_ENCRYPT_TYPE_OPEN; 29 30 return ab->hal_rx_ops->rx_desc_get_encrypt_type(desc); 31 } 32 33 u8 ath12k_dp_rx_h_decap_type(struct ath12k_base *ab, 34 struct hal_rx_desc *desc) 35 { 36 return ab->hal_rx_ops->rx_desc_get_decap_type(desc); 37 } 38 39 static u8 ath12k_dp_rx_h_mesh_ctl_present(struct ath12k_base *ab, 40 struct hal_rx_desc *desc) 41 { 42 return ab->hal_rx_ops->rx_desc_get_mesh_ctl(desc); 43 } 44 45 static bool ath12k_dp_rx_h_seq_ctrl_valid(struct ath12k_base *ab, 46 struct hal_rx_desc *desc) 47 { 48 return ab->hal_rx_ops->rx_desc_get_mpdu_seq_ctl_vld(desc); 49 } 50 51 static bool ath12k_dp_rx_h_fc_valid(struct ath12k_base *ab, 52 struct hal_rx_desc *desc) 53 { 54 return ab->hal_rx_ops->rx_desc_get_mpdu_fc_valid(desc); 55 } 56 57 static bool ath12k_dp_rx_h_more_frags(struct ath12k_base *ab, 58 struct sk_buff *skb) 59 { 60 struct ieee80211_hdr *hdr; 61 62 hdr = (struct ieee80211_hdr *)(skb->data + ab->hal.hal_desc_sz); 63 return ieee80211_has_morefrags(hdr->frame_control); 64 } 65 66 static u16 ath12k_dp_rx_h_frag_no(struct ath12k_base *ab, 67 struct sk_buff *skb) 68 { 69 struct ieee80211_hdr *hdr; 70 71 hdr = (struct ieee80211_hdr *)(skb->data + ab->hal.hal_desc_sz); 72 return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; 73 } 74 75 static u16 ath12k_dp_rx_h_seq_no(struct ath12k_base *ab, 76 struct hal_rx_desc *desc) 77 { 78 return ab->hal_rx_ops->rx_desc_get_mpdu_start_seq_no(desc); 79 } 80 81 static bool ath12k_dp_rx_h_msdu_done(struct ath12k_base *ab, 82 struct hal_rx_desc *desc) 83 { 84 return ab->hal_rx_ops->dp_rx_h_msdu_done(desc); 85 } 86 87 static bool ath12k_dp_rx_h_l4_cksum_fail(struct ath12k_base *ab, 88 struct hal_rx_desc *desc) 89 { 90 return ab->hal_rx_ops->dp_rx_h_l4_cksum_fail(desc); 91 } 92 93 static bool ath12k_dp_rx_h_ip_cksum_fail(struct ath12k_base *ab, 94 struct hal_rx_desc *desc) 95 { 96 return ab->hal_rx_ops->dp_rx_h_ip_cksum_fail(desc); 97 } 98 99 static bool ath12k_dp_rx_h_is_decrypted(struct ath12k_base *ab, 100 struct hal_rx_desc *desc) 101 { 102 return ab->hal_rx_ops->dp_rx_h_is_decrypted(desc); 103 } 104 105 u32 ath12k_dp_rx_h_mpdu_err(struct ath12k_base *ab, 106 struct hal_rx_desc *desc) 107 { 108 return ab->hal_rx_ops->dp_rx_h_mpdu_err(desc); 109 } 110 111 static u16 ath12k_dp_rx_h_msdu_len(struct ath12k_base *ab, 112 struct hal_rx_desc *desc) 113 { 114 return ab->hal_rx_ops->rx_desc_get_msdu_len(desc); 115 } 116 117 static u8 ath12k_dp_rx_h_sgi(struct ath12k_base *ab, 118 struct hal_rx_desc *desc) 119 { 120 return ab->hal_rx_ops->rx_desc_get_msdu_sgi(desc); 121 } 122 123 static u8 ath12k_dp_rx_h_rate_mcs(struct ath12k_base *ab, 124 struct hal_rx_desc *desc) 125 { 126 return ab->hal_rx_ops->rx_desc_get_msdu_rate_mcs(desc); 127 } 128 129 static u8 ath12k_dp_rx_h_rx_bw(struct ath12k_base *ab, 130 struct hal_rx_desc *desc) 131 { 132 return ab->hal_rx_ops->rx_desc_get_msdu_rx_bw(desc); 133 } 134 135 static u32 ath12k_dp_rx_h_freq(struct ath12k_base *ab, 136 struct hal_rx_desc *desc) 137 { 138 return ab->hal_rx_ops->rx_desc_get_msdu_freq(desc); 139 } 140 141 static u8 ath12k_dp_rx_h_pkt_type(struct ath12k_base *ab, 142 struct hal_rx_desc *desc) 143 { 144 return ab->hal_rx_ops->rx_desc_get_msdu_pkt_type(desc); 145 } 146 147 static u8 ath12k_dp_rx_h_nss(struct ath12k_base *ab, 148 struct hal_rx_desc *desc) 149 { 150 return hweight8(ab->hal_rx_ops->rx_desc_get_msdu_nss(desc)); 151 } 152 153 static u8 ath12k_dp_rx_h_tid(struct ath12k_base *ab, 154 struct hal_rx_desc *desc) 155 { 156 return ab->hal_rx_ops->rx_desc_get_mpdu_tid(desc); 157 } 158 159 static u16 ath12k_dp_rx_h_peer_id(struct ath12k_base *ab, 160 struct hal_rx_desc *desc) 161 { 162 return ab->hal_rx_ops->rx_desc_get_mpdu_peer_id(desc); 163 } 164 165 u8 ath12k_dp_rx_h_l3pad(struct ath12k_base *ab, 166 struct hal_rx_desc *desc) 167 { 168 return ab->hal_rx_ops->rx_desc_get_l3_pad_bytes(desc); 169 } 170 171 static bool ath12k_dp_rx_h_first_msdu(struct ath12k_base *ab, 172 struct hal_rx_desc *desc) 173 { 174 return ab->hal_rx_ops->rx_desc_get_first_msdu(desc); 175 } 176 177 static bool ath12k_dp_rx_h_last_msdu(struct ath12k_base *ab, 178 struct hal_rx_desc *desc) 179 { 180 return ab->hal_rx_ops->rx_desc_get_last_msdu(desc); 181 } 182 183 static void ath12k_dp_rx_desc_end_tlv_copy(struct ath12k_base *ab, 184 struct hal_rx_desc *fdesc, 185 struct hal_rx_desc *ldesc) 186 { 187 ab->hal_rx_ops->rx_desc_copy_end_tlv(fdesc, ldesc); 188 } 189 190 static void ath12k_dp_rxdesc_set_msdu_len(struct ath12k_base *ab, 191 struct hal_rx_desc *desc, 192 u16 len) 193 { 194 ab->hal_rx_ops->rx_desc_set_msdu_len(desc, len); 195 } 196 197 u32 ath12k_dp_rxdesc_get_ppduid(struct ath12k_base *ab, 198 struct hal_rx_desc *rx_desc) 199 { 200 return ab->hal_rx_ops->rx_desc_get_mpdu_ppdu_id(rx_desc); 201 } 202 203 bool ath12k_dp_rxdesc_mpdu_valid(struct ath12k_base *ab, 204 struct hal_rx_desc *rx_desc) 205 { 206 u32 tlv_tag; 207 208 tlv_tag = ab->hal_rx_ops->rx_desc_get_mpdu_start_tag(rx_desc); 209 210 return tlv_tag == HAL_RX_MPDU_START; 211 } 212 213 static bool ath12k_dp_rx_h_is_da_mcbc(struct ath12k_base *ab, 214 struct hal_rx_desc *desc) 215 { 216 return (ath12k_dp_rx_h_first_msdu(ab, desc) && 217 ab->hal_rx_ops->rx_desc_is_da_mcbc(desc)); 218 } 219 220 static bool ath12k_dp_rxdesc_mac_addr2_valid(struct ath12k_base *ab, 221 struct hal_rx_desc *desc) 222 { 223 return ab->hal_rx_ops->rx_desc_mac_addr2_valid(desc); 224 } 225 226 static u8 *ath12k_dp_rxdesc_get_mpdu_start_addr2(struct ath12k_base *ab, 227 struct hal_rx_desc *desc) 228 { 229 return ab->hal_rx_ops->rx_desc_mpdu_start_addr2(desc); 230 } 231 232 static void ath12k_dp_rx_desc_get_dot11_hdr(struct ath12k_base *ab, 233 struct hal_rx_desc *desc, 234 struct ieee80211_hdr *hdr) 235 { 236 ab->hal_rx_ops->rx_desc_get_dot11_hdr(desc, hdr); 237 } 238 239 static void ath12k_dp_rx_desc_get_crypto_header(struct ath12k_base *ab, 240 struct hal_rx_desc *desc, 241 u8 *crypto_hdr, 242 enum hal_encrypt_type enctype) 243 { 244 ab->hal_rx_ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype); 245 } 246 247 static inline u8 ath12k_dp_rx_get_msdu_src_link(struct ath12k_base *ab, 248 struct hal_rx_desc *desc) 249 { 250 return ab->hal_rx_ops->rx_desc_get_msdu_src_link_id(desc); 251 } 252 253 static void ath12k_dp_clean_up_skb_list(struct sk_buff_head *skb_list) 254 { 255 struct sk_buff *skb; 256 257 while ((skb = __skb_dequeue(skb_list))) 258 dev_kfree_skb_any(skb); 259 } 260 261 static size_t ath12k_dp_list_cut_nodes(struct list_head *list, 262 struct list_head *head, 263 size_t count) 264 { 265 struct list_head *cur; 266 struct ath12k_rx_desc_info *rx_desc; 267 size_t nodes = 0; 268 269 if (!count) { 270 INIT_LIST_HEAD(list); 271 goto out; 272 } 273 274 list_for_each(cur, head) { 275 if (!count) 276 break; 277 278 rx_desc = list_entry(cur, struct ath12k_rx_desc_info, list); 279 rx_desc->in_use = true; 280 281 count--; 282 nodes++; 283 } 284 285 list_cut_before(list, head, cur); 286 out: 287 return nodes; 288 } 289 290 static void ath12k_dp_rx_enqueue_free(struct ath12k_dp *dp, 291 struct list_head *used_list) 292 { 293 struct ath12k_rx_desc_info *rx_desc, *safe; 294 295 /* Reset the use flag */ 296 list_for_each_entry_safe(rx_desc, safe, used_list, list) 297 rx_desc->in_use = false; 298 299 spin_lock_bh(&dp->rx_desc_lock); 300 list_splice_tail(used_list, &dp->rx_desc_free_list); 301 spin_unlock_bh(&dp->rx_desc_lock); 302 } 303 304 /* Returns number of Rx buffers replenished */ 305 int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab, 306 struct dp_rxdma_ring *rx_ring, 307 struct list_head *used_list, 308 int req_entries) 309 { 310 struct ath12k_buffer_addr *desc; 311 struct hal_srng *srng; 312 struct sk_buff *skb; 313 int num_free; 314 int num_remain; 315 u32 cookie; 316 dma_addr_t paddr; 317 struct ath12k_dp *dp = &ab->dp; 318 struct ath12k_rx_desc_info *rx_desc; 319 enum hal_rx_buf_return_buf_manager mgr = ab->hw_params->hal_params->rx_buf_rbm; 320 321 req_entries = min(req_entries, rx_ring->bufs_max); 322 323 srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; 324 325 spin_lock_bh(&srng->lock); 326 327 ath12k_hal_srng_access_begin(ab, srng); 328 329 num_free = ath12k_hal_srng_src_num_free(ab, srng, true); 330 if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4)) 331 req_entries = num_free; 332 333 req_entries = min(num_free, req_entries); 334 num_remain = req_entries; 335 336 if (!num_remain) 337 goto out; 338 339 /* Get the descriptor from free list */ 340 if (list_empty(used_list)) { 341 spin_lock_bh(&dp->rx_desc_lock); 342 req_entries = ath12k_dp_list_cut_nodes(used_list, 343 &dp->rx_desc_free_list, 344 num_remain); 345 spin_unlock_bh(&dp->rx_desc_lock); 346 num_remain = req_entries; 347 } 348 349 while (num_remain > 0) { 350 skb = dev_alloc_skb(DP_RX_BUFFER_SIZE + 351 DP_RX_BUFFER_ALIGN_SIZE); 352 if (!skb) 353 break; 354 355 if (!IS_ALIGNED((unsigned long)skb->data, 356 DP_RX_BUFFER_ALIGN_SIZE)) { 357 skb_pull(skb, 358 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) - 359 skb->data); 360 } 361 362 paddr = dma_map_single(ab->dev, skb->data, 363 skb->len + skb_tailroom(skb), 364 DMA_FROM_DEVICE); 365 if (dma_mapping_error(ab->dev, paddr)) 366 goto fail_free_skb; 367 368 rx_desc = list_first_entry_or_null(used_list, 369 struct ath12k_rx_desc_info, 370 list); 371 if (!rx_desc) 372 goto fail_dma_unmap; 373 374 rx_desc->skb = skb; 375 cookie = rx_desc->cookie; 376 377 desc = ath12k_hal_srng_src_get_next_entry(ab, srng); 378 if (!desc) 379 goto fail_dma_unmap; 380 381 list_del(&rx_desc->list); 382 ATH12K_SKB_RXCB(skb)->paddr = paddr; 383 384 num_remain--; 385 386 ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr); 387 } 388 389 goto out; 390 391 fail_dma_unmap: 392 dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), 393 DMA_FROM_DEVICE); 394 fail_free_skb: 395 dev_kfree_skb_any(skb); 396 out: 397 ath12k_hal_srng_access_end(ab, srng); 398 399 if (!list_empty(used_list)) 400 ath12k_dp_rx_enqueue_free(dp, used_list); 401 402 spin_unlock_bh(&srng->lock); 403 404 return req_entries - num_remain; 405 } 406 407 static int ath12k_dp_rxdma_mon_buf_ring_free(struct ath12k_base *ab, 408 struct dp_rxdma_mon_ring *rx_ring) 409 { 410 struct sk_buff *skb; 411 int buf_id; 412 413 spin_lock_bh(&rx_ring->idr_lock); 414 idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) { 415 idr_remove(&rx_ring->bufs_idr, buf_id); 416 /* TODO: Understand where internal driver does this dma_unmap 417 * of rxdma_buffer. 418 */ 419 dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr, 420 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); 421 dev_kfree_skb_any(skb); 422 } 423 424 idr_destroy(&rx_ring->bufs_idr); 425 spin_unlock_bh(&rx_ring->idr_lock); 426 427 return 0; 428 } 429 430 static int ath12k_dp_rxdma_buf_free(struct ath12k_base *ab) 431 { 432 struct ath12k_dp *dp = &ab->dp; 433 int i; 434 435 ath12k_dp_rxdma_mon_buf_ring_free(ab, &dp->rxdma_mon_buf_ring); 436 437 if (ab->hw_params->rxdma1_enable) 438 return 0; 439 440 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) 441 ath12k_dp_rxdma_mon_buf_ring_free(ab, 442 &dp->rx_mon_status_refill_ring[i]); 443 444 return 0; 445 } 446 447 static int ath12k_dp_rxdma_mon_ring_buf_setup(struct ath12k_base *ab, 448 struct dp_rxdma_mon_ring *rx_ring, 449 u32 ringtype) 450 { 451 int num_entries; 452 453 num_entries = rx_ring->refill_buf_ring.size / 454 ath12k_hal_srng_get_entrysize(ab, ringtype); 455 456 rx_ring->bufs_max = num_entries; 457 458 if (ringtype == HAL_RXDMA_MONITOR_STATUS) 459 ath12k_dp_mon_status_bufs_replenish(ab, rx_ring, 460 num_entries); 461 else 462 ath12k_dp_mon_buf_replenish(ab, rx_ring, num_entries); 463 464 return 0; 465 } 466 467 static int ath12k_dp_rxdma_ring_buf_setup(struct ath12k_base *ab, 468 struct dp_rxdma_ring *rx_ring) 469 { 470 LIST_HEAD(list); 471 472 rx_ring->bufs_max = rx_ring->refill_buf_ring.size / 473 ath12k_hal_srng_get_entrysize(ab, HAL_RXDMA_BUF); 474 475 ath12k_dp_rx_bufs_replenish(ab, rx_ring, &list, 0); 476 477 return 0; 478 } 479 480 static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab) 481 { 482 struct ath12k_dp *dp = &ab->dp; 483 struct dp_rxdma_mon_ring *mon_ring; 484 int ret, i; 485 486 ret = ath12k_dp_rxdma_ring_buf_setup(ab, &dp->rx_refill_buf_ring); 487 if (ret) { 488 ath12k_warn(ab, 489 "failed to setup HAL_RXDMA_BUF\n"); 490 return ret; 491 } 492 493 if (ab->hw_params->rxdma1_enable) { 494 ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab, 495 &dp->rxdma_mon_buf_ring, 496 HAL_RXDMA_MONITOR_BUF); 497 if (ret) 498 ath12k_warn(ab, 499 "failed to setup HAL_RXDMA_MONITOR_BUF\n"); 500 return ret; 501 } 502 503 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { 504 mon_ring = &dp->rx_mon_status_refill_ring[i]; 505 ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab, mon_ring, 506 HAL_RXDMA_MONITOR_STATUS); 507 if (ret) { 508 ath12k_warn(ab, 509 "failed to setup HAL_RXDMA_MONITOR_STATUS\n"); 510 return ret; 511 } 512 } 513 514 return 0; 515 } 516 517 static void ath12k_dp_rx_pdev_srng_free(struct ath12k *ar) 518 { 519 struct ath12k_pdev_dp *dp = &ar->dp; 520 struct ath12k_base *ab = ar->ab; 521 int i; 522 523 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) 524 ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_dst_ring[i]); 525 } 526 527 void ath12k_dp_rx_pdev_reo_cleanup(struct ath12k_base *ab) 528 { 529 struct ath12k_dp *dp = &ab->dp; 530 int i; 531 532 for (i = 0; i < DP_REO_DST_RING_MAX; i++) 533 ath12k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]); 534 } 535 536 int ath12k_dp_rx_pdev_reo_setup(struct ath12k_base *ab) 537 { 538 struct ath12k_dp *dp = &ab->dp; 539 int ret; 540 int i; 541 542 for (i = 0; i < DP_REO_DST_RING_MAX; i++) { 543 ret = ath12k_dp_srng_setup(ab, &dp->reo_dst_ring[i], 544 HAL_REO_DST, i, 0, 545 DP_REO_DST_RING_SIZE); 546 if (ret) { 547 ath12k_warn(ab, "failed to setup reo_dst_ring\n"); 548 goto err_reo_cleanup; 549 } 550 } 551 552 return 0; 553 554 err_reo_cleanup: 555 ath12k_dp_rx_pdev_reo_cleanup(ab); 556 557 return ret; 558 } 559 560 static int ath12k_dp_rx_pdev_srng_alloc(struct ath12k *ar) 561 { 562 struct ath12k_pdev_dp *dp = &ar->dp; 563 struct ath12k_base *ab = ar->ab; 564 int i; 565 int ret; 566 u32 mac_id = dp->mac_id; 567 568 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { 569 ret = ath12k_dp_srng_setup(ar->ab, 570 &dp->rxdma_mon_dst_ring[i], 571 HAL_RXDMA_MONITOR_DST, 572 0, mac_id + i, 573 DP_RXDMA_MONITOR_DST_RING_SIZE); 574 if (ret) { 575 ath12k_warn(ar->ab, 576 "failed to setup HAL_RXDMA_MONITOR_DST\n"); 577 return ret; 578 } 579 } 580 581 return 0; 582 } 583 584 void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab) 585 { 586 struct ath12k_dp *dp = &ab->dp; 587 struct ath12k_dp_rx_reo_cmd *cmd, *tmp; 588 struct ath12k_dp_rx_reo_cache_flush_elem *cmd_cache, *tmp_cache; 589 590 spin_lock_bh(&dp->reo_cmd_lock); 591 list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) { 592 list_del(&cmd->list); 593 dma_unmap_single(ab->dev, cmd->data.qbuf.paddr_aligned, 594 cmd->data.qbuf.size, DMA_BIDIRECTIONAL); 595 kfree(cmd->data.qbuf.vaddr); 596 kfree(cmd); 597 } 598 599 list_for_each_entry_safe(cmd_cache, tmp_cache, 600 &dp->reo_cmd_cache_flush_list, list) { 601 list_del(&cmd_cache->list); 602 dp->reo_cmd_cache_flush_count--; 603 dma_unmap_single(ab->dev, cmd_cache->data.qbuf.paddr_aligned, 604 cmd_cache->data.qbuf.size, DMA_BIDIRECTIONAL); 605 kfree(cmd_cache->data.qbuf.vaddr); 606 kfree(cmd_cache); 607 } 608 spin_unlock_bh(&dp->reo_cmd_lock); 609 } 610 611 static void ath12k_dp_reo_cmd_free(struct ath12k_dp *dp, void *ctx, 612 enum hal_reo_cmd_status status) 613 { 614 struct ath12k_dp_rx_tid *rx_tid = ctx; 615 616 if (status != HAL_REO_CMD_SUCCESS) 617 ath12k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n", 618 rx_tid->tid, status); 619 620 dma_unmap_single(dp->ab->dev, rx_tid->qbuf.paddr_aligned, rx_tid->qbuf.size, 621 DMA_BIDIRECTIONAL); 622 kfree(rx_tid->qbuf.vaddr); 623 rx_tid->qbuf.vaddr = NULL; 624 } 625 626 static int ath12k_dp_reo_cmd_send(struct ath12k_base *ab, struct ath12k_dp_rx_tid *rx_tid, 627 enum hal_reo_cmd_type type, 628 struct ath12k_hal_reo_cmd *cmd, 629 void (*cb)(struct ath12k_dp *dp, void *ctx, 630 enum hal_reo_cmd_status status)) 631 { 632 struct ath12k_dp *dp = &ab->dp; 633 struct ath12k_dp_rx_reo_cmd *dp_cmd; 634 struct hal_srng *cmd_ring; 635 int cmd_num; 636 637 cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id]; 638 cmd_num = ath12k_hal_reo_cmd_send(ab, cmd_ring, type, cmd); 639 640 /* cmd_num should start from 1, during failure return the error code */ 641 if (cmd_num < 0) 642 return cmd_num; 643 644 /* reo cmd ring descriptors has cmd_num starting from 1 */ 645 if (cmd_num == 0) 646 return -EINVAL; 647 648 if (!cb) 649 return 0; 650 651 /* Can this be optimized so that we keep the pending command list only 652 * for tid delete command to free up the resource on the command status 653 * indication? 654 */ 655 dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC); 656 657 if (!dp_cmd) 658 return -ENOMEM; 659 660 memcpy(&dp_cmd->data, rx_tid, sizeof(*rx_tid)); 661 dp_cmd->cmd_num = cmd_num; 662 dp_cmd->handler = cb; 663 664 spin_lock_bh(&dp->reo_cmd_lock); 665 list_add_tail(&dp_cmd->list, &dp->reo_cmd_list); 666 spin_unlock_bh(&dp->reo_cmd_lock); 667 668 return 0; 669 } 670 671 static void ath12k_dp_reo_cache_flush(struct ath12k_base *ab, 672 struct ath12k_dp_rx_tid *rx_tid) 673 { 674 struct ath12k_hal_reo_cmd cmd = {0}; 675 unsigned long tot_desc_sz, desc_sz; 676 int ret; 677 678 tot_desc_sz = rx_tid->qbuf.size; 679 desc_sz = ath12k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID); 680 681 while (tot_desc_sz > desc_sz) { 682 tot_desc_sz -= desc_sz; 683 cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned + tot_desc_sz); 684 cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned); 685 ret = ath12k_dp_reo_cmd_send(ab, rx_tid, 686 HAL_REO_CMD_FLUSH_CACHE, &cmd, 687 NULL); 688 if (ret) 689 ath12k_warn(ab, 690 "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n", 691 rx_tid->tid, ret); 692 } 693 694 memset(&cmd, 0, sizeof(cmd)); 695 cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned); 696 cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned); 697 cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS; 698 ret = ath12k_dp_reo_cmd_send(ab, rx_tid, 699 HAL_REO_CMD_FLUSH_CACHE, 700 &cmd, ath12k_dp_reo_cmd_free); 701 if (ret) { 702 ath12k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n", 703 rx_tid->tid, ret); 704 dma_unmap_single(ab->dev, rx_tid->qbuf.paddr_aligned, rx_tid->qbuf.size, 705 DMA_BIDIRECTIONAL); 706 kfree(rx_tid->qbuf.vaddr); 707 rx_tid->qbuf.vaddr = NULL; 708 } 709 } 710 711 static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx, 712 enum hal_reo_cmd_status status) 713 { 714 struct ath12k_base *ab = dp->ab; 715 struct ath12k_dp_rx_tid *rx_tid = ctx; 716 struct ath12k_dp_rx_reo_cache_flush_elem *elem, *tmp; 717 718 if (status == HAL_REO_CMD_DRAIN) { 719 goto free_desc; 720 } else if (status != HAL_REO_CMD_SUCCESS) { 721 /* Shouldn't happen! Cleanup in case of other failure? */ 722 ath12k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n", 723 rx_tid->tid, status); 724 return; 725 } 726 727 elem = kzalloc(sizeof(*elem), GFP_ATOMIC); 728 if (!elem) 729 goto free_desc; 730 731 elem->ts = jiffies; 732 memcpy(&elem->data, rx_tid, sizeof(*rx_tid)); 733 734 spin_lock_bh(&dp->reo_cmd_lock); 735 list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list); 736 dp->reo_cmd_cache_flush_count++; 737 738 /* Flush and invalidate aged REO desc from HW cache */ 739 list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list, 740 list) { 741 if (dp->reo_cmd_cache_flush_count > ATH12K_DP_RX_REO_DESC_FREE_THRES || 742 time_after(jiffies, elem->ts + 743 msecs_to_jiffies(ATH12K_DP_RX_REO_DESC_FREE_TIMEOUT_MS))) { 744 list_del(&elem->list); 745 dp->reo_cmd_cache_flush_count--; 746 747 /* Unlock the reo_cmd_lock before using ath12k_dp_reo_cmd_send() 748 * within ath12k_dp_reo_cache_flush. The reo_cmd_cache_flush_list 749 * is used in only two contexts, one is in this function called 750 * from napi and the other in ath12k_dp_free during core destroy. 751 * Before dp_free, the irqs would be disabled and would wait to 752 * synchronize. Hence there wouldn’t be any race against add or 753 * delete to this list. Hence unlock-lock is safe here. 754 */ 755 spin_unlock_bh(&dp->reo_cmd_lock); 756 757 ath12k_dp_reo_cache_flush(ab, &elem->data); 758 kfree(elem); 759 spin_lock_bh(&dp->reo_cmd_lock); 760 } 761 } 762 spin_unlock_bh(&dp->reo_cmd_lock); 763 764 return; 765 free_desc: 766 dma_unmap_single(ab->dev, rx_tid->qbuf.paddr_aligned, rx_tid->qbuf.size, 767 DMA_BIDIRECTIONAL); 768 kfree(rx_tid->qbuf.vaddr); 769 rx_tid->qbuf.vaddr = NULL; 770 } 771 772 static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u16 tid, 773 dma_addr_t paddr) 774 { 775 struct ath12k_reo_queue_ref *qref; 776 struct ath12k_dp *dp = &ab->dp; 777 bool ml_peer = false; 778 779 if (!ab->hw_params->reoq_lut_support) 780 return; 781 782 if (peer_id & ATH12K_PEER_ML_ID_VALID) { 783 peer_id &= ~ATH12K_PEER_ML_ID_VALID; 784 ml_peer = true; 785 } 786 787 if (ml_peer) 788 qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr + 789 (peer_id * (IEEE80211_NUM_TIDS + 1) + tid); 790 else 791 qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr + 792 (peer_id * (IEEE80211_NUM_TIDS + 1) + tid); 793 794 qref->info0 = u32_encode_bits(lower_32_bits(paddr), 795 BUFFER_ADDR_INFO0_ADDR); 796 qref->info1 = u32_encode_bits(upper_32_bits(paddr), 797 BUFFER_ADDR_INFO1_ADDR) | 798 u32_encode_bits(tid, DP_REO_QREF_NUM); 799 ath12k_hal_reo_shared_qaddr_cache_clear(ab); 800 } 801 802 static void ath12k_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u16 tid) 803 { 804 struct ath12k_reo_queue_ref *qref; 805 struct ath12k_dp *dp = &ab->dp; 806 bool ml_peer = false; 807 808 if (!ab->hw_params->reoq_lut_support) 809 return; 810 811 if (peer_id & ATH12K_PEER_ML_ID_VALID) { 812 peer_id &= ~ATH12K_PEER_ML_ID_VALID; 813 ml_peer = true; 814 } 815 816 if (ml_peer) 817 qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr + 818 (peer_id * (IEEE80211_NUM_TIDS + 1) + tid); 819 else 820 qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr + 821 (peer_id * (IEEE80211_NUM_TIDS + 1) + tid); 822 823 qref->info0 = u32_encode_bits(0, BUFFER_ADDR_INFO0_ADDR); 824 qref->info1 = u32_encode_bits(0, BUFFER_ADDR_INFO1_ADDR) | 825 u32_encode_bits(tid, DP_REO_QREF_NUM); 826 } 827 828 void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar, 829 struct ath12k_peer *peer, u8 tid) 830 { 831 struct ath12k_hal_reo_cmd cmd = {0}; 832 struct ath12k_dp_rx_tid *rx_tid = &peer->rx_tid[tid]; 833 int ret; 834 835 if (!rx_tid->active) 836 return; 837 838 cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS; 839 cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned); 840 cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned); 841 cmd.upd0 = HAL_REO_CMD_UPD0_VLD; 842 ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid, 843 HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd, 844 ath12k_dp_rx_tid_del_func); 845 if (ret) { 846 ath12k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n", 847 tid, ret); 848 dma_unmap_single(ar->ab->dev, rx_tid->qbuf.paddr_aligned, 849 rx_tid->qbuf.size, DMA_BIDIRECTIONAL); 850 kfree(rx_tid->qbuf.vaddr); 851 rx_tid->qbuf.vaddr = NULL; 852 } 853 854 if (peer->mlo) 855 ath12k_peer_rx_tid_qref_reset(ar->ab, peer->ml_id, tid); 856 else 857 ath12k_peer_rx_tid_qref_reset(ar->ab, peer->peer_id, tid); 858 859 rx_tid->active = false; 860 } 861 862 int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab, 863 struct ath12k_buffer_addr *buf_addr_info, 864 enum hal_wbm_rel_bm_act action) 865 { 866 struct hal_wbm_release_ring *desc; 867 struct ath12k_dp *dp = &ab->dp; 868 struct hal_srng *srng; 869 int ret = 0; 870 871 srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id]; 872 873 spin_lock_bh(&srng->lock); 874 875 ath12k_hal_srng_access_begin(ab, srng); 876 877 desc = ath12k_hal_srng_src_get_next_entry(ab, srng); 878 if (!desc) { 879 ret = -ENOBUFS; 880 goto exit; 881 } 882 883 ath12k_hal_rx_msdu_link_desc_set(ab, desc, buf_addr_info, action); 884 885 exit: 886 ath12k_hal_srng_access_end(ab, srng); 887 888 spin_unlock_bh(&srng->lock); 889 890 return ret; 891 } 892 893 static void ath12k_dp_rx_frags_cleanup(struct ath12k_dp_rx_tid *rx_tid, 894 bool rel_link_desc) 895 { 896 struct ath12k_buffer_addr *buf_addr_info; 897 struct ath12k_base *ab = rx_tid->ab; 898 899 lockdep_assert_held(&ab->base_lock); 900 901 if (rx_tid->dst_ring_desc) { 902 if (rel_link_desc) { 903 buf_addr_info = &rx_tid->dst_ring_desc->buf_addr_info; 904 ath12k_dp_rx_link_desc_return(ab, buf_addr_info, 905 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 906 } 907 kfree(rx_tid->dst_ring_desc); 908 rx_tid->dst_ring_desc = NULL; 909 } 910 911 rx_tid->cur_sn = 0; 912 rx_tid->last_frag_no = 0; 913 rx_tid->rx_frag_bitmap = 0; 914 __skb_queue_purge(&rx_tid->rx_frags); 915 } 916 917 void ath12k_dp_rx_peer_tid_cleanup(struct ath12k *ar, struct ath12k_peer *peer) 918 { 919 struct ath12k_dp_rx_tid *rx_tid; 920 int i; 921 922 lockdep_assert_held(&ar->ab->base_lock); 923 924 for (i = 0; i <= IEEE80211_NUM_TIDS; i++) { 925 rx_tid = &peer->rx_tid[i]; 926 927 ath12k_dp_rx_peer_tid_delete(ar, peer, i); 928 ath12k_dp_rx_frags_cleanup(rx_tid, true); 929 930 spin_unlock_bh(&ar->ab->base_lock); 931 timer_delete_sync(&rx_tid->frag_timer); 932 spin_lock_bh(&ar->ab->base_lock); 933 } 934 } 935 936 static int ath12k_peer_rx_tid_reo_update(struct ath12k *ar, 937 struct ath12k_peer *peer, 938 struct ath12k_dp_rx_tid *rx_tid, 939 u32 ba_win_sz, u16 ssn, 940 bool update_ssn) 941 { 942 struct ath12k_hal_reo_cmd cmd = {0}; 943 int ret; 944 945 cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned); 946 cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned); 947 cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS; 948 cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE; 949 cmd.ba_window_size = ba_win_sz; 950 951 if (update_ssn) { 952 cmd.upd0 |= HAL_REO_CMD_UPD0_SSN; 953 cmd.upd2 = u32_encode_bits(ssn, HAL_REO_CMD_UPD2_SSN); 954 } 955 956 ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid, 957 HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd, 958 NULL); 959 if (ret) { 960 ath12k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n", 961 rx_tid->tid, ret); 962 return ret; 963 } 964 965 rx_tid->ba_win_sz = ba_win_sz; 966 967 return 0; 968 } 969 970 static int ath12k_dp_rx_assign_reoq(struct ath12k_base *ab, 971 struct ath12k_sta *ahsta, 972 struct ath12k_dp_rx_tid *rx_tid, 973 u16 ssn, enum hal_pn_type pn_type) 974 { 975 u32 ba_win_sz = rx_tid->ba_win_sz; 976 struct ath12k_reoq_buf *buf; 977 void *vaddr, *vaddr_aligned; 978 dma_addr_t paddr_aligned; 979 u8 tid = rx_tid->tid; 980 u32 hw_desc_sz; 981 int ret; 982 983 buf = &ahsta->reoq_bufs[tid]; 984 if (!buf->vaddr) { 985 /* TODO: Optimize the memory allocation for qos tid based on 986 * the actual BA window size in REO tid update path. 987 */ 988 if (tid == HAL_DESC_REO_NON_QOS_TID) 989 hw_desc_sz = ath12k_hal_reo_qdesc_size(ba_win_sz, tid); 990 else 991 hw_desc_sz = ath12k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid); 992 993 vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC); 994 if (!vaddr) 995 return -ENOMEM; 996 997 vaddr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN); 998 999 ath12k_hal_reo_qdesc_setup(vaddr_aligned, tid, ba_win_sz, 1000 ssn, pn_type); 1001 1002 paddr_aligned = dma_map_single(ab->dev, vaddr_aligned, hw_desc_sz, 1003 DMA_BIDIRECTIONAL); 1004 ret = dma_mapping_error(ab->dev, paddr_aligned); 1005 if (ret) { 1006 kfree(vaddr); 1007 return ret; 1008 } 1009 1010 buf->vaddr = vaddr; 1011 buf->paddr_aligned = paddr_aligned; 1012 buf->size = hw_desc_sz; 1013 } 1014 1015 rx_tid->qbuf = *buf; 1016 rx_tid->active = true; 1017 1018 return 0; 1019 } 1020 1021 int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id, 1022 u8 tid, u32 ba_win_sz, u16 ssn, 1023 enum hal_pn_type pn_type) 1024 { 1025 struct ath12k_base *ab = ar->ab; 1026 struct ath12k_dp *dp = &ab->dp; 1027 struct ath12k_peer *peer; 1028 struct ath12k_sta *ahsta; 1029 struct ath12k_dp_rx_tid *rx_tid; 1030 dma_addr_t paddr_aligned; 1031 int ret; 1032 1033 spin_lock_bh(&ab->base_lock); 1034 1035 peer = ath12k_peer_find(ab, vdev_id, peer_mac); 1036 if (!peer) { 1037 spin_unlock_bh(&ab->base_lock); 1038 ath12k_warn(ab, "failed to find the peer to set up rx tid\n"); 1039 return -ENOENT; 1040 } 1041 1042 if (ab->hw_params->dp_primary_link_only && 1043 !peer->primary_link) { 1044 spin_unlock_bh(&ab->base_lock); 1045 return 0; 1046 } 1047 1048 if (ab->hw_params->reoq_lut_support && 1049 (!dp->reoq_lut.vaddr || !dp->ml_reoq_lut.vaddr)) { 1050 spin_unlock_bh(&ab->base_lock); 1051 ath12k_warn(ab, "reo qref table is not setup\n"); 1052 return -EINVAL; 1053 } 1054 1055 if (peer->peer_id > DP_MAX_PEER_ID || tid > IEEE80211_NUM_TIDS) { 1056 ath12k_warn(ab, "peer id of peer %d or tid %d doesn't allow reoq setup\n", 1057 peer->peer_id, tid); 1058 spin_unlock_bh(&ab->base_lock); 1059 return -EINVAL; 1060 } 1061 1062 rx_tid = &peer->rx_tid[tid]; 1063 paddr_aligned = rx_tid->qbuf.paddr_aligned; 1064 /* Update the tid queue if it is already setup */ 1065 if (rx_tid->active) { 1066 ret = ath12k_peer_rx_tid_reo_update(ar, peer, rx_tid, 1067 ba_win_sz, ssn, true); 1068 spin_unlock_bh(&ab->base_lock); 1069 if (ret) { 1070 ath12k_warn(ab, "failed to update reo for rx tid %d\n", tid); 1071 return ret; 1072 } 1073 1074 if (!ab->hw_params->reoq_lut_support) { 1075 ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, 1076 peer_mac, 1077 paddr_aligned, tid, 1078 1, ba_win_sz); 1079 if (ret) { 1080 ath12k_warn(ab, "failed to setup peer rx reorder queuefor tid %d: %d\n", 1081 tid, ret); 1082 return ret; 1083 } 1084 } 1085 1086 return 0; 1087 } 1088 1089 rx_tid->tid = tid; 1090 1091 rx_tid->ba_win_sz = ba_win_sz; 1092 1093 ahsta = ath12k_sta_to_ahsta(peer->sta); 1094 ret = ath12k_dp_rx_assign_reoq(ab, ahsta, rx_tid, ssn, pn_type); 1095 if (ret) { 1096 spin_unlock_bh(&ab->base_lock); 1097 ath12k_warn(ab, "failed to assign reoq buf for rx tid %u\n", tid); 1098 return ret; 1099 } 1100 1101 if (ab->hw_params->reoq_lut_support) { 1102 /* Update the REO queue LUT at the corresponding peer id 1103 * and tid with qaddr. 1104 */ 1105 if (peer->mlo) 1106 ath12k_peer_rx_tid_qref_setup(ab, peer->ml_id, tid, 1107 paddr_aligned); 1108 else 1109 ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid, 1110 paddr_aligned); 1111 1112 spin_unlock_bh(&ab->base_lock); 1113 } else { 1114 spin_unlock_bh(&ab->base_lock); 1115 ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac, 1116 paddr_aligned, tid, 1, 1117 ba_win_sz); 1118 } 1119 1120 return ret; 1121 } 1122 1123 int ath12k_dp_rx_ampdu_start(struct ath12k *ar, 1124 struct ieee80211_ampdu_params *params, 1125 u8 link_id) 1126 { 1127 struct ath12k_base *ab = ar->ab; 1128 struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta); 1129 struct ath12k_link_sta *arsta; 1130 int vdev_id; 1131 int ret; 1132 1133 lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); 1134 1135 arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy, 1136 ahsta->link[link_id]); 1137 if (!arsta) 1138 return -ENOLINK; 1139 1140 vdev_id = arsta->arvif->vdev_id; 1141 1142 ret = ath12k_dp_rx_peer_tid_setup(ar, arsta->addr, vdev_id, 1143 params->tid, params->buf_size, 1144 params->ssn, arsta->ahsta->pn_type); 1145 if (ret) 1146 ath12k_warn(ab, "failed to setup rx tid %d\n", ret); 1147 1148 return ret; 1149 } 1150 1151 int ath12k_dp_rx_ampdu_stop(struct ath12k *ar, 1152 struct ieee80211_ampdu_params *params, 1153 u8 link_id) 1154 { 1155 struct ath12k_base *ab = ar->ab; 1156 struct ath12k_peer *peer; 1157 struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta); 1158 struct ath12k_link_sta *arsta; 1159 int vdev_id; 1160 bool active; 1161 int ret; 1162 1163 lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); 1164 1165 arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy, 1166 ahsta->link[link_id]); 1167 if (!arsta) 1168 return -ENOLINK; 1169 1170 vdev_id = arsta->arvif->vdev_id; 1171 1172 spin_lock_bh(&ab->base_lock); 1173 1174 peer = ath12k_peer_find(ab, vdev_id, arsta->addr); 1175 if (!peer) { 1176 spin_unlock_bh(&ab->base_lock); 1177 ath12k_warn(ab, "failed to find the peer to stop rx aggregation\n"); 1178 return -ENOENT; 1179 } 1180 1181 active = peer->rx_tid[params->tid].active; 1182 1183 if (!active) { 1184 spin_unlock_bh(&ab->base_lock); 1185 return 0; 1186 } 1187 1188 ret = ath12k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false); 1189 spin_unlock_bh(&ab->base_lock); 1190 if (ret) { 1191 ath12k_warn(ab, "failed to update reo for rx tid %d: %d\n", 1192 params->tid, ret); 1193 return ret; 1194 } 1195 1196 return ret; 1197 } 1198 1199 int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif, 1200 const u8 *peer_addr, 1201 enum set_key_cmd key_cmd, 1202 struct ieee80211_key_conf *key) 1203 { 1204 struct ath12k *ar = arvif->ar; 1205 struct ath12k_base *ab = ar->ab; 1206 struct ath12k_hal_reo_cmd cmd = {0}; 1207 struct ath12k_peer *peer; 1208 struct ath12k_dp_rx_tid *rx_tid; 1209 u8 tid; 1210 int ret = 0; 1211 1212 /* NOTE: Enable PN/TSC replay check offload only for unicast frames. 1213 * We use mac80211 PN/TSC replay check functionality for bcast/mcast 1214 * for now. 1215 */ 1216 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) 1217 return 0; 1218 1219 cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS; 1220 cmd.upd0 = HAL_REO_CMD_UPD0_PN | 1221 HAL_REO_CMD_UPD0_PN_SIZE | 1222 HAL_REO_CMD_UPD0_PN_VALID | 1223 HAL_REO_CMD_UPD0_PN_CHECK | 1224 HAL_REO_CMD_UPD0_SVLD; 1225 1226 switch (key->cipher) { 1227 case WLAN_CIPHER_SUITE_TKIP: 1228 case WLAN_CIPHER_SUITE_CCMP: 1229 case WLAN_CIPHER_SUITE_CCMP_256: 1230 case WLAN_CIPHER_SUITE_GCMP: 1231 case WLAN_CIPHER_SUITE_GCMP_256: 1232 if (key_cmd == SET_KEY) { 1233 cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK; 1234 cmd.pn_size = 48; 1235 } 1236 break; 1237 default: 1238 break; 1239 } 1240 1241 spin_lock_bh(&ab->base_lock); 1242 1243 peer = ath12k_peer_find(ab, arvif->vdev_id, peer_addr); 1244 if (!peer) { 1245 spin_unlock_bh(&ab->base_lock); 1246 ath12k_warn(ab, "failed to find the peer %pM to configure pn replay detection\n", 1247 peer_addr); 1248 return -ENOENT; 1249 } 1250 1251 for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) { 1252 rx_tid = &peer->rx_tid[tid]; 1253 if (!rx_tid->active) 1254 continue; 1255 cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned); 1256 cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned); 1257 ret = ath12k_dp_reo_cmd_send(ab, rx_tid, 1258 HAL_REO_CMD_UPDATE_RX_QUEUE, 1259 &cmd, NULL); 1260 if (ret) { 1261 ath12k_warn(ab, "failed to configure rx tid %d queue of peer %pM for pn replay detection %d\n", 1262 tid, peer_addr, ret); 1263 break; 1264 } 1265 } 1266 1267 spin_unlock_bh(&ab->base_lock); 1268 1269 return ret; 1270 } 1271 1272 static int ath12k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats, 1273 u16 peer_id) 1274 { 1275 int i; 1276 1277 for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) { 1278 if (ppdu_stats->user_stats[i].is_valid_peer_id) { 1279 if (peer_id == ppdu_stats->user_stats[i].peer_id) 1280 return i; 1281 } else { 1282 return i; 1283 } 1284 } 1285 1286 return -EINVAL; 1287 } 1288 1289 static int ath12k_htt_tlv_ppdu_stats_parse(struct ath12k_base *ab, 1290 u16 tag, u16 len, const void *ptr, 1291 void *data) 1292 { 1293 const struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *ba_status; 1294 const struct htt_ppdu_stats_usr_cmpltn_cmn *cmplt_cmn; 1295 const struct htt_ppdu_stats_user_rate *user_rate; 1296 struct htt_ppdu_stats_info *ppdu_info; 1297 struct htt_ppdu_user_stats *user_stats; 1298 int cur_user; 1299 u16 peer_id; 1300 1301 ppdu_info = data; 1302 1303 switch (tag) { 1304 case HTT_PPDU_STATS_TAG_COMMON: 1305 if (len < sizeof(struct htt_ppdu_stats_common)) { 1306 ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n", 1307 len, tag); 1308 return -EINVAL; 1309 } 1310 memcpy(&ppdu_info->ppdu_stats.common, ptr, 1311 sizeof(struct htt_ppdu_stats_common)); 1312 break; 1313 case HTT_PPDU_STATS_TAG_USR_RATE: 1314 if (len < sizeof(struct htt_ppdu_stats_user_rate)) { 1315 ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n", 1316 len, tag); 1317 return -EINVAL; 1318 } 1319 user_rate = ptr; 1320 peer_id = le16_to_cpu(user_rate->sw_peer_id); 1321 cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats, 1322 peer_id); 1323 if (cur_user < 0) 1324 return -EINVAL; 1325 user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; 1326 user_stats->peer_id = peer_id; 1327 user_stats->is_valid_peer_id = true; 1328 memcpy(&user_stats->rate, ptr, 1329 sizeof(struct htt_ppdu_stats_user_rate)); 1330 user_stats->tlv_flags |= BIT(tag); 1331 break; 1332 case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON: 1333 if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) { 1334 ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n", 1335 len, tag); 1336 return -EINVAL; 1337 } 1338 1339 cmplt_cmn = ptr; 1340 peer_id = le16_to_cpu(cmplt_cmn->sw_peer_id); 1341 cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats, 1342 peer_id); 1343 if (cur_user < 0) 1344 return -EINVAL; 1345 user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; 1346 user_stats->peer_id = peer_id; 1347 user_stats->is_valid_peer_id = true; 1348 memcpy(&user_stats->cmpltn_cmn, ptr, 1349 sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)); 1350 user_stats->tlv_flags |= BIT(tag); 1351 break; 1352 case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS: 1353 if (len < 1354 sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) { 1355 ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n", 1356 len, tag); 1357 return -EINVAL; 1358 } 1359 1360 ba_status = ptr; 1361 peer_id = le16_to_cpu(ba_status->sw_peer_id); 1362 cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats, 1363 peer_id); 1364 if (cur_user < 0) 1365 return -EINVAL; 1366 user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; 1367 user_stats->peer_id = peer_id; 1368 user_stats->is_valid_peer_id = true; 1369 memcpy(&user_stats->ack_ba, ptr, 1370 sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)); 1371 user_stats->tlv_flags |= BIT(tag); 1372 break; 1373 } 1374 return 0; 1375 } 1376 1377 int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len, 1378 int (*iter)(struct ath12k_base *ar, u16 tag, u16 len, 1379 const void *ptr, void *data), 1380 void *data) 1381 { 1382 const struct htt_tlv *tlv; 1383 const void *begin = ptr; 1384 u16 tlv_tag, tlv_len; 1385 int ret = -EINVAL; 1386 1387 while (len > 0) { 1388 if (len < sizeof(*tlv)) { 1389 ath12k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n", 1390 ptr - begin, len, sizeof(*tlv)); 1391 return -EINVAL; 1392 } 1393 tlv = (struct htt_tlv *)ptr; 1394 tlv_tag = le32_get_bits(tlv->header, HTT_TLV_TAG); 1395 tlv_len = le32_get_bits(tlv->header, HTT_TLV_LEN); 1396 ptr += sizeof(*tlv); 1397 len -= sizeof(*tlv); 1398 1399 if (tlv_len > len) { 1400 ath12k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n", 1401 tlv_tag, ptr - begin, len, tlv_len); 1402 return -EINVAL; 1403 } 1404 ret = iter(ab, tlv_tag, tlv_len, ptr, data); 1405 if (ret == -ENOMEM) 1406 return ret; 1407 1408 ptr += tlv_len; 1409 len -= tlv_len; 1410 } 1411 return 0; 1412 } 1413 1414 static void 1415 ath12k_update_per_peer_tx_stats(struct ath12k *ar, 1416 struct htt_ppdu_stats *ppdu_stats, u8 user) 1417 { 1418 struct ath12k_base *ab = ar->ab; 1419 struct ath12k_peer *peer; 1420 struct ieee80211_sta *sta; 1421 struct ath12k_sta *ahsta; 1422 struct ath12k_link_sta *arsta; 1423 struct htt_ppdu_stats_user_rate *user_rate; 1424 struct ath12k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats; 1425 struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user]; 1426 struct htt_ppdu_stats_common *common = &ppdu_stats->common; 1427 int ret; 1428 u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0; 1429 u32 v, succ_bytes = 0; 1430 u16 tones, rate = 0, succ_pkts = 0; 1431 u32 tx_duration = 0; 1432 u8 tid = HTT_PPDU_STATS_NON_QOS_TID; 1433 bool is_ampdu = false; 1434 1435 if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE))) 1436 return; 1437 1438 if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON)) 1439 is_ampdu = 1440 HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags); 1441 1442 if (usr_stats->tlv_flags & 1443 BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) { 1444 succ_bytes = le32_to_cpu(usr_stats->ack_ba.success_bytes); 1445 succ_pkts = le32_get_bits(usr_stats->ack_ba.info, 1446 HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M); 1447 tid = le32_get_bits(usr_stats->ack_ba.info, 1448 HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM); 1449 } 1450 1451 if (common->fes_duration_us) 1452 tx_duration = le32_to_cpu(common->fes_duration_us); 1453 1454 user_rate = &usr_stats->rate; 1455 flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags); 1456 bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2; 1457 nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1; 1458 mcs = HTT_USR_RATE_MCS(user_rate->rate_flags); 1459 sgi = HTT_USR_RATE_GI(user_rate->rate_flags); 1460 dcm = HTT_USR_RATE_DCM(user_rate->rate_flags); 1461 1462 /* Note: If host configured fixed rates and in some other special 1463 * cases, the broadcast/management frames are sent in different rates. 1464 * Firmware rate's control to be skipped for this? 1465 */ 1466 1467 if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH12K_HE_MCS_MAX) { 1468 ath12k_warn(ab, "Invalid HE mcs %d peer stats", mcs); 1469 return; 1470 } 1471 1472 if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH12K_VHT_MCS_MAX) { 1473 ath12k_warn(ab, "Invalid VHT mcs %d peer stats", mcs); 1474 return; 1475 } 1476 1477 if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH12K_HT_MCS_MAX || nss < 1)) { 1478 ath12k_warn(ab, "Invalid HT mcs %d nss %d peer stats", 1479 mcs, nss); 1480 return; 1481 } 1482 1483 if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) { 1484 ret = ath12k_mac_hw_ratecode_to_legacy_rate(mcs, 1485 flags, 1486 &rate_idx, 1487 &rate); 1488 if (ret < 0) 1489 return; 1490 } 1491 1492 rcu_read_lock(); 1493 spin_lock_bh(&ab->base_lock); 1494 peer = ath12k_peer_find_by_id(ab, usr_stats->peer_id); 1495 1496 if (!peer || !peer->sta) { 1497 spin_unlock_bh(&ab->base_lock); 1498 rcu_read_unlock(); 1499 return; 1500 } 1501 1502 sta = peer->sta; 1503 ahsta = ath12k_sta_to_ahsta(sta); 1504 arsta = &ahsta->deflink; 1505 1506 memset(&arsta->txrate, 0, sizeof(arsta->txrate)); 1507 1508 switch (flags) { 1509 case WMI_RATE_PREAMBLE_OFDM: 1510 arsta->txrate.legacy = rate; 1511 break; 1512 case WMI_RATE_PREAMBLE_CCK: 1513 arsta->txrate.legacy = rate; 1514 break; 1515 case WMI_RATE_PREAMBLE_HT: 1516 arsta->txrate.mcs = mcs + 8 * (nss - 1); 1517 arsta->txrate.flags = RATE_INFO_FLAGS_MCS; 1518 if (sgi) 1519 arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; 1520 break; 1521 case WMI_RATE_PREAMBLE_VHT: 1522 arsta->txrate.mcs = mcs; 1523 arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS; 1524 if (sgi) 1525 arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; 1526 break; 1527 case WMI_RATE_PREAMBLE_HE: 1528 arsta->txrate.mcs = mcs; 1529 arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS; 1530 arsta->txrate.he_dcm = dcm; 1531 arsta->txrate.he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi); 1532 tones = le16_to_cpu(user_rate->ru_end) - 1533 le16_to_cpu(user_rate->ru_start) + 1; 1534 v = ath12k_he_ru_tones_to_nl80211_he_ru_alloc(tones); 1535 arsta->txrate.he_ru_alloc = v; 1536 break; 1537 } 1538 1539 arsta->txrate.nss = nss; 1540 arsta->txrate.bw = ath12k_mac_bw_to_mac80211_bw(bw); 1541 arsta->tx_duration += tx_duration; 1542 memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info)); 1543 1544 /* PPDU stats reported for mgmt packet doesn't have valid tx bytes. 1545 * So skip peer stats update for mgmt packets. 1546 */ 1547 if (tid < HTT_PPDU_STATS_NON_QOS_TID) { 1548 memset(peer_stats, 0, sizeof(*peer_stats)); 1549 peer_stats->succ_pkts = succ_pkts; 1550 peer_stats->succ_bytes = succ_bytes; 1551 peer_stats->is_ampdu = is_ampdu; 1552 peer_stats->duration = tx_duration; 1553 peer_stats->ba_fails = 1554 HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) + 1555 HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags); 1556 } 1557 1558 spin_unlock_bh(&ab->base_lock); 1559 rcu_read_unlock(); 1560 } 1561 1562 static void ath12k_htt_update_ppdu_stats(struct ath12k *ar, 1563 struct htt_ppdu_stats *ppdu_stats) 1564 { 1565 u8 user; 1566 1567 for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++) 1568 ath12k_update_per_peer_tx_stats(ar, ppdu_stats, user); 1569 } 1570 1571 static 1572 struct htt_ppdu_stats_info *ath12k_dp_htt_get_ppdu_desc(struct ath12k *ar, 1573 u32 ppdu_id) 1574 { 1575 struct htt_ppdu_stats_info *ppdu_info; 1576 1577 lockdep_assert_held(&ar->data_lock); 1578 if (!list_empty(&ar->ppdu_stats_info)) { 1579 list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) { 1580 if (ppdu_info->ppdu_id == ppdu_id) 1581 return ppdu_info; 1582 } 1583 1584 if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) { 1585 ppdu_info = list_first_entry(&ar->ppdu_stats_info, 1586 typeof(*ppdu_info), list); 1587 list_del(&ppdu_info->list); 1588 ar->ppdu_stat_list_depth--; 1589 ath12k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats); 1590 kfree(ppdu_info); 1591 } 1592 } 1593 1594 ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC); 1595 if (!ppdu_info) 1596 return NULL; 1597 1598 list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info); 1599 ar->ppdu_stat_list_depth++; 1600 1601 return ppdu_info; 1602 } 1603 1604 static void ath12k_copy_to_delay_stats(struct ath12k_peer *peer, 1605 struct htt_ppdu_user_stats *usr_stats) 1606 { 1607 peer->ppdu_stats_delayba.sw_peer_id = le16_to_cpu(usr_stats->rate.sw_peer_id); 1608 peer->ppdu_stats_delayba.info0 = le32_to_cpu(usr_stats->rate.info0); 1609 peer->ppdu_stats_delayba.ru_end = le16_to_cpu(usr_stats->rate.ru_end); 1610 peer->ppdu_stats_delayba.ru_start = le16_to_cpu(usr_stats->rate.ru_start); 1611 peer->ppdu_stats_delayba.info1 = le32_to_cpu(usr_stats->rate.info1); 1612 peer->ppdu_stats_delayba.rate_flags = le32_to_cpu(usr_stats->rate.rate_flags); 1613 peer->ppdu_stats_delayba.resp_rate_flags = 1614 le32_to_cpu(usr_stats->rate.resp_rate_flags); 1615 1616 peer->delayba_flag = true; 1617 } 1618 1619 static void ath12k_copy_to_bar(struct ath12k_peer *peer, 1620 struct htt_ppdu_user_stats *usr_stats) 1621 { 1622 usr_stats->rate.sw_peer_id = cpu_to_le16(peer->ppdu_stats_delayba.sw_peer_id); 1623 usr_stats->rate.info0 = cpu_to_le32(peer->ppdu_stats_delayba.info0); 1624 usr_stats->rate.ru_end = cpu_to_le16(peer->ppdu_stats_delayba.ru_end); 1625 usr_stats->rate.ru_start = cpu_to_le16(peer->ppdu_stats_delayba.ru_start); 1626 usr_stats->rate.info1 = cpu_to_le32(peer->ppdu_stats_delayba.info1); 1627 usr_stats->rate.rate_flags = cpu_to_le32(peer->ppdu_stats_delayba.rate_flags); 1628 usr_stats->rate.resp_rate_flags = 1629 cpu_to_le32(peer->ppdu_stats_delayba.resp_rate_flags); 1630 1631 peer->delayba_flag = false; 1632 } 1633 1634 static int ath12k_htt_pull_ppdu_stats(struct ath12k_base *ab, 1635 struct sk_buff *skb) 1636 { 1637 struct ath12k_htt_ppdu_stats_msg *msg; 1638 struct htt_ppdu_stats_info *ppdu_info; 1639 struct ath12k_peer *peer = NULL; 1640 struct htt_ppdu_user_stats *usr_stats = NULL; 1641 u32 peer_id = 0; 1642 struct ath12k *ar; 1643 int ret, i; 1644 u8 pdev_id; 1645 u32 ppdu_id, len; 1646 1647 msg = (struct ath12k_htt_ppdu_stats_msg *)skb->data; 1648 len = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE); 1649 if (len > (skb->len - struct_size(msg, data, 0))) { 1650 ath12k_warn(ab, 1651 "HTT PPDU STATS event has unexpected payload size %u, should be smaller than %u\n", 1652 len, skb->len); 1653 return -EINVAL; 1654 } 1655 1656 pdev_id = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PDEV_ID); 1657 ppdu_id = le32_to_cpu(msg->ppdu_id); 1658 1659 rcu_read_lock(); 1660 ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id); 1661 if (!ar) { 1662 ret = -EINVAL; 1663 goto exit; 1664 } 1665 1666 spin_lock_bh(&ar->data_lock); 1667 ppdu_info = ath12k_dp_htt_get_ppdu_desc(ar, ppdu_id); 1668 if (!ppdu_info) { 1669 spin_unlock_bh(&ar->data_lock); 1670 ret = -EINVAL; 1671 goto exit; 1672 } 1673 1674 ppdu_info->ppdu_id = ppdu_id; 1675 ret = ath12k_dp_htt_tlv_iter(ab, msg->data, len, 1676 ath12k_htt_tlv_ppdu_stats_parse, 1677 (void *)ppdu_info); 1678 if (ret) { 1679 spin_unlock_bh(&ar->data_lock); 1680 ath12k_warn(ab, "Failed to parse tlv %d\n", ret); 1681 goto exit; 1682 } 1683 1684 if (ppdu_info->ppdu_stats.common.num_users >= HTT_PPDU_STATS_MAX_USERS) { 1685 spin_unlock_bh(&ar->data_lock); 1686 ath12k_warn(ab, 1687 "HTT PPDU STATS event has unexpected num_users %u, should be smaller than %u\n", 1688 ppdu_info->ppdu_stats.common.num_users, 1689 HTT_PPDU_STATS_MAX_USERS); 1690 ret = -EINVAL; 1691 goto exit; 1692 } 1693 1694 /* back up data rate tlv for all peers */ 1695 if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_DATA && 1696 (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON)) && 1697 ppdu_info->delay_ba) { 1698 for (i = 0; i < ppdu_info->ppdu_stats.common.num_users; i++) { 1699 peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id; 1700 spin_lock_bh(&ab->base_lock); 1701 peer = ath12k_peer_find_by_id(ab, peer_id); 1702 if (!peer) { 1703 spin_unlock_bh(&ab->base_lock); 1704 continue; 1705 } 1706 1707 usr_stats = &ppdu_info->ppdu_stats.user_stats[i]; 1708 if (usr_stats->delay_ba) 1709 ath12k_copy_to_delay_stats(peer, usr_stats); 1710 spin_unlock_bh(&ab->base_lock); 1711 } 1712 } 1713 1714 /* restore all peers' data rate tlv to mu-bar tlv */ 1715 if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_BAR && 1716 (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON))) { 1717 for (i = 0; i < ppdu_info->bar_num_users; i++) { 1718 peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id; 1719 spin_lock_bh(&ab->base_lock); 1720 peer = ath12k_peer_find_by_id(ab, peer_id); 1721 if (!peer) { 1722 spin_unlock_bh(&ab->base_lock); 1723 continue; 1724 } 1725 1726 usr_stats = &ppdu_info->ppdu_stats.user_stats[i]; 1727 if (peer->delayba_flag) 1728 ath12k_copy_to_bar(peer, usr_stats); 1729 spin_unlock_bh(&ab->base_lock); 1730 } 1731 } 1732 1733 spin_unlock_bh(&ar->data_lock); 1734 1735 exit: 1736 rcu_read_unlock(); 1737 1738 return ret; 1739 } 1740 1741 static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab, 1742 struct sk_buff *skb) 1743 { 1744 struct ath12k_htt_mlo_offset_msg *msg; 1745 struct ath12k_pdev *pdev; 1746 struct ath12k *ar; 1747 u8 pdev_id; 1748 1749 msg = (struct ath12k_htt_mlo_offset_msg *)skb->data; 1750 pdev_id = u32_get_bits(__le32_to_cpu(msg->info), 1751 HTT_T2H_MLO_OFFSET_INFO_PDEV_ID); 1752 1753 rcu_read_lock(); 1754 ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id); 1755 if (!ar) { 1756 /* It is possible that the ar is not yet active (started). 1757 * The above function will only look for the active pdev 1758 * and hence %NULL return is possible. Just silently 1759 * discard this message 1760 */ 1761 goto exit; 1762 } 1763 1764 spin_lock_bh(&ar->data_lock); 1765 pdev = ar->pdev; 1766 1767 pdev->timestamp.info = __le32_to_cpu(msg->info); 1768 pdev->timestamp.sync_timestamp_lo_us = __le32_to_cpu(msg->sync_timestamp_lo_us); 1769 pdev->timestamp.sync_timestamp_hi_us = __le32_to_cpu(msg->sync_timestamp_hi_us); 1770 pdev->timestamp.mlo_offset_lo = __le32_to_cpu(msg->mlo_offset_lo); 1771 pdev->timestamp.mlo_offset_hi = __le32_to_cpu(msg->mlo_offset_hi); 1772 pdev->timestamp.mlo_offset_clks = __le32_to_cpu(msg->mlo_offset_clks); 1773 pdev->timestamp.mlo_comp_clks = __le32_to_cpu(msg->mlo_comp_clks); 1774 pdev->timestamp.mlo_comp_timer = __le32_to_cpu(msg->mlo_comp_timer); 1775 1776 spin_unlock_bh(&ar->data_lock); 1777 exit: 1778 rcu_read_unlock(); 1779 } 1780 1781 void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab, 1782 struct sk_buff *skb) 1783 { 1784 struct ath12k_dp *dp = &ab->dp; 1785 struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data; 1786 enum htt_t2h_msg_type type; 1787 u16 peer_id; 1788 u8 vdev_id; 1789 u8 mac_addr[ETH_ALEN]; 1790 u16 peer_mac_h16; 1791 u16 ast_hash = 0; 1792 u16 hw_peer_id; 1793 1794 type = le32_get_bits(resp->version_msg.version, HTT_T2H_MSG_TYPE); 1795 1796 ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type); 1797 1798 switch (type) { 1799 case HTT_T2H_MSG_TYPE_VERSION_CONF: 1800 dp->htt_tgt_ver_major = le32_get_bits(resp->version_msg.version, 1801 HTT_T2H_VERSION_CONF_MAJOR); 1802 dp->htt_tgt_ver_minor = le32_get_bits(resp->version_msg.version, 1803 HTT_T2H_VERSION_CONF_MINOR); 1804 complete(&dp->htt_tgt_version_received); 1805 break; 1806 /* TODO: remove unused peer map versions after testing */ 1807 case HTT_T2H_MSG_TYPE_PEER_MAP: 1808 vdev_id = le32_get_bits(resp->peer_map_ev.info, 1809 HTT_T2H_PEER_MAP_INFO_VDEV_ID); 1810 peer_id = le32_get_bits(resp->peer_map_ev.info, 1811 HTT_T2H_PEER_MAP_INFO_PEER_ID); 1812 peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1, 1813 HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16); 1814 ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32), 1815 peer_mac_h16, mac_addr); 1816 ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0); 1817 break; 1818 case HTT_T2H_MSG_TYPE_PEER_MAP2: 1819 vdev_id = le32_get_bits(resp->peer_map_ev.info, 1820 HTT_T2H_PEER_MAP_INFO_VDEV_ID); 1821 peer_id = le32_get_bits(resp->peer_map_ev.info, 1822 HTT_T2H_PEER_MAP_INFO_PEER_ID); 1823 peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1, 1824 HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16); 1825 ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32), 1826 peer_mac_h16, mac_addr); 1827 ast_hash = le32_get_bits(resp->peer_map_ev.info2, 1828 HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL); 1829 hw_peer_id = le32_get_bits(resp->peer_map_ev.info1, 1830 HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID); 1831 ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash, 1832 hw_peer_id); 1833 break; 1834 case HTT_T2H_MSG_TYPE_PEER_MAP3: 1835 vdev_id = le32_get_bits(resp->peer_map_ev.info, 1836 HTT_T2H_PEER_MAP_INFO_VDEV_ID); 1837 peer_id = le32_get_bits(resp->peer_map_ev.info, 1838 HTT_T2H_PEER_MAP_INFO_PEER_ID); 1839 peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1, 1840 HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16); 1841 ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32), 1842 peer_mac_h16, mac_addr); 1843 ast_hash = le32_get_bits(resp->peer_map_ev.info2, 1844 HTT_T2H_PEER_MAP3_INFO2_AST_HASH_VAL); 1845 hw_peer_id = le32_get_bits(resp->peer_map_ev.info2, 1846 HTT_T2H_PEER_MAP3_INFO2_HW_PEER_ID); 1847 ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash, 1848 hw_peer_id); 1849 break; 1850 case HTT_T2H_MSG_TYPE_PEER_UNMAP: 1851 case HTT_T2H_MSG_TYPE_PEER_UNMAP2: 1852 peer_id = le32_get_bits(resp->peer_unmap_ev.info, 1853 HTT_T2H_PEER_UNMAP_INFO_PEER_ID); 1854 ath12k_peer_unmap_event(ab, peer_id); 1855 break; 1856 case HTT_T2H_MSG_TYPE_PPDU_STATS_IND: 1857 ath12k_htt_pull_ppdu_stats(ab, skb); 1858 break; 1859 case HTT_T2H_MSG_TYPE_EXT_STATS_CONF: 1860 ath12k_debugfs_htt_ext_stats_handler(ab, skb); 1861 break; 1862 case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND: 1863 ath12k_htt_mlo_offset_event_handler(ab, skb); 1864 break; 1865 default: 1866 ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt event %d not handled\n", 1867 type); 1868 break; 1869 } 1870 1871 dev_kfree_skb_any(skb); 1872 } 1873 1874 static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar, 1875 struct sk_buff_head *msdu_list, 1876 struct sk_buff *first, struct sk_buff *last, 1877 u8 l3pad_bytes, int msdu_len) 1878 { 1879 struct ath12k_base *ab = ar->ab; 1880 struct sk_buff *skb; 1881 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first); 1882 int buf_first_hdr_len, buf_first_len; 1883 struct hal_rx_desc *ldesc; 1884 int space_extra, rem_len, buf_len; 1885 u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; 1886 bool is_continuation; 1887 1888 /* As the msdu is spread across multiple rx buffers, 1889 * find the offset to the start of msdu for computing 1890 * the length of the msdu in the first buffer. 1891 */ 1892 buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes; 1893 buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len; 1894 1895 if (WARN_ON_ONCE(msdu_len <= buf_first_len)) { 1896 skb_put(first, buf_first_hdr_len + msdu_len); 1897 skb_pull(first, buf_first_hdr_len); 1898 return 0; 1899 } 1900 1901 ldesc = (struct hal_rx_desc *)last->data; 1902 rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, ldesc); 1903 rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, ldesc); 1904 1905 /* MSDU spans over multiple buffers because the length of the MSDU 1906 * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data 1907 * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. 1908 */ 1909 skb_put(first, DP_RX_BUFFER_SIZE); 1910 skb_pull(first, buf_first_hdr_len); 1911 1912 /* When an MSDU spread over multiple buffers MSDU_END 1913 * tlvs are valid only in the last buffer. Copy those tlvs. 1914 */ 1915 ath12k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc); 1916 1917 space_extra = msdu_len - (buf_first_len + skb_tailroom(first)); 1918 if (space_extra > 0 && 1919 (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) { 1920 /* Free up all buffers of the MSDU */ 1921 while ((skb = __skb_dequeue(msdu_list)) != NULL) { 1922 rxcb = ATH12K_SKB_RXCB(skb); 1923 if (!rxcb->is_continuation) { 1924 dev_kfree_skb_any(skb); 1925 break; 1926 } 1927 dev_kfree_skb_any(skb); 1928 } 1929 return -ENOMEM; 1930 } 1931 1932 rem_len = msdu_len - buf_first_len; 1933 while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) { 1934 rxcb = ATH12K_SKB_RXCB(skb); 1935 is_continuation = rxcb->is_continuation; 1936 if (is_continuation) 1937 buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz; 1938 else 1939 buf_len = rem_len; 1940 1941 if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) { 1942 WARN_ON_ONCE(1); 1943 dev_kfree_skb_any(skb); 1944 return -EINVAL; 1945 } 1946 1947 skb_put(skb, buf_len + hal_rx_desc_sz); 1948 skb_pull(skb, hal_rx_desc_sz); 1949 skb_copy_from_linear_data(skb, skb_put(first, buf_len), 1950 buf_len); 1951 dev_kfree_skb_any(skb); 1952 1953 rem_len -= buf_len; 1954 if (!is_continuation) 1955 break; 1956 } 1957 1958 return 0; 1959 } 1960 1961 static struct sk_buff *ath12k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list, 1962 struct sk_buff *first) 1963 { 1964 struct sk_buff *skb; 1965 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first); 1966 1967 if (!rxcb->is_continuation) 1968 return first; 1969 1970 skb_queue_walk(msdu_list, skb) { 1971 rxcb = ATH12K_SKB_RXCB(skb); 1972 if (!rxcb->is_continuation) 1973 return skb; 1974 } 1975 1976 return NULL; 1977 } 1978 1979 static void ath12k_dp_rx_h_csum_offload(struct sk_buff *msdu, 1980 struct ath12k_dp_rx_info *rx_info) 1981 { 1982 msdu->ip_summed = (rx_info->ip_csum_fail || rx_info->l4_csum_fail) ? 1983 CHECKSUM_NONE : CHECKSUM_UNNECESSARY; 1984 } 1985 1986 int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar, enum hal_encrypt_type enctype) 1987 { 1988 switch (enctype) { 1989 case HAL_ENCRYPT_TYPE_OPEN: 1990 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 1991 case HAL_ENCRYPT_TYPE_TKIP_MIC: 1992 return 0; 1993 case HAL_ENCRYPT_TYPE_CCMP_128: 1994 return IEEE80211_CCMP_MIC_LEN; 1995 case HAL_ENCRYPT_TYPE_CCMP_256: 1996 return IEEE80211_CCMP_256_MIC_LEN; 1997 case HAL_ENCRYPT_TYPE_GCMP_128: 1998 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 1999 return IEEE80211_GCMP_MIC_LEN; 2000 case HAL_ENCRYPT_TYPE_WEP_40: 2001 case HAL_ENCRYPT_TYPE_WEP_104: 2002 case HAL_ENCRYPT_TYPE_WEP_128: 2003 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 2004 case HAL_ENCRYPT_TYPE_WAPI: 2005 break; 2006 } 2007 2008 ath12k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype); 2009 return 0; 2010 } 2011 2012 static int ath12k_dp_rx_crypto_param_len(struct ath12k *ar, 2013 enum hal_encrypt_type enctype) 2014 { 2015 switch (enctype) { 2016 case HAL_ENCRYPT_TYPE_OPEN: 2017 return 0; 2018 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 2019 case HAL_ENCRYPT_TYPE_TKIP_MIC: 2020 return IEEE80211_TKIP_IV_LEN; 2021 case HAL_ENCRYPT_TYPE_CCMP_128: 2022 return IEEE80211_CCMP_HDR_LEN; 2023 case HAL_ENCRYPT_TYPE_CCMP_256: 2024 return IEEE80211_CCMP_256_HDR_LEN; 2025 case HAL_ENCRYPT_TYPE_GCMP_128: 2026 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 2027 return IEEE80211_GCMP_HDR_LEN; 2028 case HAL_ENCRYPT_TYPE_WEP_40: 2029 case HAL_ENCRYPT_TYPE_WEP_104: 2030 case HAL_ENCRYPT_TYPE_WEP_128: 2031 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 2032 case HAL_ENCRYPT_TYPE_WAPI: 2033 break; 2034 } 2035 2036 ath12k_warn(ar->ab, "unsupported encryption type %d\n", enctype); 2037 return 0; 2038 } 2039 2040 static int ath12k_dp_rx_crypto_icv_len(struct ath12k *ar, 2041 enum hal_encrypt_type enctype) 2042 { 2043 switch (enctype) { 2044 case HAL_ENCRYPT_TYPE_OPEN: 2045 case HAL_ENCRYPT_TYPE_CCMP_128: 2046 case HAL_ENCRYPT_TYPE_CCMP_256: 2047 case HAL_ENCRYPT_TYPE_GCMP_128: 2048 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 2049 return 0; 2050 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 2051 case HAL_ENCRYPT_TYPE_TKIP_MIC: 2052 return IEEE80211_TKIP_ICV_LEN; 2053 case HAL_ENCRYPT_TYPE_WEP_40: 2054 case HAL_ENCRYPT_TYPE_WEP_104: 2055 case HAL_ENCRYPT_TYPE_WEP_128: 2056 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 2057 case HAL_ENCRYPT_TYPE_WAPI: 2058 break; 2059 } 2060 2061 ath12k_warn(ar->ab, "unsupported encryption type %d\n", enctype); 2062 return 0; 2063 } 2064 2065 static void ath12k_dp_rx_h_undecap_nwifi(struct ath12k *ar, 2066 struct sk_buff *msdu, 2067 enum hal_encrypt_type enctype, 2068 struct ieee80211_rx_status *status) 2069 { 2070 struct ath12k_base *ab = ar->ab; 2071 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 2072 u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN]; 2073 struct ieee80211_hdr *hdr; 2074 size_t hdr_len; 2075 u8 *crypto_hdr; 2076 u16 qos_ctl; 2077 2078 /* pull decapped header */ 2079 hdr = (struct ieee80211_hdr *)msdu->data; 2080 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2081 skb_pull(msdu, hdr_len); 2082 2083 /* Rebuild qos header */ 2084 hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA); 2085 2086 /* Reset the order bit as the HT_Control header is stripped */ 2087 hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER)); 2088 2089 qos_ctl = rxcb->tid; 2090 2091 if (ath12k_dp_rx_h_mesh_ctl_present(ab, rxcb->rx_desc)) 2092 qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT; 2093 2094 /* TODO: Add other QoS ctl fields when required */ 2095 2096 /* copy decap header before overwriting for reuse below */ 2097 memcpy(decap_hdr, hdr, hdr_len); 2098 2099 /* Rebuild crypto header for mac80211 use */ 2100 if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 2101 crypto_hdr = skb_push(msdu, ath12k_dp_rx_crypto_param_len(ar, enctype)); 2102 ath12k_dp_rx_desc_get_crypto_header(ar->ab, 2103 rxcb->rx_desc, crypto_hdr, 2104 enctype); 2105 } 2106 2107 memcpy(skb_push(msdu, 2108 IEEE80211_QOS_CTL_LEN), &qos_ctl, 2109 IEEE80211_QOS_CTL_LEN); 2110 memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len); 2111 } 2112 2113 static void ath12k_dp_rx_h_undecap_raw(struct ath12k *ar, struct sk_buff *msdu, 2114 enum hal_encrypt_type enctype, 2115 struct ieee80211_rx_status *status, 2116 bool decrypted) 2117 { 2118 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 2119 struct ieee80211_hdr *hdr; 2120 size_t hdr_len; 2121 size_t crypto_len; 2122 2123 if (!rxcb->is_first_msdu || 2124 !(rxcb->is_first_msdu && rxcb->is_last_msdu)) { 2125 WARN_ON_ONCE(1); 2126 return; 2127 } 2128 2129 skb_trim(msdu, msdu->len - FCS_LEN); 2130 2131 if (!decrypted) 2132 return; 2133 2134 hdr = (void *)msdu->data; 2135 2136 /* Tail */ 2137 if (status->flag & RX_FLAG_IV_STRIPPED) { 2138 skb_trim(msdu, msdu->len - 2139 ath12k_dp_rx_crypto_mic_len(ar, enctype)); 2140 2141 skb_trim(msdu, msdu->len - 2142 ath12k_dp_rx_crypto_icv_len(ar, enctype)); 2143 } else { 2144 /* MIC */ 2145 if (status->flag & RX_FLAG_MIC_STRIPPED) 2146 skb_trim(msdu, msdu->len - 2147 ath12k_dp_rx_crypto_mic_len(ar, enctype)); 2148 2149 /* ICV */ 2150 if (status->flag & RX_FLAG_ICV_STRIPPED) 2151 skb_trim(msdu, msdu->len - 2152 ath12k_dp_rx_crypto_icv_len(ar, enctype)); 2153 } 2154 2155 /* MMIC */ 2156 if ((status->flag & RX_FLAG_MMIC_STRIPPED) && 2157 !ieee80211_has_morefrags(hdr->frame_control) && 2158 enctype == HAL_ENCRYPT_TYPE_TKIP_MIC) 2159 skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN); 2160 2161 /* Head */ 2162 if (status->flag & RX_FLAG_IV_STRIPPED) { 2163 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2164 crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype); 2165 2166 memmove(msdu->data + crypto_len, msdu->data, hdr_len); 2167 skb_pull(msdu, crypto_len); 2168 } 2169 } 2170 2171 static void ath12k_get_dot11_hdr_from_rx_desc(struct ath12k *ar, 2172 struct sk_buff *msdu, 2173 struct ath12k_skb_rxcb *rxcb, 2174 struct ieee80211_rx_status *status, 2175 enum hal_encrypt_type enctype) 2176 { 2177 struct hal_rx_desc *rx_desc = rxcb->rx_desc; 2178 struct ath12k_base *ab = ar->ab; 2179 size_t hdr_len, crypto_len; 2180 struct ieee80211_hdr hdr; 2181 __le16 qos_ctl; 2182 u8 *crypto_hdr, mesh_ctrl; 2183 2184 ath12k_dp_rx_desc_get_dot11_hdr(ab, rx_desc, &hdr); 2185 hdr_len = ieee80211_hdrlen(hdr.frame_control); 2186 mesh_ctrl = ath12k_dp_rx_h_mesh_ctl_present(ab, rx_desc); 2187 2188 if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 2189 crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype); 2190 crypto_hdr = skb_push(msdu, crypto_len); 2191 ath12k_dp_rx_desc_get_crypto_header(ab, rx_desc, crypto_hdr, enctype); 2192 } 2193 2194 skb_push(msdu, hdr_len); 2195 memcpy(msdu->data, &hdr, min(hdr_len, sizeof(hdr))); 2196 2197 if (rxcb->is_mcbc) 2198 status->flag &= ~RX_FLAG_PN_VALIDATED; 2199 2200 /* Add QOS header */ 2201 if (ieee80211_is_data_qos(hdr.frame_control)) { 2202 struct ieee80211_hdr *qos_ptr = (struct ieee80211_hdr *)msdu->data; 2203 2204 qos_ctl = cpu_to_le16(rxcb->tid & IEEE80211_QOS_CTL_TID_MASK); 2205 if (mesh_ctrl) 2206 qos_ctl |= cpu_to_le16(IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT); 2207 2208 memcpy(ieee80211_get_qos_ctl(qos_ptr), &qos_ctl, IEEE80211_QOS_CTL_LEN); 2209 } 2210 } 2211 2212 static void ath12k_dp_rx_h_undecap_eth(struct ath12k *ar, 2213 struct sk_buff *msdu, 2214 enum hal_encrypt_type enctype, 2215 struct ieee80211_rx_status *status) 2216 { 2217 struct ieee80211_hdr *hdr; 2218 struct ethhdr *eth; 2219 u8 da[ETH_ALEN]; 2220 u8 sa[ETH_ALEN]; 2221 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 2222 struct ath12k_dp_rx_rfc1042_hdr rfc = {0xaa, 0xaa, 0x03, {0x00, 0x00, 0x00}}; 2223 2224 eth = (struct ethhdr *)msdu->data; 2225 ether_addr_copy(da, eth->h_dest); 2226 ether_addr_copy(sa, eth->h_source); 2227 rfc.snap_type = eth->h_proto; 2228 skb_pull(msdu, sizeof(*eth)); 2229 memcpy(skb_push(msdu, sizeof(rfc)), &rfc, 2230 sizeof(rfc)); 2231 ath12k_get_dot11_hdr_from_rx_desc(ar, msdu, rxcb, status, enctype); 2232 2233 /* original 802.11 header has a different DA and in 2234 * case of 4addr it may also have different SA 2235 */ 2236 hdr = (struct ieee80211_hdr *)msdu->data; 2237 ether_addr_copy(ieee80211_get_DA(hdr), da); 2238 ether_addr_copy(ieee80211_get_SA(hdr), sa); 2239 } 2240 2241 static void ath12k_dp_rx_h_undecap(struct ath12k *ar, struct sk_buff *msdu, 2242 struct hal_rx_desc *rx_desc, 2243 enum hal_encrypt_type enctype, 2244 struct ieee80211_rx_status *status, 2245 bool decrypted) 2246 { 2247 struct ath12k_base *ab = ar->ab; 2248 u8 decap; 2249 struct ethhdr *ehdr; 2250 2251 decap = ath12k_dp_rx_h_decap_type(ab, rx_desc); 2252 2253 switch (decap) { 2254 case DP_RX_DECAP_TYPE_NATIVE_WIFI: 2255 ath12k_dp_rx_h_undecap_nwifi(ar, msdu, enctype, status); 2256 break; 2257 case DP_RX_DECAP_TYPE_RAW: 2258 ath12k_dp_rx_h_undecap_raw(ar, msdu, enctype, status, 2259 decrypted); 2260 break; 2261 case DP_RX_DECAP_TYPE_ETHERNET2_DIX: 2262 ehdr = (struct ethhdr *)msdu->data; 2263 2264 /* mac80211 allows fast path only for authorized STA */ 2265 if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) { 2266 ATH12K_SKB_RXCB(msdu)->is_eapol = true; 2267 ath12k_dp_rx_h_undecap_eth(ar, msdu, enctype, status); 2268 break; 2269 } 2270 2271 /* PN for mcast packets will be validated in mac80211; 2272 * remove eth header and add 802.11 header. 2273 */ 2274 if (ATH12K_SKB_RXCB(msdu)->is_mcbc && decrypted) 2275 ath12k_dp_rx_h_undecap_eth(ar, msdu, enctype, status); 2276 break; 2277 case DP_RX_DECAP_TYPE_8023: 2278 /* TODO: Handle undecap for these formats */ 2279 break; 2280 } 2281 } 2282 2283 struct ath12k_peer * 2284 ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu, 2285 struct ath12k_dp_rx_info *rx_info) 2286 { 2287 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 2288 struct ath12k_peer *peer = NULL; 2289 2290 lockdep_assert_held(&ab->base_lock); 2291 2292 if (rxcb->peer_id) 2293 peer = ath12k_peer_find_by_id(ab, rxcb->peer_id); 2294 2295 if (peer) 2296 return peer; 2297 2298 if (rx_info->addr2_present) 2299 peer = ath12k_peer_find_by_addr(ab, rx_info->addr2); 2300 2301 return peer; 2302 } 2303 2304 static void ath12k_dp_rx_h_mpdu(struct ath12k *ar, 2305 struct sk_buff *msdu, 2306 struct hal_rx_desc *rx_desc, 2307 struct ath12k_dp_rx_info *rx_info) 2308 { 2309 struct ath12k_base *ab = ar->ab; 2310 struct ath12k_skb_rxcb *rxcb; 2311 enum hal_encrypt_type enctype; 2312 bool is_decrypted = false; 2313 struct ieee80211_hdr *hdr; 2314 struct ath12k_peer *peer; 2315 struct ieee80211_rx_status *rx_status = rx_info->rx_status; 2316 u32 err_bitmap; 2317 2318 /* PN for multicast packets will be checked in mac80211 */ 2319 rxcb = ATH12K_SKB_RXCB(msdu); 2320 rxcb->is_mcbc = rx_info->is_mcbc; 2321 2322 if (rxcb->is_mcbc) 2323 rxcb->peer_id = rx_info->peer_id; 2324 2325 spin_lock_bh(&ar->ab->base_lock); 2326 peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu, rx_info); 2327 if (peer) { 2328 /* resetting mcbc bit because mcbc packets are unicast 2329 * packets only for AP as STA sends unicast packets. 2330 */ 2331 rxcb->is_mcbc = rxcb->is_mcbc && !peer->ucast_ra_only; 2332 2333 if (rxcb->is_mcbc) 2334 enctype = peer->sec_type_grp; 2335 else 2336 enctype = peer->sec_type; 2337 } else { 2338 enctype = HAL_ENCRYPT_TYPE_OPEN; 2339 } 2340 spin_unlock_bh(&ar->ab->base_lock); 2341 2342 err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc); 2343 if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap) 2344 is_decrypted = ath12k_dp_rx_h_is_decrypted(ab, rx_desc); 2345 2346 /* Clear per-MPDU flags while leaving per-PPDU flags intact */ 2347 rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC | 2348 RX_FLAG_MMIC_ERROR | 2349 RX_FLAG_DECRYPTED | 2350 RX_FLAG_IV_STRIPPED | 2351 RX_FLAG_MMIC_STRIPPED); 2352 2353 if (err_bitmap & HAL_RX_MPDU_ERR_FCS) 2354 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; 2355 if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) 2356 rx_status->flag |= RX_FLAG_MMIC_ERROR; 2357 2358 if (is_decrypted) { 2359 rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED; 2360 2361 if (rx_info->is_mcbc) 2362 rx_status->flag |= RX_FLAG_MIC_STRIPPED | 2363 RX_FLAG_ICV_STRIPPED; 2364 else 2365 rx_status->flag |= RX_FLAG_IV_STRIPPED | 2366 RX_FLAG_PN_VALIDATED; 2367 } 2368 2369 ath12k_dp_rx_h_csum_offload(msdu, rx_info); 2370 ath12k_dp_rx_h_undecap(ar, msdu, rx_desc, 2371 enctype, rx_status, is_decrypted); 2372 2373 if (!is_decrypted || rx_info->is_mcbc) 2374 return; 2375 2376 if (rx_info->decap_type != DP_RX_DECAP_TYPE_ETHERNET2_DIX) { 2377 hdr = (void *)msdu->data; 2378 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); 2379 } 2380 } 2381 2382 static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info) 2383 { 2384 struct ieee80211_supported_band *sband; 2385 struct ieee80211_rx_status *rx_status = rx_info->rx_status; 2386 enum rx_msdu_start_pkt_type pkt_type = rx_info->pkt_type; 2387 u8 bw = rx_info->bw, sgi = rx_info->sgi; 2388 u8 rate_mcs = rx_info->rate_mcs, nss = rx_info->nss; 2389 bool is_cck; 2390 2391 switch (pkt_type) { 2392 case RX_MSDU_START_PKT_TYPE_11A: 2393 case RX_MSDU_START_PKT_TYPE_11B: 2394 is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B); 2395 sband = &ar->mac.sbands[rx_status->band]; 2396 rx_status->rate_idx = ath12k_mac_hw_rate_to_idx(sband, rate_mcs, 2397 is_cck); 2398 break; 2399 case RX_MSDU_START_PKT_TYPE_11N: 2400 rx_status->encoding = RX_ENC_HT; 2401 if (rate_mcs > ATH12K_HT_MCS_MAX) { 2402 ath12k_warn(ar->ab, 2403 "Received with invalid mcs in HT mode %d\n", 2404 rate_mcs); 2405 break; 2406 } 2407 rx_status->rate_idx = rate_mcs + (8 * (nss - 1)); 2408 if (sgi) 2409 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 2410 rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw); 2411 break; 2412 case RX_MSDU_START_PKT_TYPE_11AC: 2413 rx_status->encoding = RX_ENC_VHT; 2414 rx_status->rate_idx = rate_mcs; 2415 if (rate_mcs > ATH12K_VHT_MCS_MAX) { 2416 ath12k_warn(ar->ab, 2417 "Received with invalid mcs in VHT mode %d\n", 2418 rate_mcs); 2419 break; 2420 } 2421 rx_status->nss = nss; 2422 if (sgi) 2423 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 2424 rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw); 2425 break; 2426 case RX_MSDU_START_PKT_TYPE_11AX: 2427 rx_status->rate_idx = rate_mcs; 2428 if (rate_mcs > ATH12K_HE_MCS_MAX) { 2429 ath12k_warn(ar->ab, 2430 "Received with invalid mcs in HE mode %d\n", 2431 rate_mcs); 2432 break; 2433 } 2434 rx_status->encoding = RX_ENC_HE; 2435 rx_status->nss = nss; 2436 rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi); 2437 rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw); 2438 break; 2439 case RX_MSDU_START_PKT_TYPE_11BE: 2440 rx_status->rate_idx = rate_mcs; 2441 2442 if (rate_mcs > ATH12K_EHT_MCS_MAX) { 2443 ath12k_warn(ar->ab, 2444 "Received with invalid mcs in EHT mode %d\n", 2445 rate_mcs); 2446 break; 2447 } 2448 2449 rx_status->encoding = RX_ENC_EHT; 2450 rx_status->nss = nss; 2451 rx_status->eht.gi = ath12k_mac_eht_gi_to_nl80211_eht_gi(sgi); 2452 rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw); 2453 break; 2454 default: 2455 break; 2456 } 2457 } 2458 2459 void ath12k_dp_rx_h_fetch_info(struct ath12k_base *ab, struct hal_rx_desc *rx_desc, 2460 struct ath12k_dp_rx_info *rx_info) 2461 { 2462 rx_info->ip_csum_fail = ath12k_dp_rx_h_ip_cksum_fail(ab, rx_desc); 2463 rx_info->l4_csum_fail = ath12k_dp_rx_h_l4_cksum_fail(ab, rx_desc); 2464 rx_info->is_mcbc = ath12k_dp_rx_h_is_da_mcbc(ab, rx_desc); 2465 rx_info->decap_type = ath12k_dp_rx_h_decap_type(ab, rx_desc); 2466 rx_info->pkt_type = ath12k_dp_rx_h_pkt_type(ab, rx_desc); 2467 rx_info->sgi = ath12k_dp_rx_h_sgi(ab, rx_desc); 2468 rx_info->rate_mcs = ath12k_dp_rx_h_rate_mcs(ab, rx_desc); 2469 rx_info->bw = ath12k_dp_rx_h_rx_bw(ab, rx_desc); 2470 rx_info->nss = ath12k_dp_rx_h_nss(ab, rx_desc); 2471 rx_info->tid = ath12k_dp_rx_h_tid(ab, rx_desc); 2472 rx_info->peer_id = ath12k_dp_rx_h_peer_id(ab, rx_desc); 2473 rx_info->phy_meta_data = ath12k_dp_rx_h_freq(ab, rx_desc); 2474 2475 if (ath12k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)) { 2476 ether_addr_copy(rx_info->addr2, 2477 ath12k_dp_rxdesc_get_mpdu_start_addr2(ab, rx_desc)); 2478 rx_info->addr2_present = true; 2479 } 2480 2481 ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "rx_desc: ", 2482 rx_desc, sizeof(*rx_desc)); 2483 } 2484 2485 void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info) 2486 { 2487 struct ieee80211_rx_status *rx_status = rx_info->rx_status; 2488 u8 channel_num; 2489 u32 center_freq, meta_data; 2490 struct ieee80211_channel *channel; 2491 2492 rx_status->freq = 0; 2493 rx_status->rate_idx = 0; 2494 rx_status->nss = 0; 2495 rx_status->encoding = RX_ENC_LEGACY; 2496 rx_status->bw = RATE_INFO_BW_20; 2497 rx_status->enc_flags = 0; 2498 2499 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; 2500 2501 meta_data = rx_info->phy_meta_data; 2502 channel_num = meta_data; 2503 center_freq = meta_data >> 16; 2504 2505 if (center_freq >= ATH12K_MIN_6GHZ_FREQ && 2506 center_freq <= ATH12K_MAX_6GHZ_FREQ) { 2507 rx_status->band = NL80211_BAND_6GHZ; 2508 rx_status->freq = center_freq; 2509 } else if (channel_num >= 1 && channel_num <= 14) { 2510 rx_status->band = NL80211_BAND_2GHZ; 2511 } else if (channel_num >= 36 && channel_num <= 173) { 2512 rx_status->band = NL80211_BAND_5GHZ; 2513 } else { 2514 spin_lock_bh(&ar->data_lock); 2515 channel = ar->rx_channel; 2516 if (channel) { 2517 rx_status->band = channel->band; 2518 channel_num = 2519 ieee80211_frequency_to_channel(channel->center_freq); 2520 } 2521 spin_unlock_bh(&ar->data_lock); 2522 } 2523 2524 if (rx_status->band != NL80211_BAND_6GHZ) 2525 rx_status->freq = ieee80211_channel_to_frequency(channel_num, 2526 rx_status->band); 2527 2528 ath12k_dp_rx_h_rate(ar, rx_info); 2529 } 2530 2531 static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *napi, 2532 struct sk_buff *msdu, 2533 struct ath12k_dp_rx_info *rx_info) 2534 { 2535 struct ath12k_base *ab = ar->ab; 2536 static const struct ieee80211_radiotap_he known = { 2537 .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN | 2538 IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN), 2539 .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN), 2540 }; 2541 struct ieee80211_radiotap_he *he; 2542 struct ieee80211_rx_status *rx_status; 2543 struct ieee80211_sta *pubsta; 2544 struct ath12k_peer *peer; 2545 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 2546 struct ieee80211_rx_status *status = rx_info->rx_status; 2547 u8 decap = DP_RX_DECAP_TYPE_RAW; 2548 bool is_mcbc = rxcb->is_mcbc; 2549 bool is_eapol = rxcb->is_eapol; 2550 2551 if (status->encoding == RX_ENC_HE && !(status->flag & RX_FLAG_RADIOTAP_HE) && 2552 !(status->flag & RX_FLAG_SKIP_MONITOR)) { 2553 he = skb_push(msdu, sizeof(known)); 2554 memcpy(he, &known, sizeof(known)); 2555 status->flag |= RX_FLAG_RADIOTAP_HE; 2556 } 2557 2558 if (!(status->flag & RX_FLAG_ONLY_MONITOR)) 2559 decap = rx_info->decap_type; 2560 2561 spin_lock_bh(&ab->base_lock); 2562 peer = ath12k_dp_rx_h_find_peer(ab, msdu, rx_info); 2563 2564 pubsta = peer ? peer->sta : NULL; 2565 2566 if (pubsta && pubsta->valid_links) { 2567 status->link_valid = 1; 2568 status->link_id = peer->link_id; 2569 } 2570 2571 spin_unlock_bh(&ab->base_lock); 2572 2573 ath12k_dbg(ab, ATH12K_DBG_DATA, 2574 "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", 2575 msdu, 2576 msdu->len, 2577 peer ? peer->addr : NULL, 2578 rxcb->tid, 2579 is_mcbc ? "mcast" : "ucast", 2580 ath12k_dp_rx_h_seq_no(ab, rxcb->rx_desc), 2581 (status->encoding == RX_ENC_LEGACY) ? "legacy" : "", 2582 (status->encoding == RX_ENC_HT) ? "ht" : "", 2583 (status->encoding == RX_ENC_VHT) ? "vht" : "", 2584 (status->encoding == RX_ENC_HE) ? "he" : "", 2585 (status->encoding == RX_ENC_EHT) ? "eht" : "", 2586 (status->bw == RATE_INFO_BW_40) ? "40" : "", 2587 (status->bw == RATE_INFO_BW_80) ? "80" : "", 2588 (status->bw == RATE_INFO_BW_160) ? "160" : "", 2589 (status->bw == RATE_INFO_BW_320) ? "320" : "", 2590 status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "", 2591 status->rate_idx, 2592 status->nss, 2593 status->freq, 2594 status->band, status->flag, 2595 !!(status->flag & RX_FLAG_FAILED_FCS_CRC), 2596 !!(status->flag & RX_FLAG_MMIC_ERROR), 2597 !!(status->flag & RX_FLAG_AMSDU_MORE)); 2598 2599 ath12k_dbg_dump(ab, ATH12K_DBG_DP_RX, NULL, "dp rx msdu: ", 2600 msdu->data, msdu->len); 2601 2602 rx_status = IEEE80211_SKB_RXCB(msdu); 2603 *rx_status = *status; 2604 2605 /* TODO: trace rx packet */ 2606 2607 /* PN for multicast packets are not validate in HW, 2608 * so skip 802.3 rx path 2609 * Also, fast_rx expects the STA to be authorized, hence 2610 * eapol packets are sent in slow path. 2611 */ 2612 if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol && 2613 !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED)) 2614 rx_status->flag |= RX_FLAG_8023; 2615 2616 ieee80211_rx_napi(ath12k_ar_to_hw(ar), pubsta, msdu, napi); 2617 } 2618 2619 static bool ath12k_dp_rx_check_nwifi_hdr_len_valid(struct ath12k_base *ab, 2620 struct hal_rx_desc *rx_desc, 2621 struct sk_buff *msdu) 2622 { 2623 struct ieee80211_hdr *hdr; 2624 u8 decap_type; 2625 u32 hdr_len; 2626 2627 decap_type = ath12k_dp_rx_h_decap_type(ab, rx_desc); 2628 if (decap_type != DP_RX_DECAP_TYPE_NATIVE_WIFI) 2629 return true; 2630 2631 hdr = (struct ieee80211_hdr *)msdu->data; 2632 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2633 2634 if ((likely(hdr_len <= DP_MAX_NWIFI_HDR_LEN))) 2635 return true; 2636 2637 ab->device_stats.invalid_rbm++; 2638 WARN_ON_ONCE(1); 2639 return false; 2640 } 2641 2642 static int ath12k_dp_rx_process_msdu(struct ath12k *ar, 2643 struct sk_buff *msdu, 2644 struct sk_buff_head *msdu_list, 2645 struct ath12k_dp_rx_info *rx_info) 2646 { 2647 struct ath12k_base *ab = ar->ab; 2648 struct hal_rx_desc *rx_desc, *lrx_desc; 2649 struct ath12k_skb_rxcb *rxcb; 2650 struct sk_buff *last_buf; 2651 u8 l3_pad_bytes; 2652 u16 msdu_len; 2653 int ret; 2654 u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; 2655 2656 last_buf = ath12k_dp_rx_get_msdu_last_buf(msdu_list, msdu); 2657 if (!last_buf) { 2658 ath12k_warn(ab, 2659 "No valid Rx buffer to access MSDU_END tlv\n"); 2660 ret = -EIO; 2661 goto free_out; 2662 } 2663 2664 rx_desc = (struct hal_rx_desc *)msdu->data; 2665 lrx_desc = (struct hal_rx_desc *)last_buf->data; 2666 if (!ath12k_dp_rx_h_msdu_done(ab, lrx_desc)) { 2667 ath12k_warn(ab, "msdu_done bit in msdu_end is not set\n"); 2668 ret = -EIO; 2669 goto free_out; 2670 } 2671 2672 rxcb = ATH12K_SKB_RXCB(msdu); 2673 rxcb->rx_desc = rx_desc; 2674 msdu_len = ath12k_dp_rx_h_msdu_len(ab, lrx_desc); 2675 l3_pad_bytes = ath12k_dp_rx_h_l3pad(ab, lrx_desc); 2676 2677 if (rxcb->is_frag) { 2678 skb_pull(msdu, hal_rx_desc_sz); 2679 } else if (!rxcb->is_continuation) { 2680 if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) { 2681 ret = -EINVAL; 2682 ath12k_warn(ab, "invalid msdu len %u\n", msdu_len); 2683 ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", rx_desc, 2684 sizeof(*rx_desc)); 2685 goto free_out; 2686 } 2687 skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len); 2688 skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes); 2689 } else { 2690 ret = ath12k_dp_rx_msdu_coalesce(ar, msdu_list, 2691 msdu, last_buf, 2692 l3_pad_bytes, msdu_len); 2693 if (ret) { 2694 ath12k_warn(ab, 2695 "failed to coalesce msdu rx buffer%d\n", ret); 2696 goto free_out; 2697 } 2698 } 2699 2700 if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, rx_desc, msdu))) { 2701 ret = -EINVAL; 2702 goto free_out; 2703 } 2704 2705 ath12k_dp_rx_h_fetch_info(ab, rx_desc, rx_info); 2706 ath12k_dp_rx_h_ppdu(ar, rx_info); 2707 ath12k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_info); 2708 2709 rx_info->rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED; 2710 2711 return 0; 2712 2713 free_out: 2714 return ret; 2715 } 2716 2717 static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab, 2718 struct napi_struct *napi, 2719 struct sk_buff_head *msdu_list, 2720 int ring_id) 2721 { 2722 struct ath12k_hw_group *ag = ab->ag; 2723 struct ieee80211_rx_status rx_status = {0}; 2724 struct ath12k_skb_rxcb *rxcb; 2725 struct sk_buff *msdu; 2726 struct ath12k *ar; 2727 struct ath12k_hw_link *hw_links = ag->hw_links; 2728 struct ath12k_base *partner_ab; 2729 struct ath12k_dp_rx_info rx_info; 2730 u8 hw_link_id, pdev_id; 2731 int ret; 2732 2733 if (skb_queue_empty(msdu_list)) 2734 return; 2735 2736 rx_info.addr2_present = false; 2737 rx_info.rx_status = &rx_status; 2738 2739 rcu_read_lock(); 2740 2741 while ((msdu = __skb_dequeue(msdu_list))) { 2742 rxcb = ATH12K_SKB_RXCB(msdu); 2743 hw_link_id = rxcb->hw_link_id; 2744 partner_ab = ath12k_ag_to_ab(ag, 2745 hw_links[hw_link_id].device_id); 2746 pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params, 2747 hw_links[hw_link_id].pdev_idx); 2748 ar = partner_ab->pdevs[pdev_id].ar; 2749 if (!rcu_dereference(partner_ab->pdevs_active[pdev_id])) { 2750 dev_kfree_skb_any(msdu); 2751 continue; 2752 } 2753 2754 if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) { 2755 dev_kfree_skb_any(msdu); 2756 continue; 2757 } 2758 2759 ret = ath12k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_info); 2760 if (ret) { 2761 ath12k_dbg(ab, ATH12K_DBG_DATA, 2762 "Unable to process msdu %d", ret); 2763 dev_kfree_skb_any(msdu); 2764 continue; 2765 } 2766 2767 ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_info); 2768 } 2769 2770 rcu_read_unlock(); 2771 } 2772 2773 static u16 ath12k_dp_rx_get_peer_id(struct ath12k_base *ab, 2774 enum ath12k_peer_metadata_version ver, 2775 __le32 peer_metadata) 2776 { 2777 switch (ver) { 2778 default: 2779 ath12k_warn(ab, "Unknown peer metadata version: %d", ver); 2780 fallthrough; 2781 case ATH12K_PEER_METADATA_V0: 2782 return le32_get_bits(peer_metadata, 2783 RX_MPDU_DESC_META_DATA_V0_PEER_ID); 2784 case ATH12K_PEER_METADATA_V1: 2785 return le32_get_bits(peer_metadata, 2786 RX_MPDU_DESC_META_DATA_V1_PEER_ID); 2787 case ATH12K_PEER_METADATA_V1A: 2788 return le32_get_bits(peer_metadata, 2789 RX_MPDU_DESC_META_DATA_V1A_PEER_ID); 2790 case ATH12K_PEER_METADATA_V1B: 2791 return le32_get_bits(peer_metadata, 2792 RX_MPDU_DESC_META_DATA_V1B_PEER_ID); 2793 } 2794 } 2795 2796 int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id, 2797 struct napi_struct *napi, int budget) 2798 { 2799 struct ath12k_hw_group *ag = ab->ag; 2800 struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES]; 2801 struct ath12k_hw_link *hw_links = ag->hw_links; 2802 int num_buffs_reaped[ATH12K_MAX_DEVICES] = {}; 2803 struct ath12k_rx_desc_info *desc_info; 2804 struct ath12k_dp *dp = &ab->dp; 2805 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; 2806 struct hal_reo_dest_ring *desc; 2807 struct ath12k_base *partner_ab; 2808 struct sk_buff_head msdu_list; 2809 struct ath12k_skb_rxcb *rxcb; 2810 int total_msdu_reaped = 0; 2811 u8 hw_link_id, device_id; 2812 struct hal_srng *srng; 2813 struct sk_buff *msdu; 2814 bool done = false; 2815 u64 desc_va; 2816 2817 __skb_queue_head_init(&msdu_list); 2818 2819 for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) 2820 INIT_LIST_HEAD(&rx_desc_used_list[device_id]); 2821 2822 srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id]; 2823 2824 spin_lock_bh(&srng->lock); 2825 2826 try_again: 2827 ath12k_hal_srng_access_begin(ab, srng); 2828 2829 while ((desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) { 2830 struct rx_mpdu_desc *mpdu_info; 2831 struct rx_msdu_desc *msdu_info; 2832 enum hal_reo_dest_ring_push_reason push_reason; 2833 u32 cookie; 2834 2835 cookie = le32_get_bits(desc->buf_addr_info.info1, 2836 BUFFER_ADDR_INFO1_SW_COOKIE); 2837 2838 hw_link_id = le32_get_bits(desc->info0, 2839 HAL_REO_DEST_RING_INFO0_SRC_LINK_ID); 2840 2841 desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 | 2842 le32_to_cpu(desc->buf_va_lo)); 2843 desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va); 2844 2845 device_id = hw_links[hw_link_id].device_id; 2846 partner_ab = ath12k_ag_to_ab(ag, device_id); 2847 if (unlikely(!partner_ab)) { 2848 if (desc_info->skb) { 2849 dev_kfree_skb_any(desc_info->skb); 2850 desc_info->skb = NULL; 2851 } 2852 2853 continue; 2854 } 2855 2856 /* retry manual desc retrieval */ 2857 if (!desc_info) { 2858 desc_info = ath12k_dp_get_rx_desc(partner_ab, cookie); 2859 if (!desc_info) { 2860 ath12k_warn(partner_ab, "Invalid cookie in manual descriptor retrieval: 0x%x\n", 2861 cookie); 2862 continue; 2863 } 2864 } 2865 2866 if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC) 2867 ath12k_warn(ab, "Check HW CC implementation"); 2868 2869 msdu = desc_info->skb; 2870 desc_info->skb = NULL; 2871 2872 list_add_tail(&desc_info->list, &rx_desc_used_list[device_id]); 2873 2874 rxcb = ATH12K_SKB_RXCB(msdu); 2875 dma_unmap_single(partner_ab->dev, rxcb->paddr, 2876 msdu->len + skb_tailroom(msdu), 2877 DMA_FROM_DEVICE); 2878 2879 num_buffs_reaped[device_id]++; 2880 ab->device_stats.reo_rx[ring_id][ab->device_id]++; 2881 2882 push_reason = le32_get_bits(desc->info0, 2883 HAL_REO_DEST_RING_INFO0_PUSH_REASON); 2884 if (push_reason != 2885 HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) { 2886 dev_kfree_skb_any(msdu); 2887 ab->device_stats.hal_reo_error[ring_id]++; 2888 continue; 2889 } 2890 2891 msdu_info = &desc->rx_msdu_info; 2892 mpdu_info = &desc->rx_mpdu_info; 2893 2894 rxcb->is_first_msdu = !!(le32_to_cpu(msdu_info->info0) & 2895 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU); 2896 rxcb->is_last_msdu = !!(le32_to_cpu(msdu_info->info0) & 2897 RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU); 2898 rxcb->is_continuation = !!(le32_to_cpu(msdu_info->info0) & 2899 RX_MSDU_DESC_INFO0_MSDU_CONTINUATION); 2900 rxcb->hw_link_id = hw_link_id; 2901 rxcb->peer_id = ath12k_dp_rx_get_peer_id(ab, dp->peer_metadata_ver, 2902 mpdu_info->peer_meta_data); 2903 rxcb->tid = le32_get_bits(mpdu_info->info0, 2904 RX_MPDU_DESC_INFO0_TID); 2905 2906 __skb_queue_tail(&msdu_list, msdu); 2907 2908 if (!rxcb->is_continuation) { 2909 total_msdu_reaped++; 2910 done = true; 2911 } else { 2912 done = false; 2913 } 2914 2915 if (total_msdu_reaped >= budget) 2916 break; 2917 } 2918 2919 /* Hw might have updated the head pointer after we cached it. 2920 * In this case, even though there are entries in the ring we'll 2921 * get rx_desc NULL. Give the read another try with updated cached 2922 * head pointer so that we can reap complete MPDU in the current 2923 * rx processing. 2924 */ 2925 if (!done && ath12k_hal_srng_dst_num_free(ab, srng, true)) { 2926 ath12k_hal_srng_access_end(ab, srng); 2927 goto try_again; 2928 } 2929 2930 ath12k_hal_srng_access_end(ab, srng); 2931 2932 spin_unlock_bh(&srng->lock); 2933 2934 if (!total_msdu_reaped) 2935 goto exit; 2936 2937 for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) { 2938 if (!num_buffs_reaped[device_id]) 2939 continue; 2940 2941 partner_ab = ath12k_ag_to_ab(ag, device_id); 2942 rx_ring = &partner_ab->dp.rx_refill_buf_ring; 2943 2944 ath12k_dp_rx_bufs_replenish(partner_ab, rx_ring, 2945 &rx_desc_used_list[device_id], 2946 num_buffs_reaped[device_id]); 2947 } 2948 2949 ath12k_dp_rx_process_received_packets(ab, napi, &msdu_list, 2950 ring_id); 2951 2952 exit: 2953 return total_msdu_reaped; 2954 } 2955 2956 static void ath12k_dp_rx_frag_timer(struct timer_list *timer) 2957 { 2958 struct ath12k_dp_rx_tid *rx_tid = timer_container_of(rx_tid, timer, 2959 frag_timer); 2960 2961 spin_lock_bh(&rx_tid->ab->base_lock); 2962 if (rx_tid->last_frag_no && 2963 rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) { 2964 spin_unlock_bh(&rx_tid->ab->base_lock); 2965 return; 2966 } 2967 ath12k_dp_rx_frags_cleanup(rx_tid, true); 2968 spin_unlock_bh(&rx_tid->ab->base_lock); 2969 } 2970 2971 int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id) 2972 { 2973 struct ath12k_base *ab = ar->ab; 2974 struct crypto_shash *tfm; 2975 struct ath12k_peer *peer; 2976 struct ath12k_dp_rx_tid *rx_tid; 2977 int i; 2978 2979 tfm = crypto_alloc_shash("michael_mic", 0, 0); 2980 if (IS_ERR(tfm)) 2981 return PTR_ERR(tfm); 2982 2983 spin_lock_bh(&ab->base_lock); 2984 2985 peer = ath12k_peer_find(ab, vdev_id, peer_mac); 2986 if (!peer) { 2987 spin_unlock_bh(&ab->base_lock); 2988 crypto_free_shash(tfm); 2989 ath12k_warn(ab, "failed to find the peer to set up fragment info\n"); 2990 return -ENOENT; 2991 } 2992 2993 if (!peer->primary_link) { 2994 spin_unlock_bh(&ab->base_lock); 2995 crypto_free_shash(tfm); 2996 return 0; 2997 } 2998 2999 for (i = 0; i <= IEEE80211_NUM_TIDS; i++) { 3000 rx_tid = &peer->rx_tid[i]; 3001 rx_tid->ab = ab; 3002 timer_setup(&rx_tid->frag_timer, ath12k_dp_rx_frag_timer, 0); 3003 skb_queue_head_init(&rx_tid->rx_frags); 3004 } 3005 3006 peer->tfm_mmic = tfm; 3007 peer->dp_setup_done = true; 3008 spin_unlock_bh(&ab->base_lock); 3009 3010 return 0; 3011 } 3012 3013 static int ath12k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key, 3014 struct ieee80211_hdr *hdr, u8 *data, 3015 size_t data_len, u8 *mic) 3016 { 3017 SHASH_DESC_ON_STACK(desc, tfm); 3018 u8 mic_hdr[16] = {0}; 3019 u8 tid = 0; 3020 int ret; 3021 3022 if (!tfm) 3023 return -EINVAL; 3024 3025 desc->tfm = tfm; 3026 3027 ret = crypto_shash_setkey(tfm, key, 8); 3028 if (ret) 3029 goto out; 3030 3031 ret = crypto_shash_init(desc); 3032 if (ret) 3033 goto out; 3034 3035 /* TKIP MIC header */ 3036 memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN); 3037 memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN); 3038 if (ieee80211_is_data_qos(hdr->frame_control)) 3039 tid = ieee80211_get_tid(hdr); 3040 mic_hdr[12] = tid; 3041 3042 ret = crypto_shash_update(desc, mic_hdr, 16); 3043 if (ret) 3044 goto out; 3045 ret = crypto_shash_update(desc, data, data_len); 3046 if (ret) 3047 goto out; 3048 ret = crypto_shash_final(desc, mic); 3049 out: 3050 shash_desc_zero(desc); 3051 return ret; 3052 } 3053 3054 static int ath12k_dp_rx_h_verify_tkip_mic(struct ath12k *ar, struct ath12k_peer *peer, 3055 struct sk_buff *msdu) 3056 { 3057 struct ath12k_base *ab = ar->ab; 3058 struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data; 3059 struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu); 3060 struct ieee80211_key_conf *key_conf; 3061 struct ieee80211_hdr *hdr; 3062 struct ath12k_dp_rx_info rx_info; 3063 u8 mic[IEEE80211_CCMP_MIC_LEN]; 3064 int head_len, tail_len, ret; 3065 size_t data_len; 3066 u32 hdr_len, hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; 3067 u8 *key, *data; 3068 u8 key_idx; 3069 3070 if (ath12k_dp_rx_h_enctype(ab, rx_desc) != HAL_ENCRYPT_TYPE_TKIP_MIC) 3071 return 0; 3072 3073 rx_info.addr2_present = false; 3074 rx_info.rx_status = rxs; 3075 3076 hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz); 3077 hdr_len = ieee80211_hdrlen(hdr->frame_control); 3078 head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN; 3079 tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN; 3080 3081 if (!is_multicast_ether_addr(hdr->addr1)) 3082 key_idx = peer->ucast_keyidx; 3083 else 3084 key_idx = peer->mcast_keyidx; 3085 3086 key_conf = peer->keys[key_idx]; 3087 3088 data = msdu->data + head_len; 3089 data_len = msdu->len - head_len - tail_len; 3090 key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY]; 3091 3092 ret = ath12k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic); 3093 if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN)) 3094 goto mic_fail; 3095 3096 return 0; 3097 3098 mic_fail: 3099 (ATH12K_SKB_RXCB(msdu))->is_first_msdu = true; 3100 (ATH12K_SKB_RXCB(msdu))->is_last_msdu = true; 3101 3102 ath12k_dp_rx_h_fetch_info(ab, rx_desc, &rx_info); 3103 3104 rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED | 3105 RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED; 3106 skb_pull(msdu, hal_rx_desc_sz); 3107 3108 if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, rx_desc, msdu))) 3109 return -EINVAL; 3110 3111 ath12k_dp_rx_h_ppdu(ar, &rx_info); 3112 ath12k_dp_rx_h_undecap(ar, msdu, rx_desc, 3113 HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true); 3114 ieee80211_rx(ath12k_ar_to_hw(ar), msdu); 3115 return -EINVAL; 3116 } 3117 3118 static void ath12k_dp_rx_h_undecap_frag(struct ath12k *ar, struct sk_buff *msdu, 3119 enum hal_encrypt_type enctype, u32 flags) 3120 { 3121 struct ieee80211_hdr *hdr; 3122 size_t hdr_len; 3123 size_t crypto_len; 3124 u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; 3125 3126 if (!flags) 3127 return; 3128 3129 hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz); 3130 3131 if (flags & RX_FLAG_MIC_STRIPPED) 3132 skb_trim(msdu, msdu->len - 3133 ath12k_dp_rx_crypto_mic_len(ar, enctype)); 3134 3135 if (flags & RX_FLAG_ICV_STRIPPED) 3136 skb_trim(msdu, msdu->len - 3137 ath12k_dp_rx_crypto_icv_len(ar, enctype)); 3138 3139 if (flags & RX_FLAG_IV_STRIPPED) { 3140 hdr_len = ieee80211_hdrlen(hdr->frame_control); 3141 crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype); 3142 3143 memmove(msdu->data + hal_rx_desc_sz + crypto_len, 3144 msdu->data + hal_rx_desc_sz, hdr_len); 3145 skb_pull(msdu, crypto_len); 3146 } 3147 } 3148 3149 static int ath12k_dp_rx_h_defrag(struct ath12k *ar, 3150 struct ath12k_peer *peer, 3151 struct ath12k_dp_rx_tid *rx_tid, 3152 struct sk_buff **defrag_skb) 3153 { 3154 struct ath12k_base *ab = ar->ab; 3155 struct hal_rx_desc *rx_desc; 3156 struct sk_buff *skb, *first_frag, *last_frag; 3157 struct ieee80211_hdr *hdr; 3158 enum hal_encrypt_type enctype; 3159 bool is_decrypted = false; 3160 int msdu_len = 0; 3161 int extra_space; 3162 u32 flags, hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; 3163 3164 first_frag = skb_peek(&rx_tid->rx_frags); 3165 last_frag = skb_peek_tail(&rx_tid->rx_frags); 3166 3167 skb_queue_walk(&rx_tid->rx_frags, skb) { 3168 flags = 0; 3169 rx_desc = (struct hal_rx_desc *)skb->data; 3170 hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz); 3171 3172 enctype = ath12k_dp_rx_h_enctype(ab, rx_desc); 3173 if (enctype != HAL_ENCRYPT_TYPE_OPEN) 3174 is_decrypted = ath12k_dp_rx_h_is_decrypted(ab, 3175 rx_desc); 3176 3177 if (is_decrypted) { 3178 if (skb != first_frag) 3179 flags |= RX_FLAG_IV_STRIPPED; 3180 if (skb != last_frag) 3181 flags |= RX_FLAG_ICV_STRIPPED | 3182 RX_FLAG_MIC_STRIPPED; 3183 } 3184 3185 /* RX fragments are always raw packets */ 3186 if (skb != last_frag) 3187 skb_trim(skb, skb->len - FCS_LEN); 3188 ath12k_dp_rx_h_undecap_frag(ar, skb, enctype, flags); 3189 3190 if (skb != first_frag) 3191 skb_pull(skb, hal_rx_desc_sz + 3192 ieee80211_hdrlen(hdr->frame_control)); 3193 msdu_len += skb->len; 3194 } 3195 3196 extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag)); 3197 if (extra_space > 0 && 3198 (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0)) 3199 return -ENOMEM; 3200 3201 __skb_unlink(first_frag, &rx_tid->rx_frags); 3202 while ((skb = __skb_dequeue(&rx_tid->rx_frags))) { 3203 skb_put_data(first_frag, skb->data, skb->len); 3204 dev_kfree_skb_any(skb); 3205 } 3206 3207 hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz); 3208 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS); 3209 ATH12K_SKB_RXCB(first_frag)->is_frag = 1; 3210 3211 if (ath12k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag)) 3212 first_frag = NULL; 3213 3214 *defrag_skb = first_frag; 3215 return 0; 3216 } 3217 3218 static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar, 3219 struct ath12k_dp_rx_tid *rx_tid, 3220 struct sk_buff *defrag_skb) 3221 { 3222 struct ath12k_base *ab = ar->ab; 3223 struct ath12k_dp *dp = &ab->dp; 3224 struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data; 3225 struct hal_reo_entrance_ring *reo_ent_ring; 3226 struct hal_reo_dest_ring *reo_dest_ring; 3227 struct dp_link_desc_bank *link_desc_banks; 3228 struct hal_rx_msdu_link *msdu_link; 3229 struct hal_rx_msdu_details *msdu0; 3230 struct hal_srng *srng; 3231 dma_addr_t link_paddr, buf_paddr; 3232 u32 desc_bank, msdu_info, msdu_ext_info, mpdu_info; 3233 u32 cookie, hal_rx_desc_sz, dest_ring_info0, queue_addr_hi; 3234 int ret; 3235 struct ath12k_rx_desc_info *desc_info; 3236 enum hal_rx_buf_return_buf_manager idle_link_rbm = dp->idle_link_rbm; 3237 u8 dst_ind; 3238 3239 hal_rx_desc_sz = ab->hal.hal_desc_sz; 3240 link_desc_banks = dp->link_desc_banks; 3241 reo_dest_ring = rx_tid->dst_ring_desc; 3242 3243 ath12k_hal_rx_reo_ent_paddr_get(ab, &reo_dest_ring->buf_addr_info, 3244 &link_paddr, &cookie); 3245 desc_bank = u32_get_bits(cookie, DP_LINK_DESC_BANK_MASK); 3246 3247 msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr + 3248 (link_paddr - link_desc_banks[desc_bank].paddr)); 3249 msdu0 = &msdu_link->msdu_link[0]; 3250 msdu_ext_info = le32_to_cpu(msdu0->rx_msdu_ext_info.info0); 3251 dst_ind = u32_get_bits(msdu_ext_info, RX_MSDU_EXT_DESC_INFO0_REO_DEST_IND); 3252 3253 memset(msdu0, 0, sizeof(*msdu0)); 3254 3255 msdu_info = u32_encode_bits(1, RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU) | 3256 u32_encode_bits(1, RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU) | 3257 u32_encode_bits(0, RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) | 3258 u32_encode_bits(defrag_skb->len - hal_rx_desc_sz, 3259 RX_MSDU_DESC_INFO0_MSDU_LENGTH) | 3260 u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_SA) | 3261 u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_DA); 3262 msdu0->rx_msdu_info.info0 = cpu_to_le32(msdu_info); 3263 msdu0->rx_msdu_ext_info.info0 = cpu_to_le32(msdu_ext_info); 3264 3265 /* change msdu len in hal rx desc */ 3266 ath12k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz); 3267 3268 buf_paddr = dma_map_single(ab->dev, defrag_skb->data, 3269 defrag_skb->len + skb_tailroom(defrag_skb), 3270 DMA_TO_DEVICE); 3271 if (dma_mapping_error(ab->dev, buf_paddr)) 3272 return -ENOMEM; 3273 3274 spin_lock_bh(&dp->rx_desc_lock); 3275 desc_info = list_first_entry_or_null(&dp->rx_desc_free_list, 3276 struct ath12k_rx_desc_info, 3277 list); 3278 if (!desc_info) { 3279 spin_unlock_bh(&dp->rx_desc_lock); 3280 ath12k_warn(ab, "failed to find rx desc for reinject\n"); 3281 ret = -ENOMEM; 3282 goto err_unmap_dma; 3283 } 3284 3285 desc_info->skb = defrag_skb; 3286 desc_info->in_use = true; 3287 3288 list_del(&desc_info->list); 3289 spin_unlock_bh(&dp->rx_desc_lock); 3290 3291 ATH12K_SKB_RXCB(defrag_skb)->paddr = buf_paddr; 3292 3293 ath12k_hal_rx_buf_addr_info_set(&msdu0->buf_addr_info, buf_paddr, 3294 desc_info->cookie, 3295 HAL_RX_BUF_RBM_SW3_BM); 3296 3297 /* Fill mpdu details into reo entrance ring */ 3298 srng = &ab->hal.srng_list[dp->reo_reinject_ring.ring_id]; 3299 3300 spin_lock_bh(&srng->lock); 3301 ath12k_hal_srng_access_begin(ab, srng); 3302 3303 reo_ent_ring = ath12k_hal_srng_src_get_next_entry(ab, srng); 3304 if (!reo_ent_ring) { 3305 ath12k_hal_srng_access_end(ab, srng); 3306 spin_unlock_bh(&srng->lock); 3307 ret = -ENOSPC; 3308 goto err_free_desc; 3309 } 3310 memset(reo_ent_ring, 0, sizeof(*reo_ent_ring)); 3311 3312 ath12k_hal_rx_buf_addr_info_set(&reo_ent_ring->buf_addr_info, link_paddr, 3313 cookie, 3314 idle_link_rbm); 3315 3316 mpdu_info = u32_encode_bits(1, RX_MPDU_DESC_INFO0_MSDU_COUNT) | 3317 u32_encode_bits(0, RX_MPDU_DESC_INFO0_FRAG_FLAG) | 3318 u32_encode_bits(1, RX_MPDU_DESC_INFO0_RAW_MPDU) | 3319 u32_encode_bits(1, RX_MPDU_DESC_INFO0_VALID_PN) | 3320 u32_encode_bits(rx_tid->tid, RX_MPDU_DESC_INFO0_TID); 3321 3322 reo_ent_ring->rx_mpdu_info.info0 = cpu_to_le32(mpdu_info); 3323 reo_ent_ring->rx_mpdu_info.peer_meta_data = 3324 reo_dest_ring->rx_mpdu_info.peer_meta_data; 3325 3326 if (ab->hw_params->reoq_lut_support) { 3327 reo_ent_ring->queue_addr_lo = reo_dest_ring->rx_mpdu_info.peer_meta_data; 3328 queue_addr_hi = 0; 3329 } else { 3330 reo_ent_ring->queue_addr_lo = 3331 cpu_to_le32(lower_32_bits(rx_tid->qbuf.paddr_aligned)); 3332 queue_addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned); 3333 } 3334 3335 reo_ent_ring->info0 = le32_encode_bits(queue_addr_hi, 3336 HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI) | 3337 le32_encode_bits(dst_ind, 3338 HAL_REO_ENTR_RING_INFO0_DEST_IND); 3339 3340 reo_ent_ring->info1 = le32_encode_bits(rx_tid->cur_sn, 3341 HAL_REO_ENTR_RING_INFO1_MPDU_SEQ_NUM); 3342 dest_ring_info0 = le32_get_bits(reo_dest_ring->info0, 3343 HAL_REO_DEST_RING_INFO0_SRC_LINK_ID); 3344 reo_ent_ring->info2 = 3345 cpu_to_le32(u32_get_bits(dest_ring_info0, 3346 HAL_REO_ENTR_RING_INFO2_SRC_LINK_ID)); 3347 3348 ath12k_hal_srng_access_end(ab, srng); 3349 spin_unlock_bh(&srng->lock); 3350 3351 return 0; 3352 3353 err_free_desc: 3354 spin_lock_bh(&dp->rx_desc_lock); 3355 desc_info->in_use = false; 3356 desc_info->skb = NULL; 3357 list_add_tail(&desc_info->list, &dp->rx_desc_free_list); 3358 spin_unlock_bh(&dp->rx_desc_lock); 3359 err_unmap_dma: 3360 dma_unmap_single(ab->dev, buf_paddr, defrag_skb->len + skb_tailroom(defrag_skb), 3361 DMA_TO_DEVICE); 3362 return ret; 3363 } 3364 3365 static int ath12k_dp_rx_h_cmp_frags(struct ath12k_base *ab, 3366 struct sk_buff *a, struct sk_buff *b) 3367 { 3368 int frag1, frag2; 3369 3370 frag1 = ath12k_dp_rx_h_frag_no(ab, a); 3371 frag2 = ath12k_dp_rx_h_frag_no(ab, b); 3372 3373 return frag1 - frag2; 3374 } 3375 3376 static void ath12k_dp_rx_h_sort_frags(struct ath12k_base *ab, 3377 struct sk_buff_head *frag_list, 3378 struct sk_buff *cur_frag) 3379 { 3380 struct sk_buff *skb; 3381 int cmp; 3382 3383 skb_queue_walk(frag_list, skb) { 3384 cmp = ath12k_dp_rx_h_cmp_frags(ab, skb, cur_frag); 3385 if (cmp < 0) 3386 continue; 3387 __skb_queue_before(frag_list, skb, cur_frag); 3388 return; 3389 } 3390 __skb_queue_tail(frag_list, cur_frag); 3391 } 3392 3393 static u64 ath12k_dp_rx_h_get_pn(struct ath12k *ar, struct sk_buff *skb) 3394 { 3395 struct ieee80211_hdr *hdr; 3396 u64 pn = 0; 3397 u8 *ehdr; 3398 u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; 3399 3400 hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz); 3401 ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control); 3402 3403 pn = ehdr[0]; 3404 pn |= (u64)ehdr[1] << 8; 3405 pn |= (u64)ehdr[4] << 16; 3406 pn |= (u64)ehdr[5] << 24; 3407 pn |= (u64)ehdr[6] << 32; 3408 pn |= (u64)ehdr[7] << 40; 3409 3410 return pn; 3411 } 3412 3413 static bool 3414 ath12k_dp_rx_h_defrag_validate_incr_pn(struct ath12k *ar, struct ath12k_dp_rx_tid *rx_tid) 3415 { 3416 struct ath12k_base *ab = ar->ab; 3417 enum hal_encrypt_type encrypt_type; 3418 struct sk_buff *first_frag, *skb; 3419 struct hal_rx_desc *desc; 3420 u64 last_pn; 3421 u64 cur_pn; 3422 3423 first_frag = skb_peek(&rx_tid->rx_frags); 3424 desc = (struct hal_rx_desc *)first_frag->data; 3425 3426 encrypt_type = ath12k_dp_rx_h_enctype(ab, desc); 3427 if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 && 3428 encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 && 3429 encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 && 3430 encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256) 3431 return true; 3432 3433 last_pn = ath12k_dp_rx_h_get_pn(ar, first_frag); 3434 skb_queue_walk(&rx_tid->rx_frags, skb) { 3435 if (skb == first_frag) 3436 continue; 3437 3438 cur_pn = ath12k_dp_rx_h_get_pn(ar, skb); 3439 if (cur_pn != last_pn + 1) 3440 return false; 3441 last_pn = cur_pn; 3442 } 3443 return true; 3444 } 3445 3446 static int ath12k_dp_rx_frag_h_mpdu(struct ath12k *ar, 3447 struct sk_buff *msdu, 3448 struct hal_reo_dest_ring *ring_desc) 3449 { 3450 struct ath12k_base *ab = ar->ab; 3451 struct hal_rx_desc *rx_desc; 3452 struct ath12k_peer *peer; 3453 struct ath12k_dp_rx_tid *rx_tid; 3454 struct sk_buff *defrag_skb = NULL; 3455 u32 peer_id; 3456 u16 seqno, frag_no; 3457 u8 tid; 3458 int ret = 0; 3459 bool more_frags; 3460 3461 rx_desc = (struct hal_rx_desc *)msdu->data; 3462 peer_id = ath12k_dp_rx_h_peer_id(ab, rx_desc); 3463 tid = ath12k_dp_rx_h_tid(ab, rx_desc); 3464 seqno = ath12k_dp_rx_h_seq_no(ab, rx_desc); 3465 frag_no = ath12k_dp_rx_h_frag_no(ab, msdu); 3466 more_frags = ath12k_dp_rx_h_more_frags(ab, msdu); 3467 3468 if (!ath12k_dp_rx_h_seq_ctrl_valid(ab, rx_desc) || 3469 !ath12k_dp_rx_h_fc_valid(ab, rx_desc) || 3470 tid > IEEE80211_NUM_TIDS) 3471 return -EINVAL; 3472 3473 /* received unfragmented packet in reo 3474 * exception ring, this shouldn't happen 3475 * as these packets typically come from 3476 * reo2sw srngs. 3477 */ 3478 if (WARN_ON_ONCE(!frag_no && !more_frags)) 3479 return -EINVAL; 3480 3481 spin_lock_bh(&ab->base_lock); 3482 peer = ath12k_peer_find_by_id(ab, peer_id); 3483 if (!peer) { 3484 ath12k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n", 3485 peer_id); 3486 ret = -ENOENT; 3487 goto out_unlock; 3488 } 3489 3490 if (!peer->dp_setup_done) { 3491 ath12k_warn(ab, "The peer %pM [%d] has uninitialized datapath\n", 3492 peer->addr, peer_id); 3493 ret = -ENOENT; 3494 goto out_unlock; 3495 } 3496 3497 rx_tid = &peer->rx_tid[tid]; 3498 3499 if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) || 3500 skb_queue_empty(&rx_tid->rx_frags)) { 3501 /* Flush stored fragments and start a new sequence */ 3502 ath12k_dp_rx_frags_cleanup(rx_tid, true); 3503 rx_tid->cur_sn = seqno; 3504 } 3505 3506 if (rx_tid->rx_frag_bitmap & BIT(frag_no)) { 3507 /* Fragment already present */ 3508 ret = -EINVAL; 3509 goto out_unlock; 3510 } 3511 3512 if ((!rx_tid->rx_frag_bitmap || frag_no > __fls(rx_tid->rx_frag_bitmap))) 3513 __skb_queue_tail(&rx_tid->rx_frags, msdu); 3514 else 3515 ath12k_dp_rx_h_sort_frags(ab, &rx_tid->rx_frags, msdu); 3516 3517 rx_tid->rx_frag_bitmap |= BIT(frag_no); 3518 if (!more_frags) 3519 rx_tid->last_frag_no = frag_no; 3520 3521 if (frag_no == 0) { 3522 rx_tid->dst_ring_desc = kmemdup(ring_desc, 3523 sizeof(*rx_tid->dst_ring_desc), 3524 GFP_ATOMIC); 3525 if (!rx_tid->dst_ring_desc) { 3526 ret = -ENOMEM; 3527 goto out_unlock; 3528 } 3529 } else { 3530 ath12k_dp_rx_link_desc_return(ab, &ring_desc->buf_addr_info, 3531 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 3532 } 3533 3534 if (!rx_tid->last_frag_no || 3535 rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) { 3536 mod_timer(&rx_tid->frag_timer, jiffies + 3537 ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS); 3538 goto out_unlock; 3539 } 3540 3541 spin_unlock_bh(&ab->base_lock); 3542 timer_delete_sync(&rx_tid->frag_timer); 3543 spin_lock_bh(&ab->base_lock); 3544 3545 peer = ath12k_peer_find_by_id(ab, peer_id); 3546 if (!peer) 3547 goto err_frags_cleanup; 3548 3549 if (!ath12k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid)) 3550 goto err_frags_cleanup; 3551 3552 if (ath12k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb)) 3553 goto err_frags_cleanup; 3554 3555 if (!defrag_skb) 3556 goto err_frags_cleanup; 3557 3558 if (ath12k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb)) 3559 goto err_frags_cleanup; 3560 3561 ath12k_dp_rx_frags_cleanup(rx_tid, false); 3562 goto out_unlock; 3563 3564 err_frags_cleanup: 3565 dev_kfree_skb_any(defrag_skb); 3566 ath12k_dp_rx_frags_cleanup(rx_tid, true); 3567 out_unlock: 3568 spin_unlock_bh(&ab->base_lock); 3569 return ret; 3570 } 3571 3572 static int 3573 ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc, 3574 struct list_head *used_list, 3575 bool drop, u32 cookie) 3576 { 3577 struct ath12k_base *ab = ar->ab; 3578 struct sk_buff *msdu; 3579 struct ath12k_skb_rxcb *rxcb; 3580 struct hal_rx_desc *rx_desc; 3581 u16 msdu_len; 3582 u32 hal_rx_desc_sz = ab->hal.hal_desc_sz; 3583 struct ath12k_rx_desc_info *desc_info; 3584 u64 desc_va; 3585 3586 desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 | 3587 le32_to_cpu(desc->buf_va_lo)); 3588 desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va); 3589 3590 /* retry manual desc retrieval */ 3591 if (!desc_info) { 3592 desc_info = ath12k_dp_get_rx_desc(ab, cookie); 3593 if (!desc_info) { 3594 ath12k_warn(ab, "Invalid cookie in DP rx error descriptor retrieval: 0x%x\n", 3595 cookie); 3596 return -EINVAL; 3597 } 3598 } 3599 3600 if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC) 3601 ath12k_warn(ab, " RX Exception, Check HW CC implementation"); 3602 3603 msdu = desc_info->skb; 3604 desc_info->skb = NULL; 3605 3606 list_add_tail(&desc_info->list, used_list); 3607 3608 rxcb = ATH12K_SKB_RXCB(msdu); 3609 dma_unmap_single(ar->ab->dev, rxcb->paddr, 3610 msdu->len + skb_tailroom(msdu), 3611 DMA_FROM_DEVICE); 3612 3613 if (drop) { 3614 dev_kfree_skb_any(msdu); 3615 return 0; 3616 } 3617 3618 rcu_read_lock(); 3619 if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) { 3620 dev_kfree_skb_any(msdu); 3621 goto exit; 3622 } 3623 3624 if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) { 3625 dev_kfree_skb_any(msdu); 3626 goto exit; 3627 } 3628 3629 rx_desc = (struct hal_rx_desc *)msdu->data; 3630 msdu_len = ath12k_dp_rx_h_msdu_len(ar->ab, rx_desc); 3631 if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) { 3632 ath12k_warn(ar->ab, "invalid msdu leng %u", msdu_len); 3633 ath12k_dbg_dump(ar->ab, ATH12K_DBG_DATA, NULL, "", rx_desc, 3634 sizeof(*rx_desc)); 3635 dev_kfree_skb_any(msdu); 3636 goto exit; 3637 } 3638 3639 skb_put(msdu, hal_rx_desc_sz + msdu_len); 3640 3641 if (ath12k_dp_rx_frag_h_mpdu(ar, msdu, desc)) { 3642 dev_kfree_skb_any(msdu); 3643 ath12k_dp_rx_link_desc_return(ar->ab, &desc->buf_addr_info, 3644 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 3645 } 3646 exit: 3647 rcu_read_unlock(); 3648 return 0; 3649 } 3650 3651 int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi, 3652 int budget) 3653 { 3654 struct ath12k_hw_group *ag = ab->ag; 3655 struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES]; 3656 u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC]; 3657 int num_buffs_reaped[ATH12K_MAX_DEVICES] = {}; 3658 struct dp_link_desc_bank *link_desc_banks; 3659 enum hal_rx_buf_return_buf_manager rbm; 3660 struct hal_rx_msdu_link *link_desc_va; 3661 int tot_n_bufs_reaped, quota, ret, i; 3662 struct hal_reo_dest_ring *reo_desc; 3663 struct dp_rxdma_ring *rx_ring; 3664 struct dp_srng *reo_except; 3665 struct ath12k_hw_link *hw_links = ag->hw_links; 3666 struct ath12k_base *partner_ab; 3667 u8 hw_link_id, device_id; 3668 u32 desc_bank, num_msdus; 3669 struct hal_srng *srng; 3670 struct ath12k *ar; 3671 dma_addr_t paddr; 3672 bool is_frag; 3673 bool drop; 3674 int pdev_id; 3675 3676 tot_n_bufs_reaped = 0; 3677 quota = budget; 3678 3679 for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) 3680 INIT_LIST_HEAD(&rx_desc_used_list[device_id]); 3681 3682 reo_except = &ab->dp.reo_except_ring; 3683 3684 srng = &ab->hal.srng_list[reo_except->ring_id]; 3685 3686 spin_lock_bh(&srng->lock); 3687 3688 ath12k_hal_srng_access_begin(ab, srng); 3689 3690 while (budget && 3691 (reo_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) { 3692 drop = false; 3693 ab->device_stats.err_ring_pkts++; 3694 3695 ret = ath12k_hal_desc_reo_parse_err(ab, reo_desc, &paddr, 3696 &desc_bank); 3697 if (ret) { 3698 ath12k_warn(ab, "failed to parse error reo desc %d\n", 3699 ret); 3700 continue; 3701 } 3702 3703 hw_link_id = le32_get_bits(reo_desc->info0, 3704 HAL_REO_DEST_RING_INFO0_SRC_LINK_ID); 3705 device_id = hw_links[hw_link_id].device_id; 3706 partner_ab = ath12k_ag_to_ab(ag, device_id); 3707 3708 pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params, 3709 hw_links[hw_link_id].pdev_idx); 3710 ar = partner_ab->pdevs[pdev_id].ar; 3711 3712 link_desc_banks = partner_ab->dp.link_desc_banks; 3713 link_desc_va = link_desc_banks[desc_bank].vaddr + 3714 (paddr - link_desc_banks[desc_bank].paddr); 3715 ath12k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies, 3716 &rbm); 3717 if (rbm != partner_ab->dp.idle_link_rbm && 3718 rbm != HAL_RX_BUF_RBM_SW3_BM && 3719 rbm != partner_ab->hw_params->hal_params->rx_buf_rbm) { 3720 ab->device_stats.invalid_rbm++; 3721 ath12k_warn(ab, "invalid return buffer manager %d\n", rbm); 3722 ath12k_dp_rx_link_desc_return(partner_ab, 3723 &reo_desc->buf_addr_info, 3724 HAL_WBM_REL_BM_ACT_REL_MSDU); 3725 continue; 3726 } 3727 3728 is_frag = !!(le32_to_cpu(reo_desc->rx_mpdu_info.info0) & 3729 RX_MPDU_DESC_INFO0_FRAG_FLAG); 3730 3731 /* Process only rx fragments with one msdu per link desc below, and drop 3732 * msdu's indicated due to error reasons. 3733 * Dynamic fragmentation not supported in Multi-link client, so drop the 3734 * partner device buffers. 3735 */ 3736 if (!is_frag || num_msdus > 1 || 3737 partner_ab->device_id != ab->device_id) { 3738 drop = true; 3739 3740 /* Return the link desc back to wbm idle list */ 3741 ath12k_dp_rx_link_desc_return(partner_ab, 3742 &reo_desc->buf_addr_info, 3743 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 3744 } 3745 3746 for (i = 0; i < num_msdus; i++) { 3747 if (!ath12k_dp_process_rx_err_buf(ar, reo_desc, 3748 &rx_desc_used_list[device_id], 3749 drop, 3750 msdu_cookies[i])) { 3751 num_buffs_reaped[device_id]++; 3752 tot_n_bufs_reaped++; 3753 } 3754 } 3755 3756 if (tot_n_bufs_reaped >= quota) { 3757 tot_n_bufs_reaped = quota; 3758 goto exit; 3759 } 3760 3761 budget = quota - tot_n_bufs_reaped; 3762 } 3763 3764 exit: 3765 ath12k_hal_srng_access_end(ab, srng); 3766 3767 spin_unlock_bh(&srng->lock); 3768 3769 for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) { 3770 if (!num_buffs_reaped[device_id]) 3771 continue; 3772 3773 partner_ab = ath12k_ag_to_ab(ag, device_id); 3774 rx_ring = &partner_ab->dp.rx_refill_buf_ring; 3775 3776 ath12k_dp_rx_bufs_replenish(partner_ab, rx_ring, 3777 &rx_desc_used_list[device_id], 3778 num_buffs_reaped[device_id]); 3779 } 3780 3781 return tot_n_bufs_reaped; 3782 } 3783 3784 static void ath12k_dp_rx_null_q_desc_sg_drop(struct ath12k *ar, 3785 int msdu_len, 3786 struct sk_buff_head *msdu_list) 3787 { 3788 struct sk_buff *skb, *tmp; 3789 struct ath12k_skb_rxcb *rxcb; 3790 int n_buffs; 3791 3792 n_buffs = DIV_ROUND_UP(msdu_len, 3793 (DP_RX_BUFFER_SIZE - ar->ab->hal.hal_desc_sz)); 3794 3795 skb_queue_walk_safe(msdu_list, skb, tmp) { 3796 rxcb = ATH12K_SKB_RXCB(skb); 3797 if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO && 3798 rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) { 3799 if (!n_buffs) 3800 break; 3801 __skb_unlink(skb, msdu_list); 3802 dev_kfree_skb_any(skb); 3803 n_buffs--; 3804 } 3805 } 3806 } 3807 3808 static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu, 3809 struct ath12k_dp_rx_info *rx_info, 3810 struct sk_buff_head *msdu_list) 3811 { 3812 struct ath12k_base *ab = ar->ab; 3813 u16 msdu_len; 3814 struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; 3815 u8 l3pad_bytes; 3816 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 3817 u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; 3818 3819 msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc); 3820 3821 if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) { 3822 /* First buffer will be freed by the caller, so deduct it's length */ 3823 msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz); 3824 ath12k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list); 3825 return -EINVAL; 3826 } 3827 3828 /* Even after cleaning up the sg buffers in the msdu list with above check 3829 * any msdu received with continuation flag needs to be dropped as invalid. 3830 * This protects against some random err frame with continuation flag. 3831 */ 3832 if (rxcb->is_continuation) 3833 return -EINVAL; 3834 3835 if (!ath12k_dp_rx_h_msdu_done(ab, desc)) { 3836 ath12k_warn(ar->ab, 3837 "msdu_done bit not set in null_q_des processing\n"); 3838 __skb_queue_purge(msdu_list); 3839 return -EIO; 3840 } 3841 3842 /* Handle NULL queue descriptor violations arising out a missing 3843 * REO queue for a given peer or a given TID. This typically 3844 * may happen if a packet is received on a QOS enabled TID before the 3845 * ADDBA negotiation for that TID, when the TID queue is setup. Or 3846 * it may also happen for MC/BC frames if they are not routed to the 3847 * non-QOS TID queue, in the absence of any other default TID queue. 3848 * This error can show up both in a REO destination or WBM release ring. 3849 */ 3850 3851 if (rxcb->is_frag) { 3852 skb_pull(msdu, hal_rx_desc_sz); 3853 } else { 3854 l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc); 3855 3856 if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE) 3857 return -EINVAL; 3858 3859 skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len); 3860 skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes); 3861 } 3862 if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, desc, msdu))) 3863 return -EINVAL; 3864 3865 ath12k_dp_rx_h_fetch_info(ab, desc, rx_info); 3866 ath12k_dp_rx_h_ppdu(ar, rx_info); 3867 ath12k_dp_rx_h_mpdu(ar, msdu, desc, rx_info); 3868 3869 rxcb->tid = rx_info->tid; 3870 3871 /* Please note that caller will having the access to msdu and completing 3872 * rx with mac80211. Need not worry about cleaning up amsdu_list. 3873 */ 3874 3875 return 0; 3876 } 3877 3878 static bool ath12k_dp_rx_h_reo_err(struct ath12k *ar, struct sk_buff *msdu, 3879 struct ath12k_dp_rx_info *rx_info, 3880 struct sk_buff_head *msdu_list) 3881 { 3882 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 3883 bool drop = false; 3884 3885 ar->ab->device_stats.reo_error[rxcb->err_code]++; 3886 3887 switch (rxcb->err_code) { 3888 case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO: 3889 if (ath12k_dp_rx_h_null_q_desc(ar, msdu, rx_info, msdu_list)) 3890 drop = true; 3891 break; 3892 case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED: 3893 /* TODO: Do not drop PN failed packets in the driver; 3894 * instead, it is good to drop such packets in mac80211 3895 * after incrementing the replay counters. 3896 */ 3897 fallthrough; 3898 default: 3899 /* TODO: Review other errors and process them to mac80211 3900 * as appropriate. 3901 */ 3902 drop = true; 3903 break; 3904 } 3905 3906 return drop; 3907 } 3908 3909 static bool ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu, 3910 struct ath12k_dp_rx_info *rx_info) 3911 { 3912 struct ath12k_base *ab = ar->ab; 3913 u16 msdu_len; 3914 struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; 3915 u8 l3pad_bytes; 3916 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 3917 u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; 3918 3919 rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, desc); 3920 rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, desc); 3921 3922 l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc); 3923 msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc); 3924 3925 if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE) { 3926 ath12k_dbg(ab, ATH12K_DBG_DATA, 3927 "invalid msdu len in tkip mic err %u\n", msdu_len); 3928 ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", desc, 3929 sizeof(*desc)); 3930 return true; 3931 } 3932 3933 skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len); 3934 skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes); 3935 3936 if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, desc, msdu))) 3937 return true; 3938 3939 ath12k_dp_rx_h_ppdu(ar, rx_info); 3940 3941 rx_info->rx_status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR | 3942 RX_FLAG_DECRYPTED); 3943 3944 ath12k_dp_rx_h_undecap(ar, msdu, desc, 3945 HAL_ENCRYPT_TYPE_TKIP_MIC, rx_info->rx_status, false); 3946 return false; 3947 } 3948 3949 static bool ath12k_dp_rx_h_rxdma_err(struct ath12k *ar, struct sk_buff *msdu, 3950 struct ath12k_dp_rx_info *rx_info) 3951 { 3952 struct ath12k_base *ab = ar->ab; 3953 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 3954 struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data; 3955 bool drop = false; 3956 u32 err_bitmap; 3957 3958 ar->ab->device_stats.rxdma_error[rxcb->err_code]++; 3959 3960 switch (rxcb->err_code) { 3961 case HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR: 3962 case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR: 3963 err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc); 3964 if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) { 3965 ath12k_dp_rx_h_fetch_info(ab, rx_desc, rx_info); 3966 drop = ath12k_dp_rx_h_tkip_mic_err(ar, msdu, rx_info); 3967 break; 3968 } 3969 fallthrough; 3970 default: 3971 /* TODO: Review other rxdma error code to check if anything is 3972 * worth reporting to mac80211 3973 */ 3974 drop = true; 3975 break; 3976 } 3977 3978 return drop; 3979 } 3980 3981 static void ath12k_dp_rx_wbm_err(struct ath12k *ar, 3982 struct napi_struct *napi, 3983 struct sk_buff *msdu, 3984 struct sk_buff_head *msdu_list) 3985 { 3986 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 3987 struct ieee80211_rx_status rxs = {0}; 3988 struct ath12k_dp_rx_info rx_info; 3989 bool drop = true; 3990 3991 rx_info.addr2_present = false; 3992 rx_info.rx_status = &rxs; 3993 3994 switch (rxcb->err_rel_src) { 3995 case HAL_WBM_REL_SRC_MODULE_REO: 3996 drop = ath12k_dp_rx_h_reo_err(ar, msdu, &rx_info, msdu_list); 3997 break; 3998 case HAL_WBM_REL_SRC_MODULE_RXDMA: 3999 drop = ath12k_dp_rx_h_rxdma_err(ar, msdu, &rx_info); 4000 break; 4001 default: 4002 /* msdu will get freed */ 4003 break; 4004 } 4005 4006 if (drop) { 4007 dev_kfree_skb_any(msdu); 4008 return; 4009 } 4010 4011 ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_info); 4012 } 4013 4014 int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab, 4015 struct napi_struct *napi, int budget) 4016 { 4017 struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES]; 4018 struct ath12k_hw_group *ag = ab->ag; 4019 struct ath12k *ar; 4020 struct ath12k_dp *dp = &ab->dp; 4021 struct dp_rxdma_ring *rx_ring; 4022 struct hal_rx_wbm_rel_info err_info; 4023 struct hal_srng *srng; 4024 struct sk_buff *msdu; 4025 struct sk_buff_head msdu_list, scatter_msdu_list; 4026 struct ath12k_skb_rxcb *rxcb; 4027 void *rx_desc; 4028 int num_buffs_reaped[ATH12K_MAX_DEVICES] = {}; 4029 int total_num_buffs_reaped = 0; 4030 struct ath12k_rx_desc_info *desc_info; 4031 struct ath12k_device_dp_stats *device_stats = &ab->device_stats; 4032 struct ath12k_hw_link *hw_links = ag->hw_links; 4033 struct ath12k_base *partner_ab; 4034 u8 hw_link_id, device_id; 4035 int ret, pdev_id; 4036 struct hal_rx_desc *msdu_data; 4037 4038 __skb_queue_head_init(&msdu_list); 4039 __skb_queue_head_init(&scatter_msdu_list); 4040 4041 for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) 4042 INIT_LIST_HEAD(&rx_desc_used_list[device_id]); 4043 4044 srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id]; 4045 spin_lock_bh(&srng->lock); 4046 4047 ath12k_hal_srng_access_begin(ab, srng); 4048 4049 while (budget) { 4050 rx_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng); 4051 if (!rx_desc) 4052 break; 4053 4054 ret = ath12k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info); 4055 if (ret) { 4056 ath12k_warn(ab, 4057 "failed to parse rx error in wbm_rel ring desc %d\n", 4058 ret); 4059 continue; 4060 } 4061 4062 desc_info = err_info.rx_desc; 4063 4064 /* retry manual desc retrieval if hw cc is not done */ 4065 if (!desc_info) { 4066 desc_info = ath12k_dp_get_rx_desc(ab, err_info.cookie); 4067 if (!desc_info) { 4068 ath12k_warn(ab, "Invalid cookie in DP WBM rx error descriptor retrieval: 0x%x\n", 4069 err_info.cookie); 4070 continue; 4071 } 4072 } 4073 4074 if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC) 4075 ath12k_warn(ab, "WBM RX err, Check HW CC implementation"); 4076 4077 msdu = desc_info->skb; 4078 desc_info->skb = NULL; 4079 4080 device_id = desc_info->device_id; 4081 partner_ab = ath12k_ag_to_ab(ag, device_id); 4082 if (unlikely(!partner_ab)) { 4083 dev_kfree_skb_any(msdu); 4084 4085 /* In any case continuation bit is set 4086 * in the previous record, cleanup scatter_msdu_list 4087 */ 4088 ath12k_dp_clean_up_skb_list(&scatter_msdu_list); 4089 continue; 4090 } 4091 4092 list_add_tail(&desc_info->list, &rx_desc_used_list[device_id]); 4093 4094 rxcb = ATH12K_SKB_RXCB(msdu); 4095 dma_unmap_single(partner_ab->dev, rxcb->paddr, 4096 msdu->len + skb_tailroom(msdu), 4097 DMA_FROM_DEVICE); 4098 4099 num_buffs_reaped[device_id]++; 4100 total_num_buffs_reaped++; 4101 4102 if (!err_info.continuation) 4103 budget--; 4104 4105 if (err_info.push_reason != 4106 HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) { 4107 dev_kfree_skb_any(msdu); 4108 continue; 4109 } 4110 4111 msdu_data = (struct hal_rx_desc *)msdu->data; 4112 rxcb->err_rel_src = err_info.err_rel_src; 4113 rxcb->err_code = err_info.err_code; 4114 rxcb->is_first_msdu = err_info.first_msdu; 4115 rxcb->is_last_msdu = err_info.last_msdu; 4116 rxcb->is_continuation = err_info.continuation; 4117 rxcb->rx_desc = msdu_data; 4118 4119 if (err_info.continuation) { 4120 __skb_queue_tail(&scatter_msdu_list, msdu); 4121 continue; 4122 } 4123 4124 hw_link_id = ath12k_dp_rx_get_msdu_src_link(partner_ab, 4125 msdu_data); 4126 if (hw_link_id >= ATH12K_GROUP_MAX_RADIO) { 4127 dev_kfree_skb_any(msdu); 4128 4129 /* In any case continuation bit is set 4130 * in the previous record, cleanup scatter_msdu_list 4131 */ 4132 ath12k_dp_clean_up_skb_list(&scatter_msdu_list); 4133 continue; 4134 } 4135 4136 if (!skb_queue_empty(&scatter_msdu_list)) { 4137 struct sk_buff *msdu; 4138 4139 skb_queue_walk(&scatter_msdu_list, msdu) { 4140 rxcb = ATH12K_SKB_RXCB(msdu); 4141 rxcb->hw_link_id = hw_link_id; 4142 } 4143 4144 skb_queue_splice_tail_init(&scatter_msdu_list, 4145 &msdu_list); 4146 } 4147 4148 rxcb = ATH12K_SKB_RXCB(msdu); 4149 rxcb->hw_link_id = hw_link_id; 4150 __skb_queue_tail(&msdu_list, msdu); 4151 } 4152 4153 /* In any case continuation bit is set in the 4154 * last record, cleanup scatter_msdu_list 4155 */ 4156 ath12k_dp_clean_up_skb_list(&scatter_msdu_list); 4157 4158 ath12k_hal_srng_access_end(ab, srng); 4159 4160 spin_unlock_bh(&srng->lock); 4161 4162 if (!total_num_buffs_reaped) 4163 goto done; 4164 4165 for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) { 4166 if (!num_buffs_reaped[device_id]) 4167 continue; 4168 4169 partner_ab = ath12k_ag_to_ab(ag, device_id); 4170 rx_ring = &partner_ab->dp.rx_refill_buf_ring; 4171 4172 ath12k_dp_rx_bufs_replenish(ab, rx_ring, 4173 &rx_desc_used_list[device_id], 4174 num_buffs_reaped[device_id]); 4175 } 4176 4177 rcu_read_lock(); 4178 while ((msdu = __skb_dequeue(&msdu_list))) { 4179 rxcb = ATH12K_SKB_RXCB(msdu); 4180 hw_link_id = rxcb->hw_link_id; 4181 4182 device_id = hw_links[hw_link_id].device_id; 4183 partner_ab = ath12k_ag_to_ab(ag, device_id); 4184 if (unlikely(!partner_ab)) { 4185 ath12k_dbg(ab, ATH12K_DBG_DATA, 4186 "Unable to process WBM error msdu due to invalid hw link id %d device id %d\n", 4187 hw_link_id, device_id); 4188 dev_kfree_skb_any(msdu); 4189 continue; 4190 } 4191 4192 pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params, 4193 hw_links[hw_link_id].pdev_idx); 4194 ar = partner_ab->pdevs[pdev_id].ar; 4195 4196 if (!ar || !rcu_dereference(ar->ab->pdevs_active[pdev_id])) { 4197 dev_kfree_skb_any(msdu); 4198 continue; 4199 } 4200 4201 if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) { 4202 dev_kfree_skb_any(msdu); 4203 continue; 4204 } 4205 4206 if (rxcb->err_rel_src < HAL_WBM_REL_SRC_MODULE_MAX) { 4207 device_id = ar->ab->device_id; 4208 device_stats->rx_wbm_rel_source[rxcb->err_rel_src][device_id]++; 4209 } 4210 4211 ath12k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list); 4212 } 4213 rcu_read_unlock(); 4214 done: 4215 return total_num_buffs_reaped; 4216 } 4217 4218 void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab) 4219 { 4220 struct ath12k_dp *dp = &ab->dp; 4221 struct hal_tlv_64_hdr *hdr; 4222 struct hal_srng *srng; 4223 struct ath12k_dp_rx_reo_cmd *cmd, *tmp; 4224 bool found = false; 4225 u16 tag; 4226 struct hal_reo_status reo_status; 4227 4228 srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id]; 4229 4230 memset(&reo_status, 0, sizeof(reo_status)); 4231 4232 spin_lock_bh(&srng->lock); 4233 4234 ath12k_hal_srng_access_begin(ab, srng); 4235 4236 while ((hdr = ath12k_hal_srng_dst_get_next_entry(ab, srng))) { 4237 tag = le64_get_bits(hdr->tl, HAL_SRNG_TLV_HDR_TAG); 4238 4239 switch (tag) { 4240 case HAL_REO_GET_QUEUE_STATS_STATUS: 4241 ath12k_hal_reo_status_queue_stats(ab, hdr, 4242 &reo_status); 4243 break; 4244 case HAL_REO_FLUSH_QUEUE_STATUS: 4245 ath12k_hal_reo_flush_queue_status(ab, hdr, 4246 &reo_status); 4247 break; 4248 case HAL_REO_FLUSH_CACHE_STATUS: 4249 ath12k_hal_reo_flush_cache_status(ab, hdr, 4250 &reo_status); 4251 break; 4252 case HAL_REO_UNBLOCK_CACHE_STATUS: 4253 ath12k_hal_reo_unblk_cache_status(ab, hdr, 4254 &reo_status); 4255 break; 4256 case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS: 4257 ath12k_hal_reo_flush_timeout_list_status(ab, hdr, 4258 &reo_status); 4259 break; 4260 case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS: 4261 ath12k_hal_reo_desc_thresh_reached_status(ab, hdr, 4262 &reo_status); 4263 break; 4264 case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS: 4265 ath12k_hal_reo_update_rx_reo_queue_status(ab, hdr, 4266 &reo_status); 4267 break; 4268 default: 4269 ath12k_warn(ab, "Unknown reo status type %d\n", tag); 4270 continue; 4271 } 4272 4273 spin_lock_bh(&dp->reo_cmd_lock); 4274 list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) { 4275 if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) { 4276 found = true; 4277 list_del(&cmd->list); 4278 break; 4279 } 4280 } 4281 spin_unlock_bh(&dp->reo_cmd_lock); 4282 4283 if (found) { 4284 cmd->handler(dp, (void *)&cmd->data, 4285 reo_status.uniform_hdr.cmd_status); 4286 kfree(cmd); 4287 } 4288 4289 found = false; 4290 } 4291 4292 ath12k_hal_srng_access_end(ab, srng); 4293 4294 spin_unlock_bh(&srng->lock); 4295 } 4296 4297 void ath12k_dp_rx_free(struct ath12k_base *ab) 4298 { 4299 struct ath12k_dp *dp = &ab->dp; 4300 struct dp_srng *srng; 4301 int i; 4302 4303 ath12k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring); 4304 4305 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { 4306 if (ab->hw_params->rx_mac_buf_ring) 4307 ath12k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]); 4308 if (!ab->hw_params->rxdma1_enable) { 4309 srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring; 4310 ath12k_dp_srng_cleanup(ab, srng); 4311 } 4312 } 4313 4314 for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) 4315 ath12k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]); 4316 4317 ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring); 4318 4319 ath12k_dp_rxdma_buf_free(ab); 4320 } 4321 4322 void ath12k_dp_rx_pdev_free(struct ath12k_base *ab, int mac_id) 4323 { 4324 struct ath12k *ar = ab->pdevs[mac_id].ar; 4325 4326 ath12k_dp_rx_pdev_srng_free(ar); 4327 } 4328 4329 int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab) 4330 { 4331 struct ath12k_dp *dp = &ab->dp; 4332 struct htt_rx_ring_tlv_filter tlv_filter = {0}; 4333 u32 ring_id; 4334 int ret; 4335 u32 hal_rx_desc_sz = ab->hal.hal_desc_sz; 4336 4337 ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id; 4338 4339 tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING; 4340 tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR; 4341 tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST | 4342 HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST | 4343 HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA; 4344 tlv_filter.offset_valid = true; 4345 tlv_filter.rx_packet_offset = hal_rx_desc_sz; 4346 4347 tlv_filter.rx_mpdu_start_offset = 4348 ab->hal_rx_ops->rx_desc_get_mpdu_start_offset(); 4349 tlv_filter.rx_msdu_end_offset = 4350 ab->hal_rx_ops->rx_desc_get_msdu_end_offset(); 4351 4352 if (ath12k_dp_wmask_compaction_rx_tlv_supported(ab)) { 4353 tlv_filter.rx_mpdu_start_wmask = 4354 ab->hw_params->hal_ops->rxdma_ring_wmask_rx_mpdu_start(); 4355 tlv_filter.rx_msdu_end_wmask = 4356 ab->hw_params->hal_ops->rxdma_ring_wmask_rx_msdu_end(); 4357 ath12k_dbg(ab, ATH12K_DBG_DATA, 4358 "Configuring compact tlv masks rx_mpdu_start_wmask 0x%x rx_msdu_end_wmask 0x%x\n", 4359 tlv_filter.rx_mpdu_start_wmask, tlv_filter.rx_msdu_end_wmask); 4360 } 4361 4362 ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, 0, 4363 HAL_RXDMA_BUF, 4364 DP_RXDMA_REFILL_RING_SIZE, 4365 &tlv_filter); 4366 4367 return ret; 4368 } 4369 4370 int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab) 4371 { 4372 struct ath12k_dp *dp = &ab->dp; 4373 struct htt_rx_ring_tlv_filter tlv_filter = {0}; 4374 u32 ring_id; 4375 int ret = 0; 4376 u32 hal_rx_desc_sz = ab->hal.hal_desc_sz; 4377 int i; 4378 4379 ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id; 4380 4381 tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING; 4382 tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR; 4383 tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST | 4384 HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST | 4385 HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA; 4386 tlv_filter.offset_valid = true; 4387 tlv_filter.rx_packet_offset = hal_rx_desc_sz; 4388 4389 tlv_filter.rx_header_offset = offsetof(struct hal_rx_desc_wcn7850, pkt_hdr_tlv); 4390 4391 tlv_filter.rx_mpdu_start_offset = 4392 ab->hal_rx_ops->rx_desc_get_mpdu_start_offset(); 4393 tlv_filter.rx_msdu_end_offset = 4394 ab->hal_rx_ops->rx_desc_get_msdu_end_offset(); 4395 4396 /* TODO: Selectively subscribe to required qwords within msdu_end 4397 * and mpdu_start and setup the mask in below msg 4398 * and modify the rx_desc struct 4399 */ 4400 4401 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { 4402 ring_id = dp->rx_mac_buf_ring[i].ring_id; 4403 ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, i, 4404 HAL_RXDMA_BUF, 4405 DP_RXDMA_REFILL_RING_SIZE, 4406 &tlv_filter); 4407 } 4408 4409 return ret; 4410 } 4411 4412 int ath12k_dp_rx_htt_setup(struct ath12k_base *ab) 4413 { 4414 struct ath12k_dp *dp = &ab->dp; 4415 u32 ring_id; 4416 int i, ret; 4417 4418 /* TODO: Need to verify the HTT setup for QCN9224 */ 4419 ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id; 4420 ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, 0, HAL_RXDMA_BUF); 4421 if (ret) { 4422 ath12k_warn(ab, "failed to configure rx_refill_buf_ring %d\n", 4423 ret); 4424 return ret; 4425 } 4426 4427 if (ab->hw_params->rx_mac_buf_ring) { 4428 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { 4429 ring_id = dp->rx_mac_buf_ring[i].ring_id; 4430 ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, 4431 i, HAL_RXDMA_BUF); 4432 if (ret) { 4433 ath12k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n", 4434 i, ret); 4435 return ret; 4436 } 4437 } 4438 } 4439 4440 for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) { 4441 ring_id = dp->rxdma_err_dst_ring[i].ring_id; 4442 ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, 4443 i, HAL_RXDMA_DST); 4444 if (ret) { 4445 ath12k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n", 4446 i, ret); 4447 return ret; 4448 } 4449 } 4450 4451 if (ab->hw_params->rxdma1_enable) { 4452 ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id; 4453 ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, 4454 0, HAL_RXDMA_MONITOR_BUF); 4455 if (ret) { 4456 ath12k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n", 4457 ret); 4458 return ret; 4459 } 4460 } else { 4461 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { 4462 ring_id = 4463 dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id; 4464 ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, i, 4465 HAL_RXDMA_MONITOR_STATUS); 4466 if (ret) { 4467 ath12k_warn(ab, 4468 "failed to configure mon_status_refill_ring%d %d\n", 4469 i, ret); 4470 return ret; 4471 } 4472 } 4473 } 4474 4475 ret = ab->hw_params->hw_ops->rxdma_ring_sel_config(ab); 4476 if (ret) { 4477 ath12k_warn(ab, "failed to setup rxdma ring selection config\n"); 4478 return ret; 4479 } 4480 4481 return 0; 4482 } 4483 4484 int ath12k_dp_rx_alloc(struct ath12k_base *ab) 4485 { 4486 struct ath12k_dp *dp = &ab->dp; 4487 struct dp_srng *srng; 4488 int i, ret; 4489 4490 idr_init(&dp->rxdma_mon_buf_ring.bufs_idr); 4491 spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock); 4492 4493 ret = ath12k_dp_srng_setup(ab, 4494 &dp->rx_refill_buf_ring.refill_buf_ring, 4495 HAL_RXDMA_BUF, 0, 0, 4496 DP_RXDMA_BUF_RING_SIZE); 4497 if (ret) { 4498 ath12k_warn(ab, "failed to setup rx_refill_buf_ring\n"); 4499 return ret; 4500 } 4501 4502 if (ab->hw_params->rx_mac_buf_ring) { 4503 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { 4504 ret = ath12k_dp_srng_setup(ab, 4505 &dp->rx_mac_buf_ring[i], 4506 HAL_RXDMA_BUF, 1, 4507 i, DP_RX_MAC_BUF_RING_SIZE); 4508 if (ret) { 4509 ath12k_warn(ab, "failed to setup rx_mac_buf_ring %d\n", 4510 i); 4511 return ret; 4512 } 4513 } 4514 } 4515 4516 for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) { 4517 ret = ath12k_dp_srng_setup(ab, &dp->rxdma_err_dst_ring[i], 4518 HAL_RXDMA_DST, 0, i, 4519 DP_RXDMA_ERR_DST_RING_SIZE); 4520 if (ret) { 4521 ath12k_warn(ab, "failed to setup rxdma_err_dst_ring %d\n", i); 4522 return ret; 4523 } 4524 } 4525 4526 if (ab->hw_params->rxdma1_enable) { 4527 ret = ath12k_dp_srng_setup(ab, 4528 &dp->rxdma_mon_buf_ring.refill_buf_ring, 4529 HAL_RXDMA_MONITOR_BUF, 0, 0, 4530 DP_RXDMA_MONITOR_BUF_RING_SIZE); 4531 if (ret) { 4532 ath12k_warn(ab, "failed to setup HAL_RXDMA_MONITOR_BUF\n"); 4533 return ret; 4534 } 4535 } else { 4536 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { 4537 idr_init(&dp->rx_mon_status_refill_ring[i].bufs_idr); 4538 spin_lock_init(&dp->rx_mon_status_refill_ring[i].idr_lock); 4539 } 4540 4541 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { 4542 srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring; 4543 ret = ath12k_dp_srng_setup(ab, srng, 4544 HAL_RXDMA_MONITOR_STATUS, 0, i, 4545 DP_RXDMA_MON_STATUS_RING_SIZE); 4546 if (ret) { 4547 ath12k_warn(ab, "failed to setup mon status ring %d\n", 4548 i); 4549 return ret; 4550 } 4551 } 4552 } 4553 4554 ret = ath12k_dp_rxdma_buf_setup(ab); 4555 if (ret) { 4556 ath12k_warn(ab, "failed to setup rxdma ring\n"); 4557 return ret; 4558 } 4559 4560 return 0; 4561 } 4562 4563 int ath12k_dp_rx_pdev_alloc(struct ath12k_base *ab, int mac_id) 4564 { 4565 struct ath12k *ar = ab->pdevs[mac_id].ar; 4566 struct ath12k_pdev_dp *dp = &ar->dp; 4567 u32 ring_id; 4568 int i; 4569 int ret; 4570 4571 if (!ab->hw_params->rxdma1_enable) 4572 goto out; 4573 4574 ret = ath12k_dp_rx_pdev_srng_alloc(ar); 4575 if (ret) { 4576 ath12k_warn(ab, "failed to setup rx srngs\n"); 4577 return ret; 4578 } 4579 4580 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { 4581 ring_id = dp->rxdma_mon_dst_ring[i].ring_id; 4582 ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, 4583 mac_id + i, 4584 HAL_RXDMA_MONITOR_DST); 4585 if (ret) { 4586 ath12k_warn(ab, 4587 "failed to configure rxdma_mon_dst_ring %d %d\n", 4588 i, ret); 4589 return ret; 4590 } 4591 } 4592 out: 4593 return 0; 4594 } 4595 4596 static int ath12k_dp_rx_pdev_mon_status_attach(struct ath12k *ar) 4597 { 4598 struct ath12k_pdev_dp *dp = &ar->dp; 4599 struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&dp->mon_data; 4600 4601 skb_queue_head_init(&pmon->rx_status_q); 4602 4603 pmon->mon_ppdu_status = DP_PPDU_STATUS_START; 4604 4605 memset(&pmon->rx_mon_stats, 0, 4606 sizeof(pmon->rx_mon_stats)); 4607 return 0; 4608 } 4609 4610 int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar) 4611 { 4612 struct ath12k_pdev_dp *dp = &ar->dp; 4613 struct ath12k_mon_data *pmon = &dp->mon_data; 4614 int ret = 0; 4615 4616 ret = ath12k_dp_rx_pdev_mon_status_attach(ar); 4617 if (ret) { 4618 ath12k_warn(ar->ab, "pdev_mon_status_attach() failed"); 4619 return ret; 4620 } 4621 4622 pmon->mon_last_linkdesc_paddr = 0; 4623 pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1; 4624 spin_lock_init(&pmon->mon_lock); 4625 4626 if (!ar->ab->hw_params->rxdma1_enable) 4627 return 0; 4628 4629 INIT_LIST_HEAD(&pmon->dp_rx_mon_mpdu_list); 4630 pmon->mon_mpdu = NULL; 4631 4632 return 0; 4633 } 4634