// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/ieee80211.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <crypto/hash.h>
#include "core.h"
#include "debug.h"
#include "hal_desc.h"
#include "hw.h"
#include "dp_rx.h"
#include "hal_rx.h"
#include "dp_tx.h"
#include "peer.h"
#include "dp_mon.h"
#include "debugfs_htt_stats.h"

#define ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)

static enum hal_encrypt_type ath12k_dp_rx_h_enctype(struct ath12k_base *ab,
						    struct hal_rx_desc *desc)
{
	if (!ab->hal_rx_ops->rx_desc_encrypt_valid(desc))
		return HAL_ENCRYPT_TYPE_OPEN;

	return ab->hal_rx_ops->rx_desc_get_encrypt_type(desc);
}

u8 ath12k_dp_rx_h_decap_type(struct ath12k_base *ab,
			     struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_decap_type(desc);
}

static u8 ath12k_dp_rx_h_mesh_ctl_present(struct ath12k_base *ab,
					  struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_mesh_ctl(desc);
}

static bool ath12k_dp_rx_h_seq_ctrl_valid(struct ath12k_base *ab,
					  struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
}

static bool ath12k_dp_rx_h_fc_valid(struct ath12k_base *ab,
				    struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_mpdu_fc_valid(desc);
}

static bool ath12k_dp_rx_h_more_frags(struct ath12k_base *ab,
				      struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data + ab->hal.hal_desc_sz);
	return ieee80211_has_morefrags(hdr->frame_control);
}

static u16 ath12k_dp_rx_h_frag_no(struct ath12k_base *ab,
				  struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data + ab->hal.hal_desc_sz);
	return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
}

static u16 ath12k_dp_rx_h_seq_no(struct ath12k_base *ab,
				 struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_mpdu_start_seq_no(desc);
}

static bool ath12k_dp_rx_h_msdu_done(struct ath12k_base *ab,
				     struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->dp_rx_h_msdu_done(desc);
}

static bool ath12k_dp_rx_h_l4_cksum_fail(struct ath12k_base *ab,
					 struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->dp_rx_h_l4_cksum_fail(desc);
}

static bool ath12k_dp_rx_h_ip_cksum_fail(struct ath12k_base *ab,
					 struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->dp_rx_h_ip_cksum_fail(desc);
}

static bool ath12k_dp_rx_h_is_decrypted(struct ath12k_base *ab,
					struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->dp_rx_h_is_decrypted(desc);
}

u32 ath12k_dp_rx_h_mpdu_err(struct ath12k_base *ab,
			    struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->dp_rx_h_mpdu_err(desc);
}

static u16 ath12k_dp_rx_h_msdu_len(struct ath12k_base *ab,
				   struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_msdu_len(desc);
}

static u8 ath12k_dp_rx_h_sgi(struct ath12k_base *ab,
			     struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_msdu_sgi(desc);
}

static u8 ath12k_dp_rx_h_rate_mcs(struct ath12k_base *ab,
				  struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_msdu_rate_mcs(desc);
}
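/* Note: the ath12k_dp_rx_h_*() and ath12k_dp_rxdesc_*() helpers in this
 * file are deliberately thin wrappers around the ab->hal_rx_ops vtable,
 * which is selected per chip family at probe time, so the rx path below
 * never has to parse the hw-specific descriptor layout directly.
 */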
static u8 ath12k_dp_rx_h_rx_bw(struct ath12k_base *ab,
			       struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_msdu_rx_bw(desc);
}

static u32 ath12k_dp_rx_h_freq(struct ath12k_base *ab,
			       struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_msdu_freq(desc);
}

static u8 ath12k_dp_rx_h_pkt_type(struct ath12k_base *ab,
				  struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_msdu_pkt_type(desc);
}

static u8 ath12k_dp_rx_h_nss(struct ath12k_base *ab,
			     struct hal_rx_desc *desc)
{
	return hweight8(ab->hal_rx_ops->rx_desc_get_msdu_nss(desc));
}

static u8 ath12k_dp_rx_h_tid(struct ath12k_base *ab,
			     struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_mpdu_tid(desc);
}

static u16 ath12k_dp_rx_h_peer_id(struct ath12k_base *ab,
				  struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_mpdu_peer_id(desc);
}

u8 ath12k_dp_rx_h_l3pad(struct ath12k_base *ab,
			struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_l3_pad_bytes(desc);
}

static bool ath12k_dp_rx_h_first_msdu(struct ath12k_base *ab,
				      struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_first_msdu(desc);
}

static bool ath12k_dp_rx_h_last_msdu(struct ath12k_base *ab,
				     struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_last_msdu(desc);
}

static void ath12k_dp_rx_desc_end_tlv_copy(struct ath12k_base *ab,
					   struct hal_rx_desc *fdesc,
					   struct hal_rx_desc *ldesc)
{
	ab->hal_rx_ops->rx_desc_copy_end_tlv(fdesc, ldesc);
}

static void ath12k_dp_rxdesc_set_msdu_len(struct ath12k_base *ab,
					  struct hal_rx_desc *desc,
					  u16 len)
{
	ab->hal_rx_ops->rx_desc_set_msdu_len(desc, len);
}

u32 ath12k_dp_rxdesc_get_ppduid(struct ath12k_base *ab,
				struct hal_rx_desc *rx_desc)
{
	return ab->hal_rx_ops->rx_desc_get_mpdu_ppdu_id(rx_desc);
}

bool ath12k_dp_rxdesc_mpdu_valid(struct ath12k_base *ab,
				 struct hal_rx_desc *rx_desc)
{
	u32 tlv_tag;

	tlv_tag = ab->hal_rx_ops->rx_desc_get_mpdu_start_tag(rx_desc);

	return tlv_tag == HAL_RX_MPDU_START;
}

static bool ath12k_dp_rx_h_is_da_mcbc(struct ath12k_base *ab,
				      struct hal_rx_desc *desc)
{
	return (ath12k_dp_rx_h_first_msdu(ab, desc) &&
		ab->hal_rx_ops->rx_desc_is_da_mcbc(desc));
}

static bool ath12k_dp_rxdesc_mac_addr2_valid(struct ath12k_base *ab,
					     struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_mac_addr2_valid(desc);
}

static u8 *ath12k_dp_rxdesc_get_mpdu_start_addr2(struct ath12k_base *ab,
						 struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_mpdu_start_addr2(desc);
}

static void ath12k_dp_rx_desc_get_dot11_hdr(struct ath12k_base *ab,
					    struct hal_rx_desc *desc,
					    struct ieee80211_hdr *hdr)
{
	ab->hal_rx_ops->rx_desc_get_dot11_hdr(desc, hdr);
}

static void ath12k_dp_rx_desc_get_crypto_header(struct ath12k_base *ab,
						struct hal_rx_desc *desc,
						u8 *crypto_hdr,
						enum hal_encrypt_type enctype)
{
	ab->hal_rx_ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype);
}

static inline u8 ath12k_dp_rx_get_msdu_src_link(struct ath12k_base *ab,
						struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_msdu_src_link_id(desc);
}

static void ath12k_dp_clean_up_skb_list(struct sk_buff_head *skb_list)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(skb_list)))
		dev_kfree_skb_any(skb);
}

static size_t ath12k_dp_list_cut_nodes(struct list_head *list,
				       struct list_head *head,
				       size_t count)
{
	struct list_head *cur;
	struct ath12k_rx_desc_info *rx_desc;
	size_t nodes = 0;

	if (!count) {
		INIT_LIST_HEAD(list);
		goto out;
	}

	list_for_each(cur, head) {
		if (!count)
			break;

		rx_desc = list_entry(cur, struct ath12k_rx_desc_info, list);
		rx_desc->in_use = true;

		count--;
		nodes++;
	}

	list_cut_before(list, head, cur);
out:
	return nodes;
}

static void ath12k_dp_rx_enqueue_free(struct ath12k_dp *dp,
				      struct list_head *used_list)
{
	struct ath12k_rx_desc_info *rx_desc, *safe;

	/* Reset the use flag */
	list_for_each_entry_safe(rx_desc, safe, used_list, list)
		rx_desc->in_use = false;

	spin_lock_bh(&dp->rx_desc_lock);
	list_splice_tail(used_list, &dp->rx_desc_free_list);
	spin_unlock_bh(&dp->rx_desc_lock);
}
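/* A minimal caller-side sketch for the replenish helper below, assuming
 * the common case of an empty used_list so that the rx descriptors are
 * cut from dp->rx_desc_free_list internally (this is exactly how
 * ath12k_dp_rxdma_ring_buf_setup() uses it):
 *
 *	LIST_HEAD(list);
 *
 *	ath12k_dp_rx_bufs_replenish(ab, rx_ring, &list, 0);
 *
 * With req_entries == 0 the ring is only topped up once more than three
 * quarters of its entries are free.
 */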
/* Returns number of Rx buffers replenished */
int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
				struct dp_rxdma_ring *rx_ring,
				struct list_head *used_list,
				int req_entries)
{
	struct ath12k_buffer_addr *desc;
	struct hal_srng *srng;
	struct sk_buff *skb;
	int num_free;
	int num_remain;
	u32 cookie;
	dma_addr_t paddr;
	struct ath12k_dp *dp = &ab->dp;
	struct ath12k_rx_desc_info *rx_desc;
	enum hal_rx_buf_return_buf_manager mgr = ab->hw_params->hal_params->rx_buf_rbm;

	req_entries = min(req_entries, rx_ring->bufs_max);

	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	num_free = ath12k_hal_srng_src_num_free(ab, srng, true);
	if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
		req_entries = num_free;

	req_entries = min(num_free, req_entries);
	num_remain = req_entries;

	if (!num_remain)
		goto out;

	/* Get the descriptor from free list */
	if (list_empty(used_list)) {
		spin_lock_bh(&dp->rx_desc_lock);
		req_entries = ath12k_dp_list_cut_nodes(used_list,
						       &dp->rx_desc_free_list,
						       num_remain);
		spin_unlock_bh(&dp->rx_desc_lock);
		num_remain = req_entries;
	}

	while (num_remain > 0) {
		skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
				    DP_RX_BUFFER_ALIGN_SIZE);
		if (!skb)
			break;

		if (!IS_ALIGNED((unsigned long)skb->data,
				DP_RX_BUFFER_ALIGN_SIZE)) {
			skb_pull(skb,
				 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
				 skb->data);
		}

		paddr = dma_map_single(ab->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(ab->dev, paddr))
			goto fail_free_skb;

		rx_desc = list_first_entry_or_null(used_list,
						   struct ath12k_rx_desc_info,
						   list);
		if (!rx_desc)
			goto fail_dma_unmap;

		rx_desc->skb = skb;
		cookie = rx_desc->cookie;

		desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
		if (!desc)
			goto fail_dma_unmap;

		list_del(&rx_desc->list);
		ATH12K_SKB_RXCB(skb)->paddr = paddr;

		num_remain--;

		ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
	}

	goto out;

fail_dma_unmap:
	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
			 DMA_FROM_DEVICE);
fail_free_skb:
	dev_kfree_skb_any(skb);
out:
	ath12k_hal_srng_access_end(ab, srng);

	if (!list_empty(used_list))
		ath12k_dp_rx_enqueue_free(dp, used_list);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;
}
static int ath12k_dp_rxdma_mon_buf_ring_free(struct ath12k_base *ab,
					     struct dp_rxdma_mon_ring *rx_ring)
{
	struct sk_buff *skb;
	int buf_id;

	spin_lock_bh(&rx_ring->idr_lock);
	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
		idr_remove(&rx_ring->bufs_idr, buf_id);
		/* TODO: Understand where internal driver does this dma_unmap
		 * of rxdma_buffer.
		 */
		dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	idr_destroy(&rx_ring->bufs_idr);
	spin_unlock_bh(&rx_ring->idr_lock);

	return 0;
}

static int ath12k_dp_rxdma_buf_free(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	int i;

	ath12k_dp_rxdma_mon_buf_ring_free(ab, &dp->rxdma_mon_buf_ring);

	if (ab->hw_params->rxdma1_enable)
		return 0;

	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++)
		ath12k_dp_rxdma_mon_buf_ring_free(ab,
						  &dp->rx_mon_status_refill_ring[i]);

	return 0;
}

static int ath12k_dp_rxdma_mon_ring_buf_setup(struct ath12k_base *ab,
					      struct dp_rxdma_mon_ring *rx_ring,
					      u32 ringtype)
{
	int num_entries;

	num_entries = rx_ring->refill_buf_ring.size /
		ath12k_hal_srng_get_entrysize(ab, ringtype);

	rx_ring->bufs_max = num_entries;

	if (ringtype == HAL_RXDMA_MONITOR_STATUS)
		ath12k_dp_mon_status_bufs_replenish(ab, rx_ring,
						    num_entries);
	else
		ath12k_dp_mon_buf_replenish(ab, rx_ring, num_entries);

	return 0;
}

static int ath12k_dp_rxdma_ring_buf_setup(struct ath12k_base *ab,
					  struct dp_rxdma_ring *rx_ring)
{
	LIST_HEAD(list);

	rx_ring->bufs_max = rx_ring->refill_buf_ring.size /
			ath12k_hal_srng_get_entrysize(ab, HAL_RXDMA_BUF);

	ath12k_dp_rx_bufs_replenish(ab, rx_ring, &list, 0);

	return 0;
}

static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct dp_rxdma_mon_ring *mon_ring;
	int ret, i;

	ret = ath12k_dp_rxdma_ring_buf_setup(ab, &dp->rx_refill_buf_ring);
	if (ret) {
		ath12k_warn(ab,
			    "failed to setup HAL_RXDMA_BUF\n");
		return ret;
	}

	if (ab->hw_params->rxdma1_enable) {
		ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab,
							 &dp->rxdma_mon_buf_ring,
							 HAL_RXDMA_MONITOR_BUF);
		if (ret)
			ath12k_warn(ab,
				    "failed to setup HAL_RXDMA_MONITOR_BUF\n");
		return ret;
	}

	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
		mon_ring = &dp->rx_mon_status_refill_ring[i];
		ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab, mon_ring,
							 HAL_RXDMA_MONITOR_STATUS);
		if (ret) {
			ath12k_warn(ab,
				    "failed to setup HAL_RXDMA_MONITOR_STATUS\n");
			return ret;
		}
	}

	return 0;
}

static void ath12k_dp_rx_pdev_srng_free(struct ath12k *ar)
{
	struct ath12k_pdev_dp *dp = &ar->dp;
	struct ath12k_base *ab = ar->ab;
	int i;

	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++)
		ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_dst_ring[i]);
}

void ath12k_dp_rx_pdev_reo_cleanup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	int i;

	for (i = 0; i < DP_REO_DST_RING_MAX; i++)
		ath12k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
}
int ath12k_dp_rx_pdev_reo_setup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	int ret;
	int i;

	for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
		ret = ath12k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
					   HAL_REO_DST, i, 0,
					   DP_REO_DST_RING_SIZE);
		if (ret) {
			ath12k_warn(ab, "failed to setup reo_dst_ring\n");
			goto err_reo_cleanup;
		}
	}

	return 0;

err_reo_cleanup:
	ath12k_dp_rx_pdev_reo_cleanup(ab);

	return ret;
}

static int ath12k_dp_rx_pdev_srng_alloc(struct ath12k *ar)
{
	struct ath12k_pdev_dp *dp = &ar->dp;
	struct ath12k_base *ab = ar->ab;
	int i;
	int ret;
	u32 mac_id = dp->mac_id;

	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
		ret = ath12k_dp_srng_setup(ar->ab,
					   &dp->rxdma_mon_dst_ring[i],
					   HAL_RXDMA_MONITOR_DST,
					   0, mac_id + i,
					   DP_RXDMA_MONITOR_DST_RING_SIZE);
		if (ret) {
			ath12k_warn(ar->ab,
				    "failed to setup HAL_RXDMA_MONITOR_DST\n");
			return ret;
		}
	}

	return 0;
}

void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
	struct ath12k_dp_rx_reo_cache_flush_elem *cmd_cache, *tmp_cache;

	spin_lock_bh(&dp->reo_cmd_lock);
	list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
		list_del(&cmd->list);
		dma_unmap_single(ab->dev, cmd->data.qbuf.paddr_aligned,
				 cmd->data.qbuf.size, DMA_BIDIRECTIONAL);
		kfree(cmd->data.qbuf.vaddr);
		kfree(cmd);
	}

	list_for_each_entry_safe(cmd_cache, tmp_cache,
				 &dp->reo_cmd_cache_flush_list, list) {
		list_del(&cmd_cache->list);
		dp->reo_cmd_cache_flush_count--;
		dma_unmap_single(ab->dev, cmd_cache->data.qbuf.paddr_aligned,
				 cmd_cache->data.qbuf.size, DMA_BIDIRECTIONAL);
		kfree(cmd_cache->data.qbuf.vaddr);
		kfree(cmd_cache);
	}
	spin_unlock_bh(&dp->reo_cmd_lock);
}

static void ath12k_dp_reo_cmd_free(struct ath12k_dp *dp, void *ctx,
				   enum hal_reo_cmd_status status)
{
	struct ath12k_dp_rx_tid *rx_tid = ctx;

	if (status != HAL_REO_CMD_SUCCESS)
		ath12k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
			    rx_tid->tid, status);

	dma_unmap_single(dp->ab->dev, rx_tid->qbuf.paddr_aligned, rx_tid->qbuf.size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->qbuf.vaddr);
	rx_tid->qbuf.vaddr = NULL;
}

static int ath12k_dp_reo_cmd_send(struct ath12k_base *ab, struct ath12k_dp_rx_tid *rx_tid,
				  enum hal_reo_cmd_type type,
				  struct ath12k_hal_reo_cmd *cmd,
				  void (*cb)(struct ath12k_dp *dp, void *ctx,
					     enum hal_reo_cmd_status status))
{
	struct ath12k_dp *dp = &ab->dp;
	struct ath12k_dp_rx_reo_cmd *dp_cmd;
	struct hal_srng *cmd_ring;
	int cmd_num;

	cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
	cmd_num = ath12k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);

	/* cmd_num should start from 1, during failure return the error code */
	if (cmd_num < 0)
		return cmd_num;

	/* reo cmd ring descriptors have cmd_num starting from 1 */
	if (cmd_num == 0)
		return -EINVAL;

	if (!cb)
		return 0;

	/* Can this be optimized so that we keep the pending command list only
	 * for tid delete command to free up the resource on the command status
	 * indication?
	 */
	dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC);

	if (!dp_cmd)
		return -ENOMEM;

	memcpy(&dp_cmd->data, rx_tid, sizeof(*rx_tid));
	dp_cmd->cmd_num = cmd_num;
	dp_cmd->handler = cb;

	spin_lock_bh(&dp->reo_cmd_lock);
	list_add_tail(&dp_cmd->list, &dp->reo_cmd_list);
	spin_unlock_bh(&dp->reo_cmd_lock);

	return 0;
}
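/* The flush below walks the hw reorder queue buffer one
 * non-QoS-TID-sized descriptor at a time, from the tail of the buffer
 * back towards its head, and only requests status (with a callback that
 * unmaps and frees the queue buffer) for the final flush of the base
 * address.
 */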
static void ath12k_dp_reo_cache_flush(struct ath12k_base *ab,
				      struct ath12k_dp_rx_tid *rx_tid)
{
	struct ath12k_hal_reo_cmd cmd = {0};
	unsigned long tot_desc_sz, desc_sz;
	int ret;

	tot_desc_sz = rx_tid->qbuf.size;
	desc_sz = ath12k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);

	while (tot_desc_sz > desc_sz) {
		tot_desc_sz -= desc_sz;
		cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned + tot_desc_sz);
		cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
		ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
					     HAL_REO_CMD_FLUSH_CACHE, &cmd,
					     NULL);
		if (ret)
			ath12k_warn(ab,
				    "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
				    rx_tid->tid, ret);
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
	cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
				     HAL_REO_CMD_FLUSH_CACHE,
				     &cmd, ath12k_dp_reo_cmd_free);
	if (ret) {
		ath12k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
			   rx_tid->tid, ret);
		dma_unmap_single(ab->dev, rx_tid->qbuf.paddr_aligned, rx_tid->qbuf.size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->qbuf.vaddr);
		rx_tid->qbuf.vaddr = NULL;
	}
}
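/* Deletion of an rx tid queue is two-staged: the UPDATE_RX_QUEUE status
 * callback below first parks the queue buffer on
 * dp->reo_cmd_cache_flush_list, and the actual cache flush (and free) is
 * deferred until either more than ATH12K_DP_RX_REO_DESC_FREE_THRES
 * entries are pending or an entry has aged past
 * ATH12K_DP_RX_REO_DESC_FREE_TIMEOUT_MS.
 */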
static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
				      enum hal_reo_cmd_status status)
{
	struct ath12k_base *ab = dp->ab;
	struct ath12k_dp_rx_tid *rx_tid = ctx;
	struct ath12k_dp_rx_reo_cache_flush_elem *elem, *tmp;

	if (status == HAL_REO_CMD_DRAIN) {
		goto free_desc;
	} else if (status != HAL_REO_CMD_SUCCESS) {
		/* Shouldn't happen! Cleanup in case of other failure? */
		ath12k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
			    rx_tid->tid, status);
		return;
	}

	elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
	if (!elem)
		goto free_desc;

	elem->ts = jiffies;
	memcpy(&elem->data, rx_tid, sizeof(*rx_tid));

	spin_lock_bh(&dp->reo_cmd_lock);
	list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
	dp->reo_cmd_cache_flush_count++;

	/* Flush and invalidate aged REO desc from HW cache */
	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
				 list) {
		if (dp->reo_cmd_cache_flush_count > ATH12K_DP_RX_REO_DESC_FREE_THRES ||
		    time_after(jiffies, elem->ts +
			       msecs_to_jiffies(ATH12K_DP_RX_REO_DESC_FREE_TIMEOUT_MS))) {
			list_del(&elem->list);
			dp->reo_cmd_cache_flush_count--;

			/* Unlock the reo_cmd_lock before using ath12k_dp_reo_cmd_send()
			 * within ath12k_dp_reo_cache_flush. The reo_cmd_cache_flush_list
			 * is used in only two contexts, one is in this function called
			 * from napi and the other in ath12k_dp_free during core destroy.
			 * Before dp_free, the irqs would be disabled and would wait to
			 * synchronize. Hence there wouldn't be any race against add or
			 * delete to this list. Hence unlock-lock is safe here.
			 */
			spin_unlock_bh(&dp->reo_cmd_lock);

			ath12k_dp_reo_cache_flush(ab, &elem->data);
			kfree(elem);
			spin_lock_bh(&dp->reo_cmd_lock);
		}
	}
	spin_unlock_bh(&dp->reo_cmd_lock);

	return;
free_desc:
	dma_unmap_single(ab->dev, rx_tid->qbuf.paddr_aligned, rx_tid->qbuf.size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->qbuf.vaddr);
	rx_tid->qbuf.vaddr = NULL;
}
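/* Each (peer, tid) pair owns one ath12k_reo_queue_ref slot in the REO
 * queue LUT, indexed as peer_id * (IEEE80211_NUM_TIDS + 1) + tid, i.e.
 * 16 QoS TIDs plus one non-QoS slot per peer. ML peers are flagged with
 * ATH12K_PEER_ML_ID_VALID in their peer id and use the separate
 * ml_reoq_lut table.
 */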
static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u16 tid,
					  dma_addr_t paddr)
{
	struct ath12k_reo_queue_ref *qref;
	struct ath12k_dp *dp = &ab->dp;
	bool ml_peer = false;

	if (!ab->hw_params->reoq_lut_support)
		return;

	if (peer_id & ATH12K_PEER_ML_ID_VALID) {
		peer_id &= ~ATH12K_PEER_ML_ID_VALID;
		ml_peer = true;
	}

	if (ml_peer)
		qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr +
			(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
	else
		qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
			(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);

	qref->info0 = u32_encode_bits(lower_32_bits(paddr),
				      BUFFER_ADDR_INFO0_ADDR);
	qref->info1 = u32_encode_bits(upper_32_bits(paddr),
				      BUFFER_ADDR_INFO1_ADDR) |
		      u32_encode_bits(tid, DP_REO_QREF_NUM);
	ath12k_hal_reo_shared_qaddr_cache_clear(ab);
}

static void ath12k_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u16 tid)
{
	struct ath12k_reo_queue_ref *qref;
	struct ath12k_dp *dp = &ab->dp;
	bool ml_peer = false;

	if (!ab->hw_params->reoq_lut_support)
		return;

	if (peer_id & ATH12K_PEER_ML_ID_VALID) {
		peer_id &= ~ATH12K_PEER_ML_ID_VALID;
		ml_peer = true;
	}

	if (ml_peer)
		qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr +
			(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
	else
		qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
			(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);

	qref->info0 = u32_encode_bits(0, BUFFER_ADDR_INFO0_ADDR);
	qref->info1 = u32_encode_bits(0, BUFFER_ADDR_INFO1_ADDR) |
		      u32_encode_bits(tid, DP_REO_QREF_NUM);
}

void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
				  struct ath12k_peer *peer, u8 tid)
{
	struct ath12k_hal_reo_cmd cmd = {0};
	struct ath12k_dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	int ret;

	if (!rx_tid->active)
		return;

	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
	cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
	cmd.upd0 = HAL_REO_CMD_UPD0_VLD;
	ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
				     HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
				     ath12k_dp_rx_tid_del_func);
	if (ret) {
		ath12k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
			   tid, ret);
		dma_unmap_single(ar->ab->dev, rx_tid->qbuf.paddr_aligned,
				 rx_tid->qbuf.size, DMA_BIDIRECTIONAL);
		kfree(rx_tid->qbuf.vaddr);
		rx_tid->qbuf.vaddr = NULL;
	}

	if (peer->mlo)
		ath12k_peer_rx_tid_qref_reset(ar->ab, peer->ml_id, tid);
	else
		ath12k_peer_rx_tid_qref_reset(ar->ab, peer->peer_id, tid);

	rx_tid->active = false;
}

int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab,
				  struct ath12k_buffer_addr *buf_addr_info,
				  enum hal_wbm_rel_bm_act action)
{
	struct hal_wbm_release_ring *desc;
	struct ath12k_dp *dp = &ab->dp;
	struct hal_srng *srng;
	int ret = 0;

	srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOBUFS;
		goto exit;
	}

	ath12k_hal_rx_msdu_link_desc_set(ab, desc, buf_addr_info, action);

exit:
	ath12k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return ret;
}

static void ath12k_dp_rx_frags_cleanup(struct ath12k_dp_rx_tid *rx_tid,
				       bool rel_link_desc)
{
	struct ath12k_buffer_addr *buf_addr_info;
	struct ath12k_base *ab = rx_tid->ab;

	lockdep_assert_held(&ab->base_lock);

	if (rx_tid->dst_ring_desc) {
		if (rel_link_desc) {
			buf_addr_info = &rx_tid->dst_ring_desc->buf_addr_info;
			ath12k_dp_rx_link_desc_return(ab, buf_addr_info,
						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
		}
		kfree(rx_tid->dst_ring_desc);
		rx_tid->dst_ring_desc = NULL;
	}

	rx_tid->cur_sn = 0;
	rx_tid->last_frag_no = 0;
	rx_tid->rx_frag_bitmap = 0;
	__skb_queue_purge(&rx_tid->rx_frags);
}

void ath12k_dp_rx_peer_tid_cleanup(struct ath12k *ar, struct ath12k_peer *peer)
{
	struct ath12k_dp_rx_tid *rx_tid;
	int i;

	lockdep_assert_held(&ar->ab->base_lock);

	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
		rx_tid = &peer->rx_tid[i];

		ath12k_dp_rx_peer_tid_delete(ar, peer, i);
		ath12k_dp_rx_frags_cleanup(rx_tid, true);

		spin_unlock_bh(&ar->ab->base_lock);
		timer_delete_sync(&rx_tid->frag_timer);
		spin_lock_bh(&ar->ab->base_lock);
	}
}

static int ath12k_peer_rx_tid_reo_update(struct ath12k *ar,
					 struct ath12k_peer *peer,
					 struct ath12k_dp_rx_tid *rx_tid,
					 u32 ba_win_sz, u16 ssn,
					 bool update_ssn)
{
	struct ath12k_hal_reo_cmd cmd = {0};
	int ret;

	cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
	cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
	cmd.ba_window_size = ba_win_sz;

	if (update_ssn) {
		cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
		cmd.upd2 = u32_encode_bits(ssn, HAL_REO_CMD_UPD2_SSN);
	}

	ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
				     HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
				     NULL);
	if (ret) {
		ath12k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
			    rx_tid->tid, ret);
		return ret;
	}

	rx_tid->ba_win_sz = ba_win_sz;

	return 0;
}
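/* The hw reorder queue buffer for a <sta, tid> pair is cached in
 * ahsta->reoq_bufs[tid] and reused across tid setup/teardown cycles;
 * it is only allocated (and DMA-mapped) on first use below.
 */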
static int ath12k_dp_rx_assign_reoq(struct ath12k_base *ab,
				    struct ath12k_sta *ahsta,
				    struct ath12k_dp_rx_tid *rx_tid,
				    u16 ssn, enum hal_pn_type pn_type)
{
	u32 ba_win_sz = rx_tid->ba_win_sz;
	struct ath12k_reoq_buf *buf;
	void *vaddr, *vaddr_aligned;
	dma_addr_t paddr_aligned;
	u8 tid = rx_tid->tid;
	u32 hw_desc_sz;
	int ret;

	buf = &ahsta->reoq_bufs[tid];
	if (!buf->vaddr) {
		/* TODO: Optimize the memory allocation for qos tid based on
		 * the actual BA window size in REO tid update path.
		 */
		if (tid == HAL_DESC_REO_NON_QOS_TID)
			hw_desc_sz = ath12k_hal_reo_qdesc_size(ba_win_sz, tid);
		else
			hw_desc_sz = ath12k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);

		vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
		if (!vaddr)
			return -ENOMEM;

		vaddr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);

		ath12k_hal_reo_qdesc_setup(vaddr_aligned, tid, ba_win_sz,
					   ssn, pn_type);

		paddr_aligned = dma_map_single(ab->dev, vaddr_aligned, hw_desc_sz,
					       DMA_BIDIRECTIONAL);
		ret = dma_mapping_error(ab->dev, paddr_aligned);
		if (ret) {
			kfree(vaddr);
			return ret;
		}

		buf->vaddr = vaddr;
		buf->paddr_aligned = paddr_aligned;
		buf->size = hw_desc_sz;
	}

	rx_tid->qbuf = *buf;
	rx_tid->active = true;

	return 0;
}

int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id,
				u8 tid, u32 ba_win_sz, u16 ssn,
				enum hal_pn_type pn_type)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_dp *dp = &ab->dp;
	struct ath12k_peer *peer;
	struct ath12k_sta *ahsta;
	struct ath12k_dp_rx_tid *rx_tid;
	dma_addr_t paddr_aligned;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath12k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		spin_unlock_bh(&ab->base_lock);
		ath12k_warn(ab, "failed to find the peer to set up rx tid\n");
		return -ENOENT;
	}

	if (ab->hw_params->dp_primary_link_only &&
	    !peer->primary_link) {
		spin_unlock_bh(&ab->base_lock);
		return 0;
	}

	if (ab->hw_params->reoq_lut_support &&
	    (!dp->reoq_lut.vaddr || !dp->ml_reoq_lut.vaddr)) {
		spin_unlock_bh(&ab->base_lock);
		ath12k_warn(ab, "reo qref table is not setup\n");
		return -EINVAL;
	}

	if (peer->peer_id > DP_MAX_PEER_ID || tid > IEEE80211_NUM_TIDS) {
		ath12k_warn(ab, "peer id of peer %d or tid %d doesn't allow reoq setup\n",
			    peer->peer_id, tid);
		spin_unlock_bh(&ab->base_lock);
		return -EINVAL;
	}

	rx_tid = &peer->rx_tid[tid];
	paddr_aligned = rx_tid->qbuf.paddr_aligned;
	/* Update the tid queue if it is already setup */
	if (rx_tid->active) {
		ret = ath12k_peer_rx_tid_reo_update(ar, peer, rx_tid,
						    ba_win_sz, ssn, true);
		spin_unlock_bh(&ab->base_lock);
		if (ret) {
			ath12k_warn(ab, "failed to update reo for rx tid %d\n", tid);
			return ret;
		}

		if (!ab->hw_params->reoq_lut_support) {
			ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
								     peer_mac,
								     paddr_aligned, tid,
								     1, ba_win_sz);
			if (ret) {
				ath12k_warn(ab, "failed to setup peer rx reorder queue for tid %d: %d\n",
					    tid, ret);
				return ret;
			}
		}

		return 0;
	}

	rx_tid->tid = tid;

	rx_tid->ba_win_sz = ba_win_sz;

	ahsta = ath12k_sta_to_ahsta(peer->sta);
	ret = ath12k_dp_rx_assign_reoq(ab, ahsta, rx_tid, ssn, pn_type);
	if (ret) {
		spin_unlock_bh(&ab->base_lock);
		ath12k_warn(ab, "failed to assign reoq buf for rx tid %u\n", tid);
		return ret;
	}

	if (ab->hw_params->reoq_lut_support) {
		/* Update the REO queue LUT at the corresponding peer id
		 * and tid with qaddr.
		 */
		if (peer->mlo)
			ath12k_peer_rx_tid_qref_setup(ab, peer->ml_id, tid,
						      paddr_aligned);
		else
			ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid,
						      paddr_aligned);

		spin_unlock_bh(&ab->base_lock);
	} else {
		spin_unlock_bh(&ab->base_lock);
		ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
							     paddr_aligned, tid, 1,
							     ba_win_sz);
	}

	return ret;
}

int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
			     struct ieee80211_ampdu_params *params,
			     u8 link_id)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta);
	struct ath12k_link_sta *arsta;
	int vdev_id;
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
				  ahsta->link[link_id]);
	if (!arsta)
		return -ENOLINK;

	vdev_id = arsta->arvif->vdev_id;

	ret = ath12k_dp_rx_peer_tid_setup(ar, arsta->addr, vdev_id,
					  params->tid, params->buf_size,
					  params->ssn, arsta->ahsta->pn_type);
	if (ret)
		ath12k_warn(ab, "failed to setup rx tid %d\n", ret);

	return ret;
}

int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,
			    struct ieee80211_ampdu_params *params,
			    u8 link_id)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_peer *peer;
	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta);
	struct ath12k_link_sta *arsta;
	int vdev_id;
	bool active;
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
				  ahsta->link[link_id]);
	if (!arsta)
		return -ENOLINK;

	vdev_id = arsta->arvif->vdev_id;

	spin_lock_bh(&ab->base_lock);

	peer = ath12k_peer_find(ab, vdev_id, arsta->addr);
	if (!peer) {
		spin_unlock_bh(&ab->base_lock);
		ath12k_warn(ab, "failed to find the peer to stop rx aggregation\n");
		return -ENOENT;
	}

	active = peer->rx_tid[params->tid].active;

	if (!active) {
		spin_unlock_bh(&ab->base_lock);
		return 0;
	}

	ret = ath12k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
	spin_unlock_bh(&ab->base_lock);
	if (ret) {
		ath12k_warn(ab, "failed to update reo for rx tid %d: %d\n",
			    params->tid, ret);
		return ret;
	}

	return ret;
}
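/* For pairwise keys the PN/TSC replay check is offloaded to the REO by
 * reprogramming each active rx tid queue below; CCMP/GCMP ciphers use a
 * 48-bit packet number, hence cmd.pn_size = 48.
 */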
int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif,
				       const u8 *peer_addr,
				       enum set_key_cmd key_cmd,
				       struct ieee80211_key_conf *key)
{
	struct ath12k *ar = arvif->ar;
	struct ath12k_base *ab = ar->ab;
	struct ath12k_hal_reo_cmd cmd = {0};
	struct ath12k_peer *peer;
	struct ath12k_dp_rx_tid *rx_tid;
	u8 tid;
	int ret = 0;

	/* NOTE: Enable PN/TSC replay check offload only for unicast frames.
	 * We use mac80211 PN/TSC replay check functionality for bcast/mcast
	 * for now.
	 */
	if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		return 0;

	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.upd0 = HAL_REO_CMD_UPD0_PN |
		   HAL_REO_CMD_UPD0_PN_SIZE |
		   HAL_REO_CMD_UPD0_PN_VALID |
		   HAL_REO_CMD_UPD0_PN_CHECK |
		   HAL_REO_CMD_UPD0_SVLD;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_CCMP_256:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		if (key_cmd == SET_KEY) {
			cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
			cmd.pn_size = 48;
		}
		break;
	default:
		break;
	}

	spin_lock_bh(&ab->base_lock);

	peer = ath12k_peer_find(ab, arvif->vdev_id, peer_addr);
	if (!peer) {
		spin_unlock_bh(&ab->base_lock);
		ath12k_warn(ab, "failed to find the peer %pM to configure pn replay detection\n",
			    peer_addr);
		return -ENOENT;
	}

	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
		rx_tid = &peer->rx_tid[tid];
		if (!rx_tid->active)
			continue;
		cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
		cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
		ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
					     HAL_REO_CMD_UPDATE_RX_QUEUE,
					     &cmd, NULL);
		if (ret) {
			ath12k_warn(ab, "failed to configure rx tid %d queue of peer %pM for pn replay detection %d\n",
				    tid, peer_addr, ret);
			break;
		}
	}

	spin_unlock_bh(&ab->base_lock);

	return ret;
}

static int ath12k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
				      u16 peer_id)
{
	int i;

	for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
		if (ppdu_stats->user_stats[i].is_valid_peer_id) {
			if (peer_id == ppdu_stats->user_stats[i].peer_id)
				return i;
		} else {
			return i;
		}
	}

	return -EINVAL;
}

static int ath12k_htt_tlv_ppdu_stats_parse(struct ath12k_base *ab,
					   u16 tag, u16 len, const void *ptr,
					   void *data)
{
	const struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *ba_status;
	const struct htt_ppdu_stats_usr_cmpltn_cmn *cmplt_cmn;
	const struct htt_ppdu_stats_user_rate *user_rate;
	struct htt_ppdu_stats_info *ppdu_info;
	struct htt_ppdu_user_stats *user_stats;
	int cur_user;
	u16 peer_id;

	ppdu_info = data;

	switch (tag) {
	case HTT_PPDU_STATS_TAG_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_common)) {
			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}
		memcpy(&ppdu_info->ppdu_stats.common, ptr,
		       sizeof(struct htt_ppdu_stats_common));
		break;
	case HTT_PPDU_STATS_TAG_USR_RATE:
		if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}
		user_rate = ptr;
		peer_id = le16_to_cpu(user_rate->sw_peer_id);
		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy(&user_stats->rate, ptr,
		       sizeof(struct htt_ppdu_stats_user_rate));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		cmplt_cmn = ptr;
		peer_id = le16_to_cpu(cmplt_cmn->sw_peer_id);
		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy(&user_stats->cmpltn_cmn, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
		if (len <
		    sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		ba_status = ptr;
		peer_id = le16_to_cpu(ba_status->sw_peer_id);
		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy(&user_stats->ack_ba, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
		user_stats->tlv_flags |= BIT(tag);
		break;
	}
	return 0;
}
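/* TLV stream layout consumed by the iterator below: each element is a
 * 4-byte little-endian header encoding HTT_TLV_TAG and HTT_TLV_LEN,
 * immediately followed by 'len' payload bytes (the length excludes the
 * header itself). Note that only -ENOMEM from the per-tag callback
 * aborts the walk; other callback errors are ignored so an unknown or
 * malformed tag does not stop the parse.
 */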
int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
			   int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
				       const void *ptr, void *data),
			   void *data)
{
	const struct htt_tlv *tlv;
	const void *begin = ptr;
	u16 tlv_tag, tlv_len;
	int ret = -EINVAL;

	while (len > 0) {
		if (len < sizeof(*tlv)) {
			ath12k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}
		tlv = (struct htt_tlv *)ptr;
		tlv_tag = le32_get_bits(tlv->header, HTT_TLV_TAG);
		tlv_len = le32_get_bits(tlv->header, HTT_TLV_LEN);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		if (tlv_len > len) {
			ath12k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}
		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
		if (ret == -ENOMEM)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}
	return 0;
}
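/* Decoding note for the helper below: preamble, bw, nss, mcs, gi and
 * dcm are all packed into usr_stats->rate.rate_flags. The "- 2" on bw
 * and "+ 1" on nss undo the firmware encoding (judging by these
 * adjustments, bw is reported with an offset of two and nss zero-based).
 */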
static void
ath12k_update_per_peer_tx_stats(struct ath12k *ar,
				struct htt_ppdu_stats *ppdu_stats, u8 user)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_peer *peer;
	struct ieee80211_sta *sta;
	struct ath12k_sta *ahsta;
	struct ath12k_link_sta *arsta;
	struct htt_ppdu_stats_user_rate *user_rate;
	struct ath12k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
	struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
	struct htt_ppdu_stats_common *common = &ppdu_stats->common;
	int ret;
	u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
	u32 v, succ_bytes = 0;
	u16 tones, rate = 0, succ_pkts = 0;
	u32 tx_duration = 0;
	u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
	bool is_ampdu = false;

	if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
		return;

	if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
		is_ampdu =
			HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);

	if (usr_stats->tlv_flags &
	    BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
		succ_bytes = le32_to_cpu(usr_stats->ack_ba.success_bytes);
		succ_pkts = le32_get_bits(usr_stats->ack_ba.info,
					  HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M);
		tid = le32_get_bits(usr_stats->ack_ba.info,
				    HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM);
	}

	if (common->fes_duration_us)
		tx_duration = le32_to_cpu(common->fes_duration_us);

	user_rate = &usr_stats->rate;
	flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
	bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
	nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
	mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
	sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
	dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);

	/* Note: If host configured fixed rates and in some other special
	 * cases, the broadcast/management frames are sent in different rates.
	 * Should firmware rate control be skipped for these?
	 */

	if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH12K_HE_MCS_MAX) {
		ath12k_warn(ab, "Invalid HE mcs %d peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH12K_VHT_MCS_MAX) {
		ath12k_warn(ab, "Invalid VHT mcs %d peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH12K_HT_MCS_MAX || nss < 1)) {
		ath12k_warn(ab, "Invalid HT mcs %d nss %d peer stats",
			    mcs, nss);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
		ret = ath12k_mac_hw_ratecode_to_legacy_rate(mcs,
							    flags,
							    &rate_idx,
							    &rate);
		if (ret < 0)
			return;
	}

	rcu_read_lock();
	spin_lock_bh(&ab->base_lock);
	peer = ath12k_peer_find_by_id(ab, usr_stats->peer_id);

	if (!peer || !peer->sta) {
		spin_unlock_bh(&ab->base_lock);
		rcu_read_unlock();
		return;
	}

	sta = peer->sta;
	ahsta = ath12k_sta_to_ahsta(sta);
	arsta = &ahsta->deflink;

	memset(&arsta->txrate, 0, sizeof(arsta->txrate));

	switch (flags) {
	case WMI_RATE_PREAMBLE_OFDM:
		arsta->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_CCK:
		arsta->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_HT:
		arsta->txrate.mcs = mcs + 8 * (nss - 1);
		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_VHT:
		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_HE:
		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
		arsta->txrate.he_dcm = dcm;
		arsta->txrate.he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
		tones = le16_to_cpu(user_rate->ru_end) -
			le16_to_cpu(user_rate->ru_start) + 1;
		v = ath12k_he_ru_tones_to_nl80211_he_ru_alloc(tones);
		arsta->txrate.he_ru_alloc = v;
		break;
	}

	arsta->txrate.nss = nss;
	arsta->txrate.bw = ath12k_mac_bw_to_mac80211_bw(bw);
	arsta->tx_duration += tx_duration;
	memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));

	/* PPDU stats reported for mgmt packet doesn't have valid tx bytes.
	 * So skip peer stats update for mgmt packets.
	 */
	if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
		memset(peer_stats, 0, sizeof(*peer_stats));
		peer_stats->succ_pkts = succ_pkts;
		peer_stats->succ_bytes = succ_bytes;
		peer_stats->is_ampdu = is_ampdu;
		peer_stats->duration = tx_duration;
		peer_stats->ba_fails =
			HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
			HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
	}

	spin_unlock_bh(&ab->base_lock);
	rcu_read_unlock();
}
static void ath12k_htt_update_ppdu_stats(struct ath12k *ar,
					 struct htt_ppdu_stats *ppdu_stats)
{
	u8 user;

	for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
		ath12k_update_per_peer_tx_stats(ar, ppdu_stats, user);
}

static
struct htt_ppdu_stats_info *ath12k_dp_htt_get_ppdu_desc(struct ath12k *ar,
							u32 ppdu_id)
{
	struct htt_ppdu_stats_info *ppdu_info;

	lockdep_assert_held(&ar->data_lock);
	if (!list_empty(&ar->ppdu_stats_info)) {
		list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
			if (ppdu_info->ppdu_id == ppdu_id)
				return ppdu_info;
		}

		if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
			ppdu_info = list_first_entry(&ar->ppdu_stats_info,
						     typeof(*ppdu_info), list);
			list_del(&ppdu_info->list);
			ar->ppdu_stat_list_depth--;
			ath12k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
			kfree(ppdu_info);
		}
	}

	ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
	if (!ppdu_info)
		return NULL;

	list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
	ar->ppdu_stat_list_depth++;

	return ppdu_info;
}

static void ath12k_copy_to_delay_stats(struct ath12k_peer *peer,
				       struct htt_ppdu_user_stats *usr_stats)
{
	peer->ppdu_stats_delayba.sw_peer_id = le16_to_cpu(usr_stats->rate.sw_peer_id);
	peer->ppdu_stats_delayba.info0 = le32_to_cpu(usr_stats->rate.info0);
	peer->ppdu_stats_delayba.ru_end = le16_to_cpu(usr_stats->rate.ru_end);
	peer->ppdu_stats_delayba.ru_start = le16_to_cpu(usr_stats->rate.ru_start);
	peer->ppdu_stats_delayba.info1 = le32_to_cpu(usr_stats->rate.info1);
	peer->ppdu_stats_delayba.rate_flags = le32_to_cpu(usr_stats->rate.rate_flags);
	peer->ppdu_stats_delayba.resp_rate_flags =
		le32_to_cpu(usr_stats->rate.resp_rate_flags);

	peer->delayba_flag = true;
}

static void ath12k_copy_to_bar(struct ath12k_peer *peer,
			       struct htt_ppdu_user_stats *usr_stats)
{
	usr_stats->rate.sw_peer_id = cpu_to_le16(peer->ppdu_stats_delayba.sw_peer_id);
	usr_stats->rate.info0 = cpu_to_le32(peer->ppdu_stats_delayba.info0);
	usr_stats->rate.ru_end = cpu_to_le16(peer->ppdu_stats_delayba.ru_end);
	usr_stats->rate.ru_start = cpu_to_le16(peer->ppdu_stats_delayba.ru_start);
	usr_stats->rate.info1 = cpu_to_le32(peer->ppdu_stats_delayba.info1);
	usr_stats->rate.rate_flags = cpu_to_le32(peer->ppdu_stats_delayba.rate_flags);
	usr_stats->rate.resp_rate_flags =
		cpu_to_le32(peer->ppdu_stats_delayba.resp_rate_flags);

	peer->delayba_flag = false;
}
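/* Delayed-BA handling in the PPDU stats pull below: for a data PPDU with
 * delayed block ack, the per-user rate TLV is backed up into the peer
 * (ath12k_copy_to_delay_stats); when the corresponding MU-BAR PPDU is
 * reported later, the backed-up rate TLV is restored into its user stats
 * (ath12k_copy_to_bar) so the rate accounting stays attached to the data
 * transmission.
 */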
static int ath12k_htt_pull_ppdu_stats(struct ath12k_base *ab,
				      struct sk_buff *skb)
{
	struct ath12k_htt_ppdu_stats_msg *msg;
	struct htt_ppdu_stats_info *ppdu_info;
	struct ath12k_peer *peer = NULL;
	struct htt_ppdu_user_stats *usr_stats = NULL;
	u32 peer_id = 0;
	struct ath12k *ar;
	int ret, i;
	u8 pdev_id;
	u32 ppdu_id, len;

	msg = (struct ath12k_htt_ppdu_stats_msg *)skb->data;
	len = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE);
	if (len > (skb->len - struct_size(msg, data, 0))) {
		ath12k_warn(ab,
			    "HTT PPDU STATS event has unexpected payload size %u, should be smaller than %u\n",
			    len, skb->len);
		return -EINVAL;
	}

	pdev_id = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PDEV_ID);
	ppdu_id = le32_to_cpu(msg->ppdu_id);

	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
	if (!ar) {
		ret = -EINVAL;
		goto exit;
	}

	spin_lock_bh(&ar->data_lock);
	ppdu_info = ath12k_dp_htt_get_ppdu_desc(ar, ppdu_id);
	if (!ppdu_info) {
		spin_unlock_bh(&ar->data_lock);
		ret = -EINVAL;
		goto exit;
	}

	ppdu_info->ppdu_id = ppdu_id;
	ret = ath12k_dp_htt_tlv_iter(ab, msg->data, len,
				     ath12k_htt_tlv_ppdu_stats_parse,
				     (void *)ppdu_info);
	if (ret) {
		spin_unlock_bh(&ar->data_lock);
		ath12k_warn(ab, "Failed to parse tlv %d\n", ret);
		goto exit;
	}

	if (ppdu_info->ppdu_stats.common.num_users >= HTT_PPDU_STATS_MAX_USERS) {
		spin_unlock_bh(&ar->data_lock);
		ath12k_warn(ab,
			    "HTT PPDU STATS event has unexpected num_users %u, should be smaller than %u\n",
			    ppdu_info->ppdu_stats.common.num_users,
			    HTT_PPDU_STATS_MAX_USERS);
		ret = -EINVAL;
		goto exit;
	}

	/* back up data rate tlv for all peers */
	if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_DATA &&
	    (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON)) &&
	    ppdu_info->delay_ba) {
		for (i = 0; i < ppdu_info->ppdu_stats.common.num_users; i++) {
			peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id;
			spin_lock_bh(&ab->base_lock);
			peer = ath12k_peer_find_by_id(ab, peer_id);
			if (!peer) {
				spin_unlock_bh(&ab->base_lock);
				continue;
			}

			usr_stats = &ppdu_info->ppdu_stats.user_stats[i];
			if (usr_stats->delay_ba)
				ath12k_copy_to_delay_stats(peer, usr_stats);
			spin_unlock_bh(&ab->base_lock);
		}
	}

	/* restore all peers' data rate tlv to mu-bar tlv */
	if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_BAR &&
	    (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON))) {
		for (i = 0; i < ppdu_info->bar_num_users; i++) {
			peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id;
			spin_lock_bh(&ab->base_lock);
			peer = ath12k_peer_find_by_id(ab, peer_id);
			if (!peer) {
				spin_unlock_bh(&ab->base_lock);
				continue;
			}

			usr_stats = &ppdu_info->ppdu_stats.user_stats[i];
			if (peer->delayba_flag)
				ath12k_copy_to_bar(peer, usr_stats);
			spin_unlock_bh(&ab->base_lock);
		}
	}

	spin_unlock_bh(&ar->data_lock);

exit:
	rcu_read_unlock();

	return ret;
}
static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab,
						struct sk_buff *skb)
{
	struct ath12k_htt_mlo_offset_msg *msg;
	struct ath12k_pdev *pdev;
	struct ath12k *ar;
	u8 pdev_id;

	msg = (struct ath12k_htt_mlo_offset_msg *)skb->data;
	pdev_id = u32_get_bits(__le32_to_cpu(msg->info),
			       HTT_T2H_MLO_OFFSET_INFO_PDEV_ID);

	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
	if (!ar) {
		/* It is possible that the ar is not yet active (started).
		 * The above function will only look for the active pdev
		 * and hence %NULL return is possible. Just silently
		 * discard this message.
		 */
		goto exit;
	}

	spin_lock_bh(&ar->data_lock);
	pdev = ar->pdev;

	pdev->timestamp.info = __le32_to_cpu(msg->info);
	pdev->timestamp.sync_timestamp_lo_us = __le32_to_cpu(msg->sync_timestamp_lo_us);
	pdev->timestamp.sync_timestamp_hi_us = __le32_to_cpu(msg->sync_timestamp_hi_us);
	pdev->timestamp.mlo_offset_lo = __le32_to_cpu(msg->mlo_offset_lo);
	pdev->timestamp.mlo_offset_hi = __le32_to_cpu(msg->mlo_offset_hi);
	pdev->timestamp.mlo_offset_clks = __le32_to_cpu(msg->mlo_offset_clks);
	pdev->timestamp.mlo_comp_clks = __le32_to_cpu(msg->mlo_comp_clks);
	pdev->timestamp.mlo_comp_timer = __le32_to_cpu(msg->mlo_comp_timer);

	spin_unlock_bh(&ar->data_lock);
exit:
	rcu_read_unlock();
}
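/* Dispatcher for HTT target-to-host (T2H) messages: the message type is
 * decoded from the first dword and each type is handled inline below;
 * the skb is always consumed here (freed at the end) regardless of type.
 */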
void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab,
				       struct sk_buff *skb)
{
	struct ath12k_dp *dp = &ab->dp;
	struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
	enum htt_t2h_msg_type type;
	u16 peer_id;
	u8 vdev_id;
	u8 mac_addr[ETH_ALEN];
	u16 peer_mac_h16;
	u16 ast_hash = 0;
	u16 hw_peer_id;

	type = le32_get_bits(resp->version_msg.version, HTT_T2H_MSG_TYPE);

	ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type);

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF:
		dp->htt_tgt_ver_major = le32_get_bits(resp->version_msg.version,
						      HTT_T2H_VERSION_CONF_MAJOR);
		dp->htt_tgt_ver_minor = le32_get_bits(resp->version_msg.version,
						      HTT_T2H_VERSION_CONF_MINOR);
		complete(&dp->htt_tgt_version_received);
		break;
	/* TODO: remove unused peer map versions after testing */
	case HTT_T2H_MSG_TYPE_PEER_MAP:
		vdev_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_VDEV_ID);
		peer_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_PEER_ID);
		peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
					     HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
		ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
				       peer_mac_h16, mac_addr);
		ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP2:
		vdev_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_VDEV_ID);
		peer_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_PEER_ID);
		peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
					     HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
		ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
				       peer_mac_h16, mac_addr);
		ast_hash = le32_get_bits(resp->peer_map_ev.info2,
					 HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL);
		hw_peer_id = le32_get_bits(resp->peer_map_ev.info1,
					   HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID);
		ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
				      hw_peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP3:
		vdev_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_VDEV_ID);
		peer_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_PEER_ID);
		peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
					     HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
		ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
				       peer_mac_h16, mac_addr);
		ast_hash = le32_get_bits(resp->peer_map_ev.info2,
					 HTT_T2H_PEER_MAP3_INFO2_AST_HASH_VAL);
		hw_peer_id = le32_get_bits(resp->peer_map_ev.info2,
					   HTT_T2H_PEER_MAP3_INFO2_HW_PEER_ID);
		ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
				      hw_peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
	case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
		peer_id = le32_get_bits(resp->peer_unmap_ev.info,
					HTT_T2H_PEER_UNMAP_INFO_PEER_ID);
		ath12k_peer_unmap_event(ab, peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
		ath12k_htt_pull_ppdu_stats(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
		ath12k_debugfs_htt_ext_stats_handler(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND:
		ath12k_htt_mlo_offset_event_handler(ab, skb);
		break;
	default:
		ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt event %d not handled\n",
			   type);
		break;
	}

	dev_kfree_skb_any(skb);
}
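/* Per-buffer layout assumed by the coalescing logic below: the first rx
 * buffer carries hal_rx_desc, then l3pad_bytes, then payload, so at most
 * DP_RX_BUFFER_SIZE - (hal_desc_sz + l3pad_bytes) payload bytes fit in
 * it; continuation buffers carry hal_rx_desc followed directly by up to
 * DP_RX_BUFFER_SIZE - hal_desc_sz payload bytes.
 */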
static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar,
				      struct sk_buff_head *msdu_list,
				      struct sk_buff *first, struct sk_buff *last,
				      u8 l3pad_bytes, int msdu_len)
{
	struct ath12k_base *ab = ar->ab;
	struct sk_buff *skb;
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first);
	int buf_first_hdr_len, buf_first_len;
	struct hal_rx_desc *ldesc;
	int space_extra, rem_len, buf_len;
	u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
	bool is_continuation;

	/* As the msdu is spread across multiple rx buffers,
	 * find the offset to the start of msdu for computing
	 * the length of the msdu in the first buffer.
	 */
	buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes;
	buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;

	if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
		skb_put(first, buf_first_hdr_len + msdu_len);
		skb_pull(first, buf_first_hdr_len);
		return 0;
	}

	ldesc = (struct hal_rx_desc *)last->data;
	rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, ldesc);
	rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, ldesc);

	/* MSDU spans over multiple buffers because the length of the MSDU
	 * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data
	 * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
	 */
	skb_put(first, DP_RX_BUFFER_SIZE);
	skb_pull(first, buf_first_hdr_len);

	/* When an MSDU is spread over multiple buffers, MSDU_END
	 * tlvs are valid only in the last buffer. Copy those tlvs.
	 */
	ath12k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc);

	space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
	if (space_extra > 0 &&
	    (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
		/* Free up all buffers of the MSDU */
		while ((skb = __skb_dequeue(msdu_list)) != NULL) {
			rxcb = ATH12K_SKB_RXCB(skb);
			if (!rxcb->is_continuation) {
				dev_kfree_skb_any(skb);
				break;
			}
			dev_kfree_skb_any(skb);
		}
		return -ENOMEM;
	}

	rem_len = msdu_len - buf_first_len;
	while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
		rxcb = ATH12K_SKB_RXCB(skb);
		is_continuation = rxcb->is_continuation;
		if (is_continuation)
			buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
		else
			buf_len = rem_len;

		if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) {
			WARN_ON_ONCE(1);
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		skb_put(skb, buf_len + hal_rx_desc_sz);
		skb_pull(skb, hal_rx_desc_sz);
		skb_copy_from_linear_data(skb, skb_put(first, buf_len),
					  buf_len);
		dev_kfree_skb_any(skb);

		rem_len -= buf_len;
		if (!is_continuation)
			break;
	}

	return 0;
}
1914 */ 1915 ath12k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc); 1916 1917 space_extra = msdu_len - (buf_first_len + skb_tailroom(first)); 1918 if (space_extra > 0 && 1919 (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) { 1920 /* Free up all buffers of the MSDU */ 1921 while ((skb = __skb_dequeue(msdu_list)) != NULL) { 1922 rxcb = ATH12K_SKB_RXCB(skb); 1923 if (!rxcb->is_continuation) { 1924 dev_kfree_skb_any(skb); 1925 break; 1926 } 1927 dev_kfree_skb_any(skb); 1928 } 1929 return -ENOMEM; 1930 } 1931 1932 rem_len = msdu_len - buf_first_len; 1933 while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) { 1934 rxcb = ATH12K_SKB_RXCB(skb); 1935 is_continuation = rxcb->is_continuation; 1936 if (is_continuation) 1937 buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz; 1938 else 1939 buf_len = rem_len; 1940 1941 if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) { 1942 WARN_ON_ONCE(1); 1943 dev_kfree_skb_any(skb); 1944 return -EINVAL; 1945 } 1946 1947 skb_put(skb, buf_len + hal_rx_desc_sz); 1948 skb_pull(skb, hal_rx_desc_sz); 1949 skb_copy_from_linear_data(skb, skb_put(first, buf_len), 1950 buf_len); 1951 dev_kfree_skb_any(skb); 1952 1953 rem_len -= buf_len; 1954 if (!is_continuation) 1955 break; 1956 } 1957 1958 return 0; 1959 } 1960 1961 static struct sk_buff *ath12k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list, 1962 struct sk_buff *first) 1963 { 1964 struct sk_buff *skb; 1965 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first); 1966 1967 if (!rxcb->is_continuation) 1968 return first; 1969 1970 skb_queue_walk(msdu_list, skb) { 1971 rxcb = ATH12K_SKB_RXCB(skb); 1972 if (!rxcb->is_continuation) 1973 return skb; 1974 } 1975 1976 return NULL; 1977 } 1978 1979 static void ath12k_dp_rx_h_csum_offload(struct sk_buff *msdu, 1980 struct ath12k_dp_rx_info *rx_info) 1981 { 1982 msdu->ip_summed = (rx_info->ip_csum_fail || rx_info->l4_csum_fail) ? 
1983 CHECKSUM_NONE : CHECKSUM_UNNECESSARY; 1984 } 1985 1986 int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar, enum hal_encrypt_type enctype) 1987 { 1988 switch (enctype) { 1989 case HAL_ENCRYPT_TYPE_OPEN: 1990 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 1991 case HAL_ENCRYPT_TYPE_TKIP_MIC: 1992 return 0; 1993 case HAL_ENCRYPT_TYPE_CCMP_128: 1994 return IEEE80211_CCMP_MIC_LEN; 1995 case HAL_ENCRYPT_TYPE_CCMP_256: 1996 return IEEE80211_CCMP_256_MIC_LEN; 1997 case HAL_ENCRYPT_TYPE_GCMP_128: 1998 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 1999 return IEEE80211_GCMP_MIC_LEN; 2000 case HAL_ENCRYPT_TYPE_WEP_40: 2001 case HAL_ENCRYPT_TYPE_WEP_104: 2002 case HAL_ENCRYPT_TYPE_WEP_128: 2003 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 2004 case HAL_ENCRYPT_TYPE_WAPI: 2005 break; 2006 } 2007 2008 ath12k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype); 2009 return 0; 2010 } 2011 2012 static int ath12k_dp_rx_crypto_param_len(struct ath12k *ar, 2013 enum hal_encrypt_type enctype) 2014 { 2015 switch (enctype) { 2016 case HAL_ENCRYPT_TYPE_OPEN: 2017 return 0; 2018 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 2019 case HAL_ENCRYPT_TYPE_TKIP_MIC: 2020 return IEEE80211_TKIP_IV_LEN; 2021 case HAL_ENCRYPT_TYPE_CCMP_128: 2022 return IEEE80211_CCMP_HDR_LEN; 2023 case HAL_ENCRYPT_TYPE_CCMP_256: 2024 return IEEE80211_CCMP_256_HDR_LEN; 2025 case HAL_ENCRYPT_TYPE_GCMP_128: 2026 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 2027 return IEEE80211_GCMP_HDR_LEN; 2028 case HAL_ENCRYPT_TYPE_WEP_40: 2029 case HAL_ENCRYPT_TYPE_WEP_104: 2030 case HAL_ENCRYPT_TYPE_WEP_128: 2031 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 2032 case HAL_ENCRYPT_TYPE_WAPI: 2033 break; 2034 } 2035 2036 ath12k_warn(ar->ab, "unsupported encryption type %d\n", enctype); 2037 return 0; 2038 } 2039 2040 static int ath12k_dp_rx_crypto_icv_len(struct ath12k *ar, 2041 enum hal_encrypt_type enctype) 2042 { 2043 switch (enctype) { 2044 case HAL_ENCRYPT_TYPE_OPEN: 2045 case HAL_ENCRYPT_TYPE_CCMP_128: 2046 case HAL_ENCRYPT_TYPE_CCMP_256: 2047 case HAL_ENCRYPT_TYPE_GCMP_128: 2048 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 2049 return 0; 2050 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 2051 case HAL_ENCRYPT_TYPE_TKIP_MIC: 2052 return IEEE80211_TKIP_ICV_LEN; 2053 case HAL_ENCRYPT_TYPE_WEP_40: 2054 case HAL_ENCRYPT_TYPE_WEP_104: 2055 case HAL_ENCRYPT_TYPE_WEP_128: 2056 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 2057 case HAL_ENCRYPT_TYPE_WAPI: 2058 break; 2059 } 2060 2061 ath12k_warn(ar->ab, "unsupported encryption type %d\n", enctype); 2062 return 0; 2063 } 2064 2065 static void ath12k_dp_rx_h_undecap_nwifi(struct ath12k *ar, 2066 struct sk_buff *msdu, 2067 enum hal_encrypt_type enctype, 2068 struct ieee80211_rx_status *status) 2069 { 2070 struct ath12k_base *ab = ar->ab; 2071 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 2072 u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN]; 2073 struct ieee80211_hdr *hdr; 2074 size_t hdr_len; 2075 u8 *crypto_hdr; 2076 u16 qos_ctl; 2077 2078 /* pull decapped header */ 2079 hdr = (struct ieee80211_hdr *)msdu->data; 2080 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2081 skb_pull(msdu, hdr_len); 2082 2083 /* Rebuild qos header */ 2084 hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA); 2085 2086 /* Reset the order bit as the HT_Control header is stripped */ 2087 hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER)); 2088 2089 qos_ctl = rxcb->tid; 2090 2091 if (ath12k_dp_rx_h_mesh_ctl_present(ab, rxcb->rx_desc)) 2092 qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT; 2093 2094 /* TODO: Add other QoS ctl fields when required */ 2095 2096 /* copy decap header before 
overwriting for reuse below */ 2097 memcpy(decap_hdr, hdr, hdr_len); 2098 2099 /* Rebuild crypto header for mac80211 use */ 2100 if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 2101 crypto_hdr = skb_push(msdu, ath12k_dp_rx_crypto_param_len(ar, enctype)); 2102 ath12k_dp_rx_desc_get_crypto_header(ar->ab, 2103 rxcb->rx_desc, crypto_hdr, 2104 enctype); 2105 } 2106 2107 memcpy(skb_push(msdu, 2108 IEEE80211_QOS_CTL_LEN), &qos_ctl, 2109 IEEE80211_QOS_CTL_LEN); 2110 memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len); 2111 } 2112 2113 static void ath12k_dp_rx_h_undecap_raw(struct ath12k *ar, struct sk_buff *msdu, 2114 enum hal_encrypt_type enctype, 2115 struct ieee80211_rx_status *status, 2116 bool decrypted) 2117 { 2118 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 2119 struct ieee80211_hdr *hdr; 2120 size_t hdr_len; 2121 size_t crypto_len; 2122 2123 if (!rxcb->is_first_msdu || 2124 !(rxcb->is_first_msdu && rxcb->is_last_msdu)) { 2125 WARN_ON_ONCE(1); 2126 return; 2127 } 2128 2129 skb_trim(msdu, msdu->len - FCS_LEN); 2130 2131 if (!decrypted) 2132 return; 2133 2134 hdr = (void *)msdu->data; 2135 2136 /* Tail */ 2137 if (status->flag & RX_FLAG_IV_STRIPPED) { 2138 skb_trim(msdu, msdu->len - 2139 ath12k_dp_rx_crypto_mic_len(ar, enctype)); 2140 2141 skb_trim(msdu, msdu->len - 2142 ath12k_dp_rx_crypto_icv_len(ar, enctype)); 2143 } else { 2144 /* MIC */ 2145 if (status->flag & RX_FLAG_MIC_STRIPPED) 2146 skb_trim(msdu, msdu->len - 2147 ath12k_dp_rx_crypto_mic_len(ar, enctype)); 2148 2149 /* ICV */ 2150 if (status->flag & RX_FLAG_ICV_STRIPPED) 2151 skb_trim(msdu, msdu->len - 2152 ath12k_dp_rx_crypto_icv_len(ar, enctype)); 2153 } 2154 2155 /* MMIC */ 2156 if ((status->flag & RX_FLAG_MMIC_STRIPPED) && 2157 !ieee80211_has_morefrags(hdr->frame_control) && 2158 enctype == HAL_ENCRYPT_TYPE_TKIP_MIC) 2159 skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN); 2160 2161 /* Head */ 2162 if (status->flag & RX_FLAG_IV_STRIPPED) { 2163 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2164 crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype); 2165 2166 memmove(msdu->data + crypto_len, msdu->data, hdr_len); 2167 skb_pull(msdu, crypto_len); 2168 } 2169 } 2170 2171 static void ath12k_get_dot11_hdr_from_rx_desc(struct ath12k *ar, 2172 struct sk_buff *msdu, 2173 struct ath12k_skb_rxcb *rxcb, 2174 struct ieee80211_rx_status *status, 2175 enum hal_encrypt_type enctype) 2176 { 2177 struct hal_rx_desc *rx_desc = rxcb->rx_desc; 2178 struct ath12k_base *ab = ar->ab; 2179 size_t hdr_len, crypto_len; 2180 struct ieee80211_hdr hdr; 2181 __le16 qos_ctl; 2182 u8 *crypto_hdr, mesh_ctrl; 2183 2184 ath12k_dp_rx_desc_get_dot11_hdr(ab, rx_desc, &hdr); 2185 hdr_len = ieee80211_hdrlen(hdr.frame_control); 2186 mesh_ctrl = ath12k_dp_rx_h_mesh_ctl_present(ab, rx_desc); 2187 2188 if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 2189 crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype); 2190 crypto_hdr = skb_push(msdu, crypto_len); 2191 ath12k_dp_rx_desc_get_crypto_header(ab, rx_desc, crypto_hdr, enctype); 2192 } 2193 2194 skb_push(msdu, hdr_len); 2195 memcpy(msdu->data, &hdr, min(hdr_len, sizeof(hdr))); 2196 2197 if (rxcb->is_mcbc) 2198 status->flag &= ~RX_FLAG_PN_VALIDATED; 2199 2200 /* Add QOS header */ 2201 if (ieee80211_is_data_qos(hdr.frame_control)) { 2202 struct ieee80211_hdr *qos_ptr = (struct ieee80211_hdr *)msdu->data; 2203 2204 qos_ctl = cpu_to_le16(rxcb->tid & IEEE80211_QOS_CTL_TID_MASK); 2205 if (mesh_ctrl) 2206 qos_ctl |= cpu_to_le16(IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT); 2207 2208 memcpy(ieee80211_get_qos_ctl(qos_ptr), 
&qos_ctl, IEEE80211_QOS_CTL_LEN); 2209 } 2210 } 2211 2212 static void ath12k_dp_rx_h_undecap_eth(struct ath12k *ar, 2213 struct sk_buff *msdu, 2214 enum hal_encrypt_type enctype, 2215 struct ieee80211_rx_status *status) 2216 { 2217 struct ieee80211_hdr *hdr; 2218 struct ethhdr *eth; 2219 u8 da[ETH_ALEN]; 2220 u8 sa[ETH_ALEN]; 2221 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 2222 struct ath12k_dp_rx_rfc1042_hdr rfc = {0xaa, 0xaa, 0x03, {0x00, 0x00, 0x00}}; 2223 2224 eth = (struct ethhdr *)msdu->data; 2225 ether_addr_copy(da, eth->h_dest); 2226 ether_addr_copy(sa, eth->h_source); 2227 rfc.snap_type = eth->h_proto; 2228 skb_pull(msdu, sizeof(*eth)); 2229 memcpy(skb_push(msdu, sizeof(rfc)), &rfc, 2230 sizeof(rfc)); 2231 ath12k_get_dot11_hdr_from_rx_desc(ar, msdu, rxcb, status, enctype); 2232 2233 /* original 802.11 header has a different DA and in 2234 * case of 4addr it may also have different SA 2235 */ 2236 hdr = (struct ieee80211_hdr *)msdu->data; 2237 ether_addr_copy(ieee80211_get_DA(hdr), da); 2238 ether_addr_copy(ieee80211_get_SA(hdr), sa); 2239 } 2240 2241 static void ath12k_dp_rx_h_undecap(struct ath12k *ar, struct sk_buff *msdu, 2242 struct hal_rx_desc *rx_desc, 2243 enum hal_encrypt_type enctype, 2244 struct ieee80211_rx_status *status, 2245 bool decrypted) 2246 { 2247 struct ath12k_base *ab = ar->ab; 2248 u8 decap; 2249 struct ethhdr *ehdr; 2250 2251 decap = ath12k_dp_rx_h_decap_type(ab, rx_desc); 2252 2253 switch (decap) { 2254 case DP_RX_DECAP_TYPE_NATIVE_WIFI: 2255 ath12k_dp_rx_h_undecap_nwifi(ar, msdu, enctype, status); 2256 break; 2257 case DP_RX_DECAP_TYPE_RAW: 2258 ath12k_dp_rx_h_undecap_raw(ar, msdu, enctype, status, 2259 decrypted); 2260 break; 2261 case DP_RX_DECAP_TYPE_ETHERNET2_DIX: 2262 ehdr = (struct ethhdr *)msdu->data; 2263 2264 /* mac80211 allows fast path only for authorized STA */ 2265 if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) { 2266 ATH12K_SKB_RXCB(msdu)->is_eapol = true; 2267 ath12k_dp_rx_h_undecap_eth(ar, msdu, enctype, status); 2268 break; 2269 } 2270 2271 /* PN for mcast packets will be validated in mac80211; 2272 * remove eth header and add 802.11 header. 
2273 */ 2274 if (ATH12K_SKB_RXCB(msdu)->is_mcbc && decrypted) 2275 ath12k_dp_rx_h_undecap_eth(ar, msdu, enctype, status); 2276 break; 2277 case DP_RX_DECAP_TYPE_8023: 2278 /* TODO: Handle undecap for these formats */ 2279 break; 2280 } 2281 } 2282 2283 struct ath12k_peer * 2284 ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu, 2285 struct ath12k_dp_rx_info *rx_info) 2286 { 2287 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 2288 struct ath12k_peer *peer = NULL; 2289 2290 lockdep_assert_held(&ab->base_lock); 2291 2292 if (rxcb->peer_id) 2293 peer = ath12k_peer_find_by_id(ab, rxcb->peer_id); 2294 2295 if (peer) 2296 return peer; 2297 2298 if (rx_info->addr2_present) 2299 peer = ath12k_peer_find_by_addr(ab, rx_info->addr2); 2300 2301 return peer; 2302 } 2303 2304 static void ath12k_dp_rx_h_mpdu(struct ath12k *ar, 2305 struct sk_buff *msdu, 2306 struct hal_rx_desc *rx_desc, 2307 struct ath12k_dp_rx_info *rx_info) 2308 { 2309 struct ath12k_base *ab = ar->ab; 2310 struct ath12k_skb_rxcb *rxcb; 2311 enum hal_encrypt_type enctype; 2312 bool is_decrypted = false; 2313 struct ieee80211_hdr *hdr; 2314 struct ath12k_peer *peer; 2315 struct ieee80211_rx_status *rx_status = rx_info->rx_status; 2316 u32 err_bitmap; 2317 2318 /* PN for multicast packets will be checked in mac80211 */ 2319 rxcb = ATH12K_SKB_RXCB(msdu); 2320 rxcb->is_mcbc = rx_info->is_mcbc; 2321 2322 if (rxcb->is_mcbc) 2323 rxcb->peer_id = rx_info->peer_id; 2324 2325 spin_lock_bh(&ar->ab->base_lock); 2326 peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu, rx_info); 2327 if (peer) { 2328 /* Reset the mcbc bit for peers marked ucast_ra_only: frames from 2329 * such peers (e.g. a STA towards its AP) arrive as unicast MPDUs even when the DA is multicast/broadcast. 2330 */ 2331 rxcb->is_mcbc = rxcb->is_mcbc && !peer->ucast_ra_only; 2332 2333 if (rxcb->is_mcbc) 2334 enctype = peer->sec_type_grp; 2335 else 2336 enctype = peer->sec_type; 2337 } else { 2338 enctype = HAL_ENCRYPT_TYPE_OPEN; 2339 } 2340 spin_unlock_bh(&ar->ab->base_lock); 2341 2342 err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc); 2343 if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap) 2344 is_decrypted = ath12k_dp_rx_h_is_decrypted(ab, rx_desc); 2345 2346 /* Clear per-MPDU flags while leaving per-PPDU flags intact */ 2347 rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC | 2348 RX_FLAG_MMIC_ERROR | 2349 RX_FLAG_DECRYPTED | 2350 RX_FLAG_IV_STRIPPED | 2351 RX_FLAG_MMIC_STRIPPED); 2352 2353 if (err_bitmap & HAL_RX_MPDU_ERR_FCS) 2354 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; 2355 if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) 2356 rx_status->flag |= RX_FLAG_MMIC_ERROR; 2357 2358 if (is_decrypted) { 2359 rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED; 2360 2361 if (rx_info->is_mcbc) 2362 rx_status->flag |= RX_FLAG_MIC_STRIPPED | 2363 RX_FLAG_ICV_STRIPPED; 2364 else 2365 rx_status->flag |= RX_FLAG_IV_STRIPPED | 2366 RX_FLAG_PN_VALIDATED; 2367 } 2368 2369 ath12k_dp_rx_h_csum_offload(msdu, rx_info); 2370 ath12k_dp_rx_h_undecap(ar, msdu, rx_desc, 2371 enctype, rx_status, is_decrypted); 2372 2373 if (!is_decrypted || rx_info->is_mcbc) 2374 return; 2375 2376 if (rx_info->decap_type != DP_RX_DECAP_TYPE_ETHERNET2_DIX) { 2377 hdr = (void *)msdu->data; 2378 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); 2379 } 2380 } 2381 2382 static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info) 2383 { 2384 struct ieee80211_supported_band *sband; 2385 struct ieee80211_rx_status *rx_status = rx_info->rx_status; 2386 enum rx_msdu_start_pkt_type pkt_type = rx_info->pkt_type; 2387 u8 bw = rx_info->bw,
sgi = rx_info->sgi; 2388 u8 rate_mcs = rx_info->rate_mcs, nss = rx_info->nss; 2389 bool is_cck; 2390 2391 switch (pkt_type) { 2392 case RX_MSDU_START_PKT_TYPE_11A: 2393 case RX_MSDU_START_PKT_TYPE_11B: 2394 is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B); 2395 sband = &ar->mac.sbands[rx_status->band]; 2396 rx_status->rate_idx = ath12k_mac_hw_rate_to_idx(sband, rate_mcs, 2397 is_cck); 2398 break; 2399 case RX_MSDU_START_PKT_TYPE_11N: 2400 rx_status->encoding = RX_ENC_HT; 2401 if (rate_mcs > ATH12K_HT_MCS_MAX) { 2402 ath12k_warn(ar->ab, 2403 "Received with invalid mcs in HT mode %d\n", 2404 rate_mcs); 2405 break; 2406 } 2407 rx_status->rate_idx = rate_mcs + (8 * (nss - 1)); 2408 if (sgi) 2409 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 2410 rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw); 2411 break; 2412 case RX_MSDU_START_PKT_TYPE_11AC: 2413 rx_status->encoding = RX_ENC_VHT; 2414 rx_status->rate_idx = rate_mcs; 2415 if (rate_mcs > ATH12K_VHT_MCS_MAX) { 2416 ath12k_warn(ar->ab, 2417 "Received with invalid mcs in VHT mode %d\n", 2418 rate_mcs); 2419 break; 2420 } 2421 rx_status->nss = nss; 2422 if (sgi) 2423 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 2424 rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw); 2425 break; 2426 case RX_MSDU_START_PKT_TYPE_11AX: 2427 rx_status->rate_idx = rate_mcs; 2428 if (rate_mcs > ATH12K_HE_MCS_MAX) { 2429 ath12k_warn(ar->ab, 2430 "Received with invalid mcs in HE mode %d\n", 2431 rate_mcs); 2432 break; 2433 } 2434 rx_status->encoding = RX_ENC_HE; 2435 rx_status->nss = nss; 2436 rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi); 2437 rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw); 2438 break; 2439 case RX_MSDU_START_PKT_TYPE_11BE: 2440 rx_status->rate_idx = rate_mcs; 2441 2442 if (rate_mcs > ATH12K_EHT_MCS_MAX) { 2443 ath12k_warn(ar->ab, 2444 "Received with invalid mcs in EHT mode %d\n", 2445 rate_mcs); 2446 break; 2447 } 2448 2449 rx_status->encoding = RX_ENC_EHT; 2450 rx_status->nss = nss; 2451 rx_status->eht.gi = ath12k_mac_eht_gi_to_nl80211_eht_gi(sgi); 2452 rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw); 2453 break; 2454 default: 2455 break; 2456 } 2457 } 2458 2459 void ath12k_dp_rx_h_fetch_info(struct ath12k_base *ab, struct hal_rx_desc *rx_desc, 2460 struct ath12k_dp_rx_info *rx_info) 2461 { 2462 rx_info->ip_csum_fail = ath12k_dp_rx_h_ip_cksum_fail(ab, rx_desc); 2463 rx_info->l4_csum_fail = ath12k_dp_rx_h_l4_cksum_fail(ab, rx_desc); 2464 rx_info->is_mcbc = ath12k_dp_rx_h_is_da_mcbc(ab, rx_desc); 2465 rx_info->decap_type = ath12k_dp_rx_h_decap_type(ab, rx_desc); 2466 rx_info->pkt_type = ath12k_dp_rx_h_pkt_type(ab, rx_desc); 2467 rx_info->sgi = ath12k_dp_rx_h_sgi(ab, rx_desc); 2468 rx_info->rate_mcs = ath12k_dp_rx_h_rate_mcs(ab, rx_desc); 2469 rx_info->bw = ath12k_dp_rx_h_rx_bw(ab, rx_desc); 2470 rx_info->nss = ath12k_dp_rx_h_nss(ab, rx_desc); 2471 rx_info->tid = ath12k_dp_rx_h_tid(ab, rx_desc); 2472 rx_info->peer_id = ath12k_dp_rx_h_peer_id(ab, rx_desc); 2473 rx_info->phy_meta_data = ath12k_dp_rx_h_freq(ab, rx_desc); 2474 2475 if (ath12k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)) { 2476 ether_addr_copy(rx_info->addr2, 2477 ath12k_dp_rxdesc_get_mpdu_start_addr2(ab, rx_desc)); 2478 rx_info->addr2_present = true; 2479 } 2480 2481 ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "rx_desc: ", 2482 rx_desc, sizeof(*rx_desc)); 2483 } 2484 2485 void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info) 2486 { 2487 struct ieee80211_rx_status *rx_status = rx_info->rx_status; 2488 u8 channel_num; 2489 u32 center_freq, meta_data; 2490 struct 
ieee80211_channel *channel; 2491 2492 rx_status->freq = 0; 2493 rx_status->rate_idx = 0; 2494 rx_status->nss = 0; 2495 rx_status->encoding = RX_ENC_LEGACY; 2496 rx_status->bw = RATE_INFO_BW_20; 2497 rx_status->enc_flags = 0; 2498 2499 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; 2500 2501 meta_data = rx_info->phy_meta_data; 2502 channel_num = meta_data; 2503 center_freq = meta_data >> 16; 2504 2505 if (center_freq >= ATH12K_MIN_6GHZ_FREQ && 2506 center_freq <= ATH12K_MAX_6GHZ_FREQ) { 2507 rx_status->band = NL80211_BAND_6GHZ; 2508 rx_status->freq = center_freq; 2509 } else if (channel_num >= 1 && channel_num <= 14) { 2510 rx_status->band = NL80211_BAND_2GHZ; 2511 } else if (channel_num >= 36 && channel_num <= 173) { 2512 rx_status->band = NL80211_BAND_5GHZ; 2513 } else { 2514 spin_lock_bh(&ar->data_lock); 2515 channel = ar->rx_channel; 2516 if (channel) { 2517 rx_status->band = channel->band; 2518 channel_num = 2519 ieee80211_frequency_to_channel(channel->center_freq); 2520 } 2521 spin_unlock_bh(&ar->data_lock); 2522 } 2523 2524 if (rx_status->band != NL80211_BAND_6GHZ) 2525 rx_status->freq = ieee80211_channel_to_frequency(channel_num, 2526 rx_status->band); 2527 2528 ath12k_dp_rx_h_rate(ar, rx_info); 2529 } 2530 2531 static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *napi, 2532 struct sk_buff *msdu, 2533 struct ath12k_dp_rx_info *rx_info) 2534 { 2535 struct ath12k_base *ab = ar->ab; 2536 static const struct ieee80211_radiotap_he known = { 2537 .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN | 2538 IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN), 2539 .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN), 2540 }; 2541 struct ieee80211_radiotap_he *he; 2542 struct ieee80211_rx_status *rx_status; 2543 struct ieee80211_sta *pubsta; 2544 struct ath12k_peer *peer; 2545 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 2546 struct ieee80211_rx_status *status = rx_info->rx_status; 2547 u8 decap = DP_RX_DECAP_TYPE_RAW; 2548 bool is_mcbc = rxcb->is_mcbc; 2549 bool is_eapol = rxcb->is_eapol; 2550 2551 if (status->encoding == RX_ENC_HE && !(status->flag & RX_FLAG_RADIOTAP_HE) && 2552 !(status->flag & RX_FLAG_SKIP_MONITOR)) { 2553 he = skb_push(msdu, sizeof(known)); 2554 memcpy(he, &known, sizeof(known)); 2555 status->flag |= RX_FLAG_RADIOTAP_HE; 2556 } 2557 2558 if (!(status->flag & RX_FLAG_ONLY_MONITOR)) 2559 decap = rx_info->decap_type; 2560 2561 spin_lock_bh(&ab->base_lock); 2562 peer = ath12k_dp_rx_h_find_peer(ab, msdu, rx_info); 2563 2564 pubsta = peer ? peer->sta : NULL; 2565 2566 if (pubsta && pubsta->valid_links) { 2567 status->link_valid = 1; 2568 status->link_id = peer->link_id; 2569 } 2570 2571 spin_unlock_bh(&ab->base_lock); 2572 2573 ath12k_dbg(ab, ATH12K_DBG_DATA, 2574 "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", 2575 msdu, 2576 msdu->len, 2577 peer ? peer->addr : NULL, 2578 rxcb->tid, 2579 is_mcbc ? "mcast" : "ucast", 2580 ath12k_dp_rx_h_seq_no(ab, rxcb->rx_desc), 2581 (status->encoding == RX_ENC_LEGACY) ? "legacy" : "", 2582 (status->encoding == RX_ENC_HT) ? "ht" : "", 2583 (status->encoding == RX_ENC_VHT) ? "vht" : "", 2584 (status->encoding == RX_ENC_HE) ? "he" : "", 2585 (status->encoding == RX_ENC_EHT) ? "eht" : "", 2586 (status->bw == RATE_INFO_BW_40) ? "40" : "", 2587 (status->bw == RATE_INFO_BW_80) ? "80" : "", 2588 (status->bw == RATE_INFO_BW_160) ? "160" : "", 2589 (status->bw == RATE_INFO_BW_320) ? 
"320" : "", 2590 status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "", 2591 status->rate_idx, 2592 status->nss, 2593 status->freq, 2594 status->band, status->flag, 2595 !!(status->flag & RX_FLAG_FAILED_FCS_CRC), 2596 !!(status->flag & RX_FLAG_MMIC_ERROR), 2597 !!(status->flag & RX_FLAG_AMSDU_MORE)); 2598 2599 ath12k_dbg_dump(ab, ATH12K_DBG_DP_RX, NULL, "dp rx msdu: ", 2600 msdu->data, msdu->len); 2601 2602 rx_status = IEEE80211_SKB_RXCB(msdu); 2603 *rx_status = *status; 2604 2605 /* TODO: trace rx packet */ 2606 2607 /* PN for multicast packets are not validate in HW, 2608 * so skip 802.3 rx path 2609 * Also, fast_rx expects the STA to be authorized, hence 2610 * eapol packets are sent in slow path. 2611 */ 2612 if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol && 2613 !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED)) 2614 rx_status->flag |= RX_FLAG_8023; 2615 2616 ieee80211_rx_napi(ath12k_ar_to_hw(ar), pubsta, msdu, napi); 2617 } 2618 2619 static bool ath12k_dp_rx_check_nwifi_hdr_len_valid(struct ath12k_base *ab, 2620 struct hal_rx_desc *rx_desc, 2621 struct sk_buff *msdu) 2622 { 2623 struct ieee80211_hdr *hdr; 2624 u8 decap_type; 2625 u32 hdr_len; 2626 2627 decap_type = ath12k_dp_rx_h_decap_type(ab, rx_desc); 2628 if (decap_type != DP_RX_DECAP_TYPE_NATIVE_WIFI) 2629 return true; 2630 2631 hdr = (struct ieee80211_hdr *)msdu->data; 2632 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2633 2634 if ((likely(hdr_len <= DP_MAX_NWIFI_HDR_LEN))) 2635 return true; 2636 2637 ab->device_stats.invalid_rbm++; 2638 WARN_ON_ONCE(1); 2639 return false; 2640 } 2641 2642 static int ath12k_dp_rx_process_msdu(struct ath12k *ar, 2643 struct sk_buff *msdu, 2644 struct sk_buff_head *msdu_list, 2645 struct ath12k_dp_rx_info *rx_info) 2646 { 2647 struct ath12k_base *ab = ar->ab; 2648 struct hal_rx_desc *rx_desc, *lrx_desc; 2649 struct ath12k_skb_rxcb *rxcb; 2650 struct sk_buff *last_buf; 2651 u8 l3_pad_bytes; 2652 u16 msdu_len; 2653 int ret; 2654 u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; 2655 2656 last_buf = ath12k_dp_rx_get_msdu_last_buf(msdu_list, msdu); 2657 if (!last_buf) { 2658 ath12k_warn(ab, 2659 "No valid Rx buffer to access MSDU_END tlv\n"); 2660 ret = -EIO; 2661 goto free_out; 2662 } 2663 2664 rx_desc = (struct hal_rx_desc *)msdu->data; 2665 lrx_desc = (struct hal_rx_desc *)last_buf->data; 2666 if (!ath12k_dp_rx_h_msdu_done(ab, lrx_desc)) { 2667 ath12k_warn(ab, "msdu_done bit in msdu_end is not set\n"); 2668 ret = -EIO; 2669 goto free_out; 2670 } 2671 2672 rxcb = ATH12K_SKB_RXCB(msdu); 2673 rxcb->rx_desc = rx_desc; 2674 msdu_len = ath12k_dp_rx_h_msdu_len(ab, lrx_desc); 2675 l3_pad_bytes = ath12k_dp_rx_h_l3pad(ab, lrx_desc); 2676 2677 if (rxcb->is_frag) { 2678 skb_pull(msdu, hal_rx_desc_sz); 2679 } else if (!rxcb->is_continuation) { 2680 if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) { 2681 ret = -EINVAL; 2682 ath12k_warn(ab, "invalid msdu len %u\n", msdu_len); 2683 ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", rx_desc, 2684 sizeof(*rx_desc)); 2685 goto free_out; 2686 } 2687 skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len); 2688 skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes); 2689 } else { 2690 ret = ath12k_dp_rx_msdu_coalesce(ar, msdu_list, 2691 msdu, last_buf, 2692 l3_pad_bytes, msdu_len); 2693 if (ret) { 2694 ath12k_warn(ab, 2695 "failed to coalesce msdu rx buffer%d\n", ret); 2696 goto free_out; 2697 } 2698 } 2699 2700 if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, rx_desc, msdu))) { 2701 ret = -EINVAL; 2702 goto free_out; 2703 } 2704 2705 
ath12k_dp_rx_h_fetch_info(ab, rx_desc, rx_info); 2706 ath12k_dp_rx_h_ppdu(ar, rx_info); 2707 ath12k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_info); 2708 2709 rx_info->rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED; 2710 2711 return 0; 2712 2713 free_out: 2714 return ret; 2715 } 2716 2717 static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab, 2718 struct napi_struct *napi, 2719 struct sk_buff_head *msdu_list, 2720 int ring_id) 2721 { 2722 struct ath12k_hw_group *ag = ab->ag; 2723 struct ieee80211_rx_status rx_status = {0}; 2724 struct ath12k_skb_rxcb *rxcb; 2725 struct sk_buff *msdu; 2726 struct ath12k *ar; 2727 struct ath12k_hw_link *hw_links = ag->hw_links; 2728 struct ath12k_base *partner_ab; 2729 struct ath12k_dp_rx_info rx_info; 2730 u8 hw_link_id, pdev_id; 2731 int ret; 2732 2733 if (skb_queue_empty(msdu_list)) 2734 return; 2735 2736 rx_info.addr2_present = false; 2737 rx_info.rx_status = &rx_status; 2738 2739 rcu_read_lock(); 2740 2741 while ((msdu = __skb_dequeue(msdu_list))) { 2742 rxcb = ATH12K_SKB_RXCB(msdu); 2743 hw_link_id = rxcb->hw_link_id; 2744 partner_ab = ath12k_ag_to_ab(ag, 2745 hw_links[hw_link_id].device_id); 2746 pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params, 2747 hw_links[hw_link_id].pdev_idx); 2748 ar = partner_ab->pdevs[pdev_id].ar; 2749 if (!rcu_dereference(partner_ab->pdevs_active[pdev_id])) { 2750 dev_kfree_skb_any(msdu); 2751 continue; 2752 } 2753 2754 if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) { 2755 dev_kfree_skb_any(msdu); 2756 continue; 2757 } 2758 2759 ret = ath12k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_info); 2760 if (ret) { 2761 ath12k_dbg(ab, ATH12K_DBG_DATA, 2762 "Unable to process msdu %d", ret); 2763 dev_kfree_skb_any(msdu); 2764 continue; 2765 } 2766 2767 ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_info); 2768 } 2769 2770 rcu_read_unlock(); 2771 } 2772 2773 static u16 ath12k_dp_rx_get_peer_id(struct ath12k_base *ab, 2774 enum ath12k_peer_metadata_version ver, 2775 __le32 peer_metadata) 2776 { 2777 switch (ver) { 2778 default: 2779 ath12k_warn(ab, "Unknown peer metadata version: %d", ver); 2780 fallthrough; 2781 case ATH12K_PEER_METADATA_V0: 2782 return le32_get_bits(peer_metadata, 2783 RX_MPDU_DESC_META_DATA_V0_PEER_ID); 2784 case ATH12K_PEER_METADATA_V1: 2785 return le32_get_bits(peer_metadata, 2786 RX_MPDU_DESC_META_DATA_V1_PEER_ID); 2787 case ATH12K_PEER_METADATA_V1A: 2788 return le32_get_bits(peer_metadata, 2789 RX_MPDU_DESC_META_DATA_V1A_PEER_ID); 2790 case ATH12K_PEER_METADATA_V1B: 2791 return le32_get_bits(peer_metadata, 2792 RX_MPDU_DESC_META_DATA_V1B_PEER_ID); 2793 } 2794 } 2795 2796 int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id, 2797 struct napi_struct *napi, int budget) 2798 { 2799 struct ath12k_hw_group *ag = ab->ag; 2800 struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES]; 2801 struct ath12k_hw_link *hw_links = ag->hw_links; 2802 int num_buffs_reaped[ATH12K_MAX_DEVICES] = {}; 2803 struct ath12k_rx_desc_info *desc_info; 2804 struct ath12k_dp *dp = &ab->dp; 2805 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; 2806 struct hal_reo_dest_ring *desc; 2807 struct ath12k_base *partner_ab; 2808 struct sk_buff_head msdu_list; 2809 struct ath12k_skb_rxcb *rxcb; 2810 int total_msdu_reaped = 0; 2811 u8 hw_link_id, device_id; 2812 struct hal_srng *srng; 2813 struct sk_buff *msdu; 2814 bool done = false; 2815 u64 desc_va; 2816 2817 __skb_queue_head_init(&msdu_list); 2818 2819 for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) 2820 
INIT_LIST_HEAD(&rx_desc_used_list[device_id]); 2821 2822 srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id]; 2823 2824 spin_lock_bh(&srng->lock); 2825 2826 try_again: 2827 ath12k_hal_srng_access_begin(ab, srng); 2828 2829 while ((desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) { 2830 struct rx_mpdu_desc *mpdu_info; 2831 struct rx_msdu_desc *msdu_info; 2832 enum hal_reo_dest_ring_push_reason push_reason; 2833 u32 cookie; 2834 2835 cookie = le32_get_bits(desc->buf_addr_info.info1, 2836 BUFFER_ADDR_INFO1_SW_COOKIE); 2837 2838 hw_link_id = le32_get_bits(desc->info0, 2839 HAL_REO_DEST_RING_INFO0_SRC_LINK_ID); 2840 2841 desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 | 2842 le32_to_cpu(desc->buf_va_lo)); 2843 desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va); 2844 2845 device_id = hw_links[hw_link_id].device_id; 2846 partner_ab = ath12k_ag_to_ab(ag, device_id); 2847 if (unlikely(!partner_ab)) { 2848 if (desc_info->skb) { 2849 dev_kfree_skb_any(desc_info->skb); 2850 desc_info->skb = NULL; 2851 } 2852 2853 continue; 2854 } 2855 2856 /* retry manual desc retrieval */ 2857 if (!desc_info) { 2858 desc_info = ath12k_dp_get_rx_desc(partner_ab, cookie); 2859 if (!desc_info) { 2860 ath12k_warn(partner_ab, "Invalid cookie in manual descriptor retrieval: 0x%x\n", 2861 cookie); 2862 continue; 2863 } 2864 } 2865 2866 if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC) 2867 ath12k_warn(ab, "Check HW CC implementation"); 2868 2869 msdu = desc_info->skb; 2870 desc_info->skb = NULL; 2871 2872 list_add_tail(&desc_info->list, &rx_desc_used_list[device_id]); 2873 2874 rxcb = ATH12K_SKB_RXCB(msdu); 2875 dma_unmap_single(partner_ab->dev, rxcb->paddr, 2876 msdu->len + skb_tailroom(msdu), 2877 DMA_FROM_DEVICE); 2878 2879 num_buffs_reaped[device_id]++; 2880 ab->device_stats.reo_rx[ring_id][ab->device_id]++; 2881 2882 push_reason = le32_get_bits(desc->info0, 2883 HAL_REO_DEST_RING_INFO0_PUSH_REASON); 2884 if (push_reason != 2885 HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) { 2886 dev_kfree_skb_any(msdu); 2887 ab->device_stats.hal_reo_error[ring_id]++; 2888 continue; 2889 } 2890 2891 msdu_info = &desc->rx_msdu_info; 2892 mpdu_info = &desc->rx_mpdu_info; 2893 2894 rxcb->is_first_msdu = !!(le32_to_cpu(msdu_info->info0) & 2895 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU); 2896 rxcb->is_last_msdu = !!(le32_to_cpu(msdu_info->info0) & 2897 RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU); 2898 rxcb->is_continuation = !!(le32_to_cpu(msdu_info->info0) & 2899 RX_MSDU_DESC_INFO0_MSDU_CONTINUATION); 2900 rxcb->hw_link_id = hw_link_id; 2901 rxcb->peer_id = ath12k_dp_rx_get_peer_id(ab, dp->peer_metadata_ver, 2902 mpdu_info->peer_meta_data); 2903 rxcb->tid = le32_get_bits(mpdu_info->info0, 2904 RX_MPDU_DESC_INFO0_TID); 2905 2906 __skb_queue_tail(&msdu_list, msdu); 2907 2908 if (!rxcb->is_continuation) { 2909 total_msdu_reaped++; 2910 done = true; 2911 } else { 2912 done = false; 2913 } 2914 2915 if (total_msdu_reaped >= budget) 2916 break; 2917 } 2918 2919 /* Hw might have updated the head pointer after we cached it. 2920 * In this case, even though there are entries in the ring we'll 2921 * get rx_desc NULL. Give the read another try with updated cached 2922 * head pointer so that we can reap complete MPDU in the current 2923 * rx processing. 
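 * (Passing the boolean argument as true to ath12k_hal_srng_dst_num_free()
 * below is read here as requesting a sync of the cached hardware head
 * pointer before counting pending entries; an assumption based on the
 * HAL's sync_hw_ptr parameter.)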
2924 */ 2925 if (!done && ath12k_hal_srng_dst_num_free(ab, srng, true)) { 2926 ath12k_hal_srng_access_end(ab, srng); 2927 goto try_again; 2928 } 2929 2930 ath12k_hal_srng_access_end(ab, srng); 2931 2932 spin_unlock_bh(&srng->lock); 2933 2934 if (!total_msdu_reaped) 2935 goto exit; 2936 2937 for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) { 2938 if (!num_buffs_reaped[device_id]) 2939 continue; 2940 2941 partner_ab = ath12k_ag_to_ab(ag, device_id); 2942 rx_ring = &partner_ab->dp.rx_refill_buf_ring; 2943 2944 ath12k_dp_rx_bufs_replenish(partner_ab, rx_ring, 2945 &rx_desc_used_list[device_id], 2946 num_buffs_reaped[device_id]); 2947 } 2948 2949 ath12k_dp_rx_process_received_packets(ab, napi, &msdu_list, 2950 ring_id); 2951 2952 exit: 2953 return total_msdu_reaped; 2954 } 2955 2956 static void ath12k_dp_rx_frag_timer(struct timer_list *timer) 2957 { 2958 struct ath12k_dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer); 2959 2960 spin_lock_bh(&rx_tid->ab->base_lock); 2961 if (rx_tid->last_frag_no && 2962 rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) { 2963 spin_unlock_bh(&rx_tid->ab->base_lock); 2964 return; 2965 } 2966 ath12k_dp_rx_frags_cleanup(rx_tid, true); 2967 spin_unlock_bh(&rx_tid->ab->base_lock); 2968 } 2969 2970 int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id) 2971 { 2972 struct ath12k_base *ab = ar->ab; 2973 struct crypto_shash *tfm; 2974 struct ath12k_peer *peer; 2975 struct ath12k_dp_rx_tid *rx_tid; 2976 int i; 2977 2978 tfm = crypto_alloc_shash("michael_mic", 0, 0); 2979 if (IS_ERR(tfm)) 2980 return PTR_ERR(tfm); 2981 2982 spin_lock_bh(&ab->base_lock); 2983 2984 peer = ath12k_peer_find(ab, vdev_id, peer_mac); 2985 if (!peer) { 2986 spin_unlock_bh(&ab->base_lock); 2987 crypto_free_shash(tfm); 2988 ath12k_warn(ab, "failed to find the peer to set up fragment info\n"); 2989 return -ENOENT; 2990 } 2991 2992 if (!peer->primary_link) { 2993 spin_unlock_bh(&ab->base_lock); 2994 crypto_free_shash(tfm); 2995 return 0; 2996 } 2997 2998 for (i = 0; i <= IEEE80211_NUM_TIDS; i++) { 2999 rx_tid = &peer->rx_tid[i]; 3000 rx_tid->ab = ab; 3001 timer_setup(&rx_tid->frag_timer, ath12k_dp_rx_frag_timer, 0); 3002 skb_queue_head_init(&rx_tid->rx_frags); 3003 } 3004 3005 peer->tfm_mmic = tfm; 3006 peer->dp_setup_done = true; 3007 spin_unlock_bh(&ab->base_lock); 3008 3009 return 0; 3010 } 3011 3012 static int ath12k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key, 3013 struct ieee80211_hdr *hdr, u8 *data, 3014 size_t data_len, u8 *mic) 3015 { 3016 SHASH_DESC_ON_STACK(desc, tfm); 3017 u8 mic_hdr[16] = {0}; 3018 u8 tid = 0; 3019 int ret; 3020 3021 if (!tfm) 3022 return -EINVAL; 3023 3024 desc->tfm = tfm; 3025 3026 ret = crypto_shash_setkey(tfm, key, 8); 3027 if (ret) 3028 goto out; 3029 3030 ret = crypto_shash_init(desc); 3031 if (ret) 3032 goto out; 3033 3034 /* TKIP MIC header */ 3035 memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN); 3036 memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN); 3037 if (ieee80211_is_data_qos(hdr->frame_control)) 3038 tid = ieee80211_get_tid(hdr); 3039 mic_hdr[12] = tid; 3040 3041 ret = crypto_shash_update(desc, mic_hdr, 16); 3042 if (ret) 3043 goto out; 3044 ret = crypto_shash_update(desc, data, data_len); 3045 if (ret) 3046 goto out; 3047 ret = crypto_shash_final(desc, mic); 3048 out: 3049 shash_desc_zero(desc); 3050 return ret; 3051 } 3052 3053 static int ath12k_dp_rx_h_verify_tkip_mic(struct ath12k *ar, struct ath12k_peer *peer, 3054 struct sk_buff *msdu) 3055 { 3056 struct ath12k_base 
*ab = ar->ab; 3057 struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data; 3058 struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu); 3059 struct ieee80211_key_conf *key_conf; 3060 struct ieee80211_hdr *hdr; 3061 struct ath12k_dp_rx_info rx_info; 3062 u8 mic[IEEE80211_CCMP_MIC_LEN]; 3063 int head_len, tail_len, ret; 3064 size_t data_len; 3065 u32 hdr_len, hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; 3066 u8 *key, *data; 3067 u8 key_idx; 3068 3069 if (ath12k_dp_rx_h_enctype(ab, rx_desc) != HAL_ENCRYPT_TYPE_TKIP_MIC) 3070 return 0; 3071 3072 rx_info.addr2_present = false; 3073 rx_info.rx_status = rxs; 3074 3075 hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz); 3076 hdr_len = ieee80211_hdrlen(hdr->frame_control); 3077 head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN; 3078 tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN; 3079 3080 if (!is_multicast_ether_addr(hdr->addr1)) 3081 key_idx = peer->ucast_keyidx; 3082 else 3083 key_idx = peer->mcast_keyidx; 3084 3085 key_conf = peer->keys[key_idx]; 3086 3087 data = msdu->data + head_len; 3088 data_len = msdu->len - head_len - tail_len; 3089 key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY]; 3090 3091 ret = ath12k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic); 3092 if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN)) 3093 goto mic_fail; 3094 3095 return 0; 3096 3097 mic_fail: 3098 (ATH12K_SKB_RXCB(msdu))->is_first_msdu = true; 3099 (ATH12K_SKB_RXCB(msdu))->is_last_msdu = true; 3100 3101 ath12k_dp_rx_h_fetch_info(ab, rx_desc, &rx_info); 3102 3103 rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED | 3104 RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED; 3105 skb_pull(msdu, hal_rx_desc_sz); 3106 3107 if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, rx_desc, msdu))) 3108 return -EINVAL; 3109 3110 ath12k_dp_rx_h_ppdu(ar, &rx_info); 3111 ath12k_dp_rx_h_undecap(ar, msdu, rx_desc, 3112 HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true); 3113 ieee80211_rx(ath12k_ar_to_hw(ar), msdu); 3114 return -EINVAL; 3115 } 3116 3117 static void ath12k_dp_rx_h_undecap_frag(struct ath12k *ar, struct sk_buff *msdu, 3118 enum hal_encrypt_type enctype, u32 flags) 3119 { 3120 struct ieee80211_hdr *hdr; 3121 size_t hdr_len; 3122 size_t crypto_len; 3123 u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; 3124 3125 if (!flags) 3126 return; 3127 3128 hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz); 3129 3130 if (flags & RX_FLAG_MIC_STRIPPED) 3131 skb_trim(msdu, msdu->len - 3132 ath12k_dp_rx_crypto_mic_len(ar, enctype)); 3133 3134 if (flags & RX_FLAG_ICV_STRIPPED) 3135 skb_trim(msdu, msdu->len - 3136 ath12k_dp_rx_crypto_icv_len(ar, enctype)); 3137 3138 if (flags & RX_FLAG_IV_STRIPPED) { 3139 hdr_len = ieee80211_hdrlen(hdr->frame_control); 3140 crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype); 3141 3142 memmove(msdu->data + hal_rx_desc_sz + crypto_len, 3143 msdu->data + hal_rx_desc_sz, hdr_len); 3144 skb_pull(msdu, crypto_len); 3145 } 3146 } 3147 3148 static int ath12k_dp_rx_h_defrag(struct ath12k *ar, 3149 struct ath12k_peer *peer, 3150 struct ath12k_dp_rx_tid *rx_tid, 3151 struct sk_buff **defrag_skb) 3152 { 3153 struct ath12k_base *ab = ar->ab; 3154 struct hal_rx_desc *rx_desc; 3155 struct sk_buff *skb, *first_frag, *last_frag; 3156 struct ieee80211_hdr *hdr; 3157 enum hal_encrypt_type enctype; 3158 bool is_decrypted = false; 3159 int msdu_len = 0; 3160 int extra_space; 3161 u32 flags, hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; 3162 3163 first_frag = skb_peek(&rx_tid->rx_frags); 3164 
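	/* Fragments were queued in frag_no order by ath12k_dp_rx_h_sort_frags(),
	 * so the head of rx_frags is fragment zero and the tail holds the
	 * final fragment of the MPDU.
	 */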
last_frag = skb_peek_tail(&rx_tid->rx_frags); 3165 3166 skb_queue_walk(&rx_tid->rx_frags, skb) { 3167 flags = 0; 3168 rx_desc = (struct hal_rx_desc *)skb->data; 3169 hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz); 3170 3171 enctype = ath12k_dp_rx_h_enctype(ab, rx_desc); 3172 if (enctype != HAL_ENCRYPT_TYPE_OPEN) 3173 is_decrypted = ath12k_dp_rx_h_is_decrypted(ab, 3174 rx_desc); 3175 3176 if (is_decrypted) { 3177 if (skb != first_frag) 3178 flags |= RX_FLAG_IV_STRIPPED; 3179 if (skb != last_frag) 3180 flags |= RX_FLAG_ICV_STRIPPED | 3181 RX_FLAG_MIC_STRIPPED; 3182 } 3183 3184 /* RX fragments are always raw packets */ 3185 if (skb != last_frag) 3186 skb_trim(skb, skb->len - FCS_LEN); 3187 ath12k_dp_rx_h_undecap_frag(ar, skb, enctype, flags); 3188 3189 if (skb != first_frag) 3190 skb_pull(skb, hal_rx_desc_sz + 3191 ieee80211_hdrlen(hdr->frame_control)); 3192 msdu_len += skb->len; 3193 } 3194 3195 extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag)); 3196 if (extra_space > 0 && 3197 (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0)) 3198 return -ENOMEM; 3199 3200 __skb_unlink(first_frag, &rx_tid->rx_frags); 3201 while ((skb = __skb_dequeue(&rx_tid->rx_frags))) { 3202 skb_put_data(first_frag, skb->data, skb->len); 3203 dev_kfree_skb_any(skb); 3204 } 3205 3206 hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz); 3207 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS); 3208 ATH12K_SKB_RXCB(first_frag)->is_frag = 1; 3209 3210 if (ath12k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag)) 3211 first_frag = NULL; 3212 3213 *defrag_skb = first_frag; 3214 return 0; 3215 } 3216 3217 static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar, 3218 struct ath12k_dp_rx_tid *rx_tid, 3219 struct sk_buff *defrag_skb) 3220 { 3221 struct ath12k_base *ab = ar->ab; 3222 struct ath12k_dp *dp = &ab->dp; 3223 struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data; 3224 struct hal_reo_entrance_ring *reo_ent_ring; 3225 struct hal_reo_dest_ring *reo_dest_ring; 3226 struct dp_link_desc_bank *link_desc_banks; 3227 struct hal_rx_msdu_link *msdu_link; 3228 struct hal_rx_msdu_details *msdu0; 3229 struct hal_srng *srng; 3230 dma_addr_t link_paddr, buf_paddr; 3231 u32 desc_bank, msdu_info, msdu_ext_info, mpdu_info; 3232 u32 cookie, hal_rx_desc_sz, dest_ring_info0, queue_addr_hi; 3233 int ret; 3234 struct ath12k_rx_desc_info *desc_info; 3235 enum hal_rx_buf_return_buf_manager idle_link_rbm = dp->idle_link_rbm; 3236 u8 dst_ind; 3237 3238 hal_rx_desc_sz = ab->hal.hal_desc_sz; 3239 link_desc_banks = dp->link_desc_banks; 3240 reo_dest_ring = rx_tid->dst_ring_desc; 3241 3242 ath12k_hal_rx_reo_ent_paddr_get(ab, &reo_dest_ring->buf_addr_info, 3243 &link_paddr, &cookie); 3244 desc_bank = u32_get_bits(cookie, DP_LINK_DESC_BANK_MASK); 3245 3246 msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr + 3247 (link_paddr - link_desc_banks[desc_bank].paddr)); 3248 msdu0 = &msdu_link->msdu_link[0]; 3249 msdu_ext_info = le32_to_cpu(msdu0->rx_msdu_ext_info.info0); 3250 dst_ind = u32_get_bits(msdu_ext_info, RX_MSDU_EXT_DESC_INFO0_REO_DEST_IND); 3251 3252 memset(msdu0, 0, sizeof(*msdu0)); 3253 3254 msdu_info = u32_encode_bits(1, RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU) | 3255 u32_encode_bits(1, RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU) | 3256 u32_encode_bits(0, RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) | 3257 u32_encode_bits(defrag_skb->len - hal_rx_desc_sz, 3258 RX_MSDU_DESC_INFO0_MSDU_LENGTH) | 3259 u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_SA) | 3260 
u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_DA); 3261 msdu0->rx_msdu_info.info0 = cpu_to_le32(msdu_info); 3262 msdu0->rx_msdu_ext_info.info0 = cpu_to_le32(msdu_ext_info); 3263 3264 /* change msdu len in hal rx desc */ 3265 ath12k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz); 3266 3267 buf_paddr = dma_map_single(ab->dev, defrag_skb->data, 3268 defrag_skb->len + skb_tailroom(defrag_skb), 3269 DMA_TO_DEVICE); 3270 if (dma_mapping_error(ab->dev, buf_paddr)) 3271 return -ENOMEM; 3272 3273 spin_lock_bh(&dp->rx_desc_lock); 3274 desc_info = list_first_entry_or_null(&dp->rx_desc_free_list, 3275 struct ath12k_rx_desc_info, 3276 list); 3277 if (!desc_info) { 3278 spin_unlock_bh(&dp->rx_desc_lock); 3279 ath12k_warn(ab, "failed to find rx desc for reinject\n"); 3280 ret = -ENOMEM; 3281 goto err_unmap_dma; 3282 } 3283 3284 desc_info->skb = defrag_skb; 3285 desc_info->in_use = true; 3286 3287 list_del(&desc_info->list); 3288 spin_unlock_bh(&dp->rx_desc_lock); 3289 3290 ATH12K_SKB_RXCB(defrag_skb)->paddr = buf_paddr; 3291 3292 ath12k_hal_rx_buf_addr_info_set(&msdu0->buf_addr_info, buf_paddr, 3293 desc_info->cookie, 3294 HAL_RX_BUF_RBM_SW3_BM); 3295 3296 /* Fill mpdu details into reo entrance ring */ 3297 srng = &ab->hal.srng_list[dp->reo_reinject_ring.ring_id]; 3298 3299 spin_lock_bh(&srng->lock); 3300 ath12k_hal_srng_access_begin(ab, srng); 3301 3302 reo_ent_ring = ath12k_hal_srng_src_get_next_entry(ab, srng); 3303 if (!reo_ent_ring) { 3304 ath12k_hal_srng_access_end(ab, srng); 3305 spin_unlock_bh(&srng->lock); 3306 ret = -ENOSPC; 3307 goto err_free_desc; 3308 } 3309 memset(reo_ent_ring, 0, sizeof(*reo_ent_ring)); 3310 3311 ath12k_hal_rx_buf_addr_info_set(&reo_ent_ring->buf_addr_info, link_paddr, 3312 cookie, 3313 idle_link_rbm); 3314 3315 mpdu_info = u32_encode_bits(1, RX_MPDU_DESC_INFO0_MSDU_COUNT) | 3316 u32_encode_bits(0, RX_MPDU_DESC_INFO0_FRAG_FLAG) | 3317 u32_encode_bits(1, RX_MPDU_DESC_INFO0_RAW_MPDU) | 3318 u32_encode_bits(1, RX_MPDU_DESC_INFO0_VALID_PN) | 3319 u32_encode_bits(rx_tid->tid, RX_MPDU_DESC_INFO0_TID); 3320 3321 reo_ent_ring->rx_mpdu_info.info0 = cpu_to_le32(mpdu_info); 3322 reo_ent_ring->rx_mpdu_info.peer_meta_data = 3323 reo_dest_ring->rx_mpdu_info.peer_meta_data; 3324 3325 if (ab->hw_params->reoq_lut_support) { 3326 reo_ent_ring->queue_addr_lo = reo_dest_ring->rx_mpdu_info.peer_meta_data; 3327 queue_addr_hi = 0; 3328 } else { 3329 reo_ent_ring->queue_addr_lo = 3330 cpu_to_le32(lower_32_bits(rx_tid->qbuf.paddr_aligned)); 3331 queue_addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned); 3332 } 3333 3334 reo_ent_ring->info0 = le32_encode_bits(queue_addr_hi, 3335 HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI) | 3336 le32_encode_bits(dst_ind, 3337 HAL_REO_ENTR_RING_INFO0_DEST_IND); 3338 3339 reo_ent_ring->info1 = le32_encode_bits(rx_tid->cur_sn, 3340 HAL_REO_ENTR_RING_INFO1_MPDU_SEQ_NUM); 3341 dest_ring_info0 = le32_get_bits(reo_dest_ring->info0, 3342 HAL_REO_DEST_RING_INFO0_SRC_LINK_ID); 3343 reo_ent_ring->info2 = 3344 cpu_to_le32(u32_get_bits(dest_ring_info0, 3345 HAL_REO_ENTR_RING_INFO2_SRC_LINK_ID)); 3346 3347 ath12k_hal_srng_access_end(ab, srng); 3348 spin_unlock_bh(&srng->lock); 3349 3350 return 0; 3351 3352 err_free_desc: 3353 spin_lock_bh(&dp->rx_desc_lock); 3354 desc_info->in_use = false; 3355 desc_info->skb = NULL; 3356 list_add_tail(&desc_info->list, &dp->rx_desc_free_list); 3357 spin_unlock_bh(&dp->rx_desc_lock); 3358 err_unmap_dma: 3359 dma_unmap_single(ab->dev, buf_paddr, defrag_skb->len + skb_tailroom(defrag_skb), 3360 DMA_TO_DEVICE); 3361 return ret; 3362 
} 3363 3364 static int ath12k_dp_rx_h_cmp_frags(struct ath12k_base *ab, 3365 struct sk_buff *a, struct sk_buff *b) 3366 { 3367 int frag1, frag2; 3368 3369 frag1 = ath12k_dp_rx_h_frag_no(ab, a); 3370 frag2 = ath12k_dp_rx_h_frag_no(ab, b); 3371 3372 return frag1 - frag2; 3373 } 3374 3375 static void ath12k_dp_rx_h_sort_frags(struct ath12k_base *ab, 3376 struct sk_buff_head *frag_list, 3377 struct sk_buff *cur_frag) 3378 { 3379 struct sk_buff *skb; 3380 int cmp; 3381 3382 skb_queue_walk(frag_list, skb) { 3383 cmp = ath12k_dp_rx_h_cmp_frags(ab, skb, cur_frag); 3384 if (cmp < 0) 3385 continue; 3386 __skb_queue_before(frag_list, skb, cur_frag); 3387 return; 3388 } 3389 __skb_queue_tail(frag_list, cur_frag); 3390 } 3391 3392 static u64 ath12k_dp_rx_h_get_pn(struct ath12k *ar, struct sk_buff *skb) 3393 { 3394 struct ieee80211_hdr *hdr; 3395 u64 pn = 0; 3396 u8 *ehdr; 3397 u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; 3398 3399 hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz); 3400 ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control); 3401 3402 pn = ehdr[0]; 3403 pn |= (u64)ehdr[1] << 8; 3404 pn |= (u64)ehdr[4] << 16; 3405 pn |= (u64)ehdr[5] << 24; 3406 pn |= (u64)ehdr[6] << 32; 3407 pn |= (u64)ehdr[7] << 40; 3408 3409 return pn; 3410 } 3411 3412 static bool 3413 ath12k_dp_rx_h_defrag_validate_incr_pn(struct ath12k *ar, struct ath12k_dp_rx_tid *rx_tid) 3414 { 3415 struct ath12k_base *ab = ar->ab; 3416 enum hal_encrypt_type encrypt_type; 3417 struct sk_buff *first_frag, *skb; 3418 struct hal_rx_desc *desc; 3419 u64 last_pn; 3420 u64 cur_pn; 3421 3422 first_frag = skb_peek(&rx_tid->rx_frags); 3423 desc = (struct hal_rx_desc *)first_frag->data; 3424 3425 encrypt_type = ath12k_dp_rx_h_enctype(ab, desc); 3426 if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 && 3427 encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 && 3428 encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 && 3429 encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256) 3430 return true; 3431 3432 last_pn = ath12k_dp_rx_h_get_pn(ar, first_frag); 3433 skb_queue_walk(&rx_tid->rx_frags, skb) { 3434 if (skb == first_frag) 3435 continue; 3436 3437 cur_pn = ath12k_dp_rx_h_get_pn(ar, skb); 3438 if (cur_pn != last_pn + 1) 3439 return false; 3440 last_pn = cur_pn; 3441 } 3442 return true; 3443 } 3444 3445 static int ath12k_dp_rx_frag_h_mpdu(struct ath12k *ar, 3446 struct sk_buff *msdu, 3447 struct hal_reo_dest_ring *ring_desc) 3448 { 3449 struct ath12k_base *ab = ar->ab; 3450 struct hal_rx_desc *rx_desc; 3451 struct ath12k_peer *peer; 3452 struct ath12k_dp_rx_tid *rx_tid; 3453 struct sk_buff *defrag_skb = NULL; 3454 u32 peer_id; 3455 u16 seqno, frag_no; 3456 u8 tid; 3457 int ret = 0; 3458 bool more_frags; 3459 3460 rx_desc = (struct hal_rx_desc *)msdu->data; 3461 peer_id = ath12k_dp_rx_h_peer_id(ab, rx_desc); 3462 tid = ath12k_dp_rx_h_tid(ab, rx_desc); 3463 seqno = ath12k_dp_rx_h_seq_no(ab, rx_desc); 3464 frag_no = ath12k_dp_rx_h_frag_no(ab, msdu); 3465 more_frags = ath12k_dp_rx_h_more_frags(ab, msdu); 3466 3467 if (!ath12k_dp_rx_h_seq_ctrl_valid(ab, rx_desc) || 3468 !ath12k_dp_rx_h_fc_valid(ab, rx_desc) || 3469 tid > IEEE80211_NUM_TIDS) 3470 return -EINVAL; 3471 3472 /* received unfragmented packet in reo 3473 * exception ring, this shouldn't happen 3474 * as these packets typically come from 3475 * reo2sw srngs. 
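 * Such frames fail the WARN_ON_ONCE() check below; the caller then
 * frees the msdu and returns the link descriptor to the idle list.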
3476 */ 3477 if (WARN_ON_ONCE(!frag_no && !more_frags)) 3478 return -EINVAL; 3479 3480 spin_lock_bh(&ab->base_lock); 3481 peer = ath12k_peer_find_by_id(ab, peer_id); 3482 if (!peer) { 3483 ath12k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n", 3484 peer_id); 3485 ret = -ENOENT; 3486 goto out_unlock; 3487 } 3488 3489 if (!peer->dp_setup_done) { 3490 ath12k_warn(ab, "The peer %pM [%d] has uninitialized datapath\n", 3491 peer->addr, peer_id); 3492 ret = -ENOENT; 3493 goto out_unlock; 3494 } 3495 3496 rx_tid = &peer->rx_tid[tid]; 3497 3498 if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) || 3499 skb_queue_empty(&rx_tid->rx_frags)) { 3500 /* Flush stored fragments and start a new sequence */ 3501 ath12k_dp_rx_frags_cleanup(rx_tid, true); 3502 rx_tid->cur_sn = seqno; 3503 } 3504 3505 if (rx_tid->rx_frag_bitmap & BIT(frag_no)) { 3506 /* Fragment already present */ 3507 ret = -EINVAL; 3508 goto out_unlock; 3509 } 3510 3511 if ((!rx_tid->rx_frag_bitmap || frag_no > __fls(rx_tid->rx_frag_bitmap))) 3512 __skb_queue_tail(&rx_tid->rx_frags, msdu); 3513 else 3514 ath12k_dp_rx_h_sort_frags(ab, &rx_tid->rx_frags, msdu); 3515 3516 rx_tid->rx_frag_bitmap |= BIT(frag_no); 3517 if (!more_frags) 3518 rx_tid->last_frag_no = frag_no; 3519 3520 if (frag_no == 0) { 3521 rx_tid->dst_ring_desc = kmemdup(ring_desc, 3522 sizeof(*rx_tid->dst_ring_desc), 3523 GFP_ATOMIC); 3524 if (!rx_tid->dst_ring_desc) { 3525 ret = -ENOMEM; 3526 goto out_unlock; 3527 } 3528 } else { 3529 ath12k_dp_rx_link_desc_return(ab, &ring_desc->buf_addr_info, 3530 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 3531 } 3532 3533 if (!rx_tid->last_frag_no || 3534 rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) { 3535 mod_timer(&rx_tid->frag_timer, jiffies + 3536 ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS); 3537 goto out_unlock; 3538 } 3539 3540 spin_unlock_bh(&ab->base_lock); 3541 timer_delete_sync(&rx_tid->frag_timer); 3542 spin_lock_bh(&ab->base_lock); 3543 3544 peer = ath12k_peer_find_by_id(ab, peer_id); 3545 if (!peer) 3546 goto err_frags_cleanup; 3547 3548 if (!ath12k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid)) 3549 goto err_frags_cleanup; 3550 3551 if (ath12k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb)) 3552 goto err_frags_cleanup; 3553 3554 if (!defrag_skb) 3555 goto err_frags_cleanup; 3556 3557 if (ath12k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb)) 3558 goto err_frags_cleanup; 3559 3560 ath12k_dp_rx_frags_cleanup(rx_tid, false); 3561 goto out_unlock; 3562 3563 err_frags_cleanup: 3564 dev_kfree_skb_any(defrag_skb); 3565 ath12k_dp_rx_frags_cleanup(rx_tid, true); 3566 out_unlock: 3567 spin_unlock_bh(&ab->base_lock); 3568 return ret; 3569 } 3570 3571 static int 3572 ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc, 3573 struct list_head *used_list, 3574 bool drop, u32 cookie) 3575 { 3576 struct ath12k_base *ab = ar->ab; 3577 struct sk_buff *msdu; 3578 struct ath12k_skb_rxcb *rxcb; 3579 struct hal_rx_desc *rx_desc; 3580 u16 msdu_len; 3581 u32 hal_rx_desc_sz = ab->hal.hal_desc_sz; 3582 struct ath12k_rx_desc_info *desc_info; 3583 u64 desc_va; 3584 3585 desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 | 3586 le32_to_cpu(desc->buf_va_lo)); 3587 desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va); 3588 3589 /* retry manual desc retrieval */ 3590 if (!desc_info) { 3591 desc_info = ath12k_dp_get_rx_desc(ab, cookie); 3592 if (!desc_info) { 3593 ath12k_warn(ab, "Invalid cookie in DP rx error descriptor retrieval: 0x%x\n", 3594 cookie); 3595 return -EINVAL; 
3596 } 3597 } 3598 3599 if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC) 3600 ath12k_warn(ab, "RX exception, check HW CC implementation"); 3601 3602 msdu = desc_info->skb; 3603 desc_info->skb = NULL; 3604 3605 list_add_tail(&desc_info->list, used_list); 3606 3607 rxcb = ATH12K_SKB_RXCB(msdu); 3608 dma_unmap_single(ar->ab->dev, rxcb->paddr, 3609 msdu->len + skb_tailroom(msdu), 3610 DMA_FROM_DEVICE); 3611 3612 if (drop) { 3613 dev_kfree_skb_any(msdu); 3614 return 0; 3615 } 3616 3617 rcu_read_lock(); 3618 if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) { 3619 dev_kfree_skb_any(msdu); 3620 goto exit; 3621 } 3622 3623 if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) { 3624 dev_kfree_skb_any(msdu); 3625 goto exit; 3626 } 3627 3628 rx_desc = (struct hal_rx_desc *)msdu->data; 3629 msdu_len = ath12k_dp_rx_h_msdu_len(ar->ab, rx_desc); 3630 if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) { 3631 ath12k_warn(ar->ab, "invalid msdu len %u", msdu_len); 3632 ath12k_dbg_dump(ar->ab, ATH12K_DBG_DATA, NULL, "", rx_desc, 3633 sizeof(*rx_desc)); 3634 dev_kfree_skb_any(msdu); 3635 goto exit; 3636 } 3637 3638 skb_put(msdu, hal_rx_desc_sz + msdu_len); 3639 3640 if (ath12k_dp_rx_frag_h_mpdu(ar, msdu, desc)) { 3641 dev_kfree_skb_any(msdu); 3642 ath12k_dp_rx_link_desc_return(ar->ab, &desc->buf_addr_info, 3643 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 3644 } 3645 exit: 3646 rcu_read_unlock(); 3647 return 0; 3648 } 3649 3650 int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi, 3651 int budget) 3652 { 3653 struct ath12k_hw_group *ag = ab->ag; 3654 struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES]; 3655 u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC]; 3656 int num_buffs_reaped[ATH12K_MAX_DEVICES] = {}; 3657 struct dp_link_desc_bank *link_desc_banks; 3658 enum hal_rx_buf_return_buf_manager rbm; 3659 struct hal_rx_msdu_link *link_desc_va; 3660 int tot_n_bufs_reaped, quota, ret, i; 3661 struct hal_reo_dest_ring *reo_desc; 3662 struct dp_rxdma_ring *rx_ring; 3663 struct dp_srng *reo_except; 3664 struct ath12k_hw_link *hw_links = ag->hw_links; 3665 struct ath12k_base *partner_ab; 3666 u8 hw_link_id, device_id; 3667 u32 desc_bank, num_msdus; 3668 struct hal_srng *srng; 3669 struct ath12k *ar; 3670 dma_addr_t paddr; 3671 bool is_frag; 3672 bool drop; 3673 int pdev_id; 3674 3675 tot_n_bufs_reaped = 0; 3676 quota = budget; 3677 3678 for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) 3679 INIT_LIST_HEAD(&rx_desc_used_list[device_id]); 3680 3681 reo_except = &ab->dp.reo_except_ring; 3682 3683 srng = &ab->hal.srng_list[reo_except->ring_id]; 3684 3685 spin_lock_bh(&srng->lock); 3686 3687 ath12k_hal_srng_access_begin(ab, srng); 3688 3689 while (budget && 3690 (reo_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) { 3691 drop = false; 3692 ab->device_stats.err_ring_pkts++; 3693 3694 ret = ath12k_hal_desc_reo_parse_err(ab, reo_desc, &paddr, 3695 &desc_bank); 3696 if (ret) { 3697 ath12k_warn(ab, "failed to parse error reo desc %d\n", 3698 ret); 3699 continue; 3700 } 3701 3702 hw_link_id = le32_get_bits(reo_desc->info0, 3703 HAL_REO_DEST_RING_INFO0_SRC_LINK_ID); 3704 device_id = hw_links[hw_link_id].device_id; 3705 partner_ab = ath12k_ag_to_ab(ag, device_id); 3706 3707 pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params, 3708 hw_links[hw_link_id].pdev_idx); 3709 ar = partner_ab->pdevs[pdev_id].ar; 3710 3711 link_desc_banks = partner_ab->dp.link_desc_banks; 3712 link_desc_va = link_desc_banks[desc_bank].vaddr + 3713 (paddr - link_desc_banks[desc_bank].paddr); 3714
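		/* The link descriptor carries up to HAL_NUM_RX_MSDUS_PER_LINK_DESC
		 * MSDU cookies; fetch them and validate the return buffer manager
		 * before reaping each buffer below.
		 */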
ath12k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies, 3715 &rbm); 3716 if (rbm != partner_ab->dp.idle_link_rbm && 3717 rbm != HAL_RX_BUF_RBM_SW3_BM && 3718 rbm != partner_ab->hw_params->hal_params->rx_buf_rbm) { 3719 ab->device_stats.invalid_rbm++; 3720 ath12k_warn(ab, "invalid return buffer manager %d\n", rbm); 3721 ath12k_dp_rx_link_desc_return(partner_ab, 3722 &reo_desc->buf_addr_info, 3723 HAL_WBM_REL_BM_ACT_REL_MSDU); 3724 continue; 3725 } 3726 3727 is_frag = !!(le32_to_cpu(reo_desc->rx_mpdu_info.info0) & 3728 RX_MPDU_DESC_INFO0_FRAG_FLAG); 3729 3730 /* Process only rx fragments with one msdu per link desc below, and drop 3731 * msdus indicated due to error reasons. 3732 * Dynamic fragmentation is not supported in multi-link clients, so drop the 3733 * partner device buffers. 3734 */ 3735 if (!is_frag || num_msdus > 1 || 3736 partner_ab->device_id != ab->device_id) { 3737 drop = true; 3738 3739 /* Return the link desc back to wbm idle list */ 3740 ath12k_dp_rx_link_desc_return(partner_ab, 3741 &reo_desc->buf_addr_info, 3742 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 3743 } 3744 3745 for (i = 0; i < num_msdus; i++) { 3746 if (!ath12k_dp_process_rx_err_buf(ar, reo_desc, 3747 &rx_desc_used_list[device_id], 3748 drop, 3749 msdu_cookies[i])) { 3750 num_buffs_reaped[device_id]++; 3751 tot_n_bufs_reaped++; 3752 } 3753 } 3754 3755 if (tot_n_bufs_reaped >= quota) { 3756 tot_n_bufs_reaped = quota; 3757 goto exit; 3758 } 3759 3760 budget = quota - tot_n_bufs_reaped; 3761 } 3762 3763 exit: 3764 ath12k_hal_srng_access_end(ab, srng); 3765 3766 spin_unlock_bh(&srng->lock); 3767 3768 for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) { 3769 if (!num_buffs_reaped[device_id]) 3770 continue; 3771 3772 partner_ab = ath12k_ag_to_ab(ag, device_id); 3773 rx_ring = &partner_ab->dp.rx_refill_buf_ring; 3774 3775 ath12k_dp_rx_bufs_replenish(partner_ab, rx_ring, 3776 &rx_desc_used_list[device_id], 3777 num_buffs_reaped[device_id]); 3778 } 3779 3780 return tot_n_bufs_reaped; 3781 } 3782 3783 static void ath12k_dp_rx_null_q_desc_sg_drop(struct ath12k *ar, 3784 int msdu_len, 3785 struct sk_buff_head *msdu_list) 3786 { 3787 struct sk_buff *skb, *tmp; 3788 struct ath12k_skb_rxcb *rxcb; 3789 int n_buffs; 3790 3791 n_buffs = DIV_ROUND_UP(msdu_len, 3792 (DP_RX_BUFFER_SIZE - ar->ab->hal.hal_desc_sz)); 3793 3794 skb_queue_walk_safe(msdu_list, skb, tmp) { 3795 rxcb = ATH12K_SKB_RXCB(skb); 3796 if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO && 3797 rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) { 3798 if (!n_buffs) 3799 break; 3800 __skb_unlink(skb, msdu_list); 3801 dev_kfree_skb_any(skb); 3802 n_buffs--; 3803 } 3804 } 3805 } 3806 3807 static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu, 3808 struct ath12k_dp_rx_info *rx_info, 3809 struct sk_buff_head *msdu_list) 3810 { 3811 struct ath12k_base *ab = ar->ab; 3812 u16 msdu_len; 3813 struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; 3814 u8 l3pad_bytes; 3815 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 3816 u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; 3817 3818 msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc); 3819 3820 if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) { 3821 /* First buffer will be freed by the caller, so deduct its length */ 3822 msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz); 3823 ath12k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list); 3824 return -EINVAL; 3825 } 3826 3827 /* Even after cleaning up the sg buffers in the msdu list with the
static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu,
				      struct ath12k_dp_rx_info *rx_info,
				      struct sk_buff_head *msdu_list)
{
	struct ath12k_base *ab = ar->ab;
	u16 msdu_len;
	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
	u8 l3pad_bytes;
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;

	msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);

	if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) {
		/* The first buffer will be freed by the caller, so deduct
		 * its length.
		 */
		msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz);
		ath12k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
		return -EINVAL;
	}

	/* Even after cleaning up the sg buffers in the msdu list with the
	 * above check, any msdu received with the continuation flag still
	 * set must be dropped as invalid. This protects against some random
	 * err frame carrying the continuation flag.
	 */
	if (rxcb->is_continuation)
		return -EINVAL;

	if (!ath12k_dp_rx_h_msdu_done(ab, desc)) {
		ath12k_warn(ar->ab,
			    "msdu_done bit not set in null_q_desc processing\n");
		__skb_queue_purge(msdu_list);
		return -EIO;
	}

	/* Handle NULL queue descriptor violations arising out of a missing
	 * REO queue for a given peer or a given TID. This typically happens
	 * if a packet is received on a QoS-enabled TID before the ADDBA
	 * negotiation for that TID has set up the TID queue. It may also
	 * happen for MC/BC frames if they are not routed to the non-QoS TID
	 * queue in the absence of any other default TID queue. This error
	 * can show up both in a REO destination ring and in a WBM release
	 * ring.
	 */

	if (rxcb->is_frag) {
		skb_pull(msdu, hal_rx_desc_sz);
	} else {
		l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc);

		if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
			return -EINVAL;

		skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
		skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
	}
	if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, desc, msdu)))
		return -EINVAL;

	ath12k_dp_rx_h_fetch_info(ab, desc, rx_info);
	ath12k_dp_rx_h_ppdu(ar, rx_info);
	ath12k_dp_rx_h_mpdu(ar, msdu, desc, rx_info);

	rxcb->tid = rx_info->tid;

	/* Note that the caller has access to the msdu and will complete
	 * rx with mac80211, so there is no need to clean up amsdu_list.
	 */

	return 0;
}
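
/* Map a REO error code seen on the WBM release path to a drop/deliver
 * decision. Only DESC_ADDR_ZERO (NULL queue descriptor) frames are
 * candidates for recovery; every other error code is dropped for now.
 */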
static bool ath12k_dp_rx_h_reo_err(struct ath12k *ar, struct sk_buff *msdu,
				   struct ath12k_dp_rx_info *rx_info,
				   struct sk_buff_head *msdu_list)
{
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	bool drop = false;

	ar->ab->device_stats.reo_error[rxcb->err_code]++;

	switch (rxcb->err_code) {
	case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
		if (ath12k_dp_rx_h_null_q_desc(ar, msdu, rx_info, msdu_list))
			drop = true;
		break;
	case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
		/* TODO: Do not drop PN failed packets in the driver;
		 * instead, it is good to drop such packets in mac80211
		 * after incrementing the replay counters.
		 */
		fallthrough;
	default:
		/* TODO: Review other errors and process them to mac80211
		 * as appropriate.
		 */
		drop = true;
		break;
	}

	return drop;
}

static bool ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
					struct ath12k_dp_rx_info *rx_info)
{
	struct ath12k_base *ab = ar->ab;
	u16 msdu_len;
	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
	u8 l3pad_bytes;
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;

	rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, desc);
	rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, desc);

	l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc);
	msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);

	if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE) {
		ath12k_dbg(ab, ATH12K_DBG_DATA,
			   "invalid msdu len in tkip mic err %u\n", msdu_len);
		ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", desc,
				sizeof(*desc));
		return true;
	}

	skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
	skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);

	if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, desc, msdu)))
		return true;

	ath12k_dp_rx_h_ppdu(ar, rx_info);

	rx_info->rx_status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
				     RX_FLAG_DECRYPTED);

	ath12k_dp_rx_h_undecap(ar, msdu, desc,
			       HAL_ENCRYPT_TYPE_TKIP_MIC, rx_info->rx_status, false);
	return false;
}

static bool ath12k_dp_rx_h_rxdma_err(struct ath12k *ar, struct sk_buff *msdu,
				     struct ath12k_dp_rx_info *rx_info)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
	bool drop = false;
	u32 err_bitmap;

	ar->ab->device_stats.rxdma_error[rxcb->err_code]++;

	switch (rxcb->err_code) {
	case HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR:
	case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
		err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
		if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) {
			ath12k_dp_rx_h_fetch_info(ab, rx_desc, rx_info);
			drop = ath12k_dp_rx_h_tkip_mic_err(ar, msdu, rx_info);
			break;
		}
		fallthrough;
	default:
		/* TODO: Review other rxdma error codes to check if anything
		 * is worth reporting to mac80211.
		 */
		drop = true;
		break;
	}

	return drop;
}

static void ath12k_dp_rx_wbm_err(struct ath12k *ar,
				 struct napi_struct *napi,
				 struct sk_buff *msdu,
				 struct sk_buff_head *msdu_list)
{
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	struct ieee80211_rx_status rxs = {0};
	struct ath12k_dp_rx_info rx_info;
	bool drop = true;

	rx_info.addr2_present = false;
	rx_info.rx_status = &rxs;

	switch (rxcb->err_rel_src) {
	case HAL_WBM_REL_SRC_MODULE_REO:
		drop = ath12k_dp_rx_h_reo_err(ar, msdu, &rx_info, msdu_list);
		break;
	case HAL_WBM_REL_SRC_MODULE_RXDMA:
		drop = ath12k_dp_rx_h_rxdma_err(ar, msdu, &rx_info);
		break;
	default:
		/* msdu will get freed */
		break;
	}

	if (drop) {
		dev_kfree_skb_any(msdu);
		return;
	}

	ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_info);
}
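
/* Reap the WBM error release ring. Processing happens in two passes:
 * the ring is first drained under the srng lock, with each MSDU unmapped
 * and queued (continuation fragments are parked on scatter_msdu_list
 * until the final fragment identifies the source hw link), and the
 * queued MSDUs are then dispatched to the per-error handlers outside
 * the lock.
 */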
int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
				 struct napi_struct *napi, int budget)
{
	struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES];
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k *ar;
	struct ath12k_dp *dp = &ab->dp;
	struct dp_rxdma_ring *rx_ring;
	struct hal_rx_wbm_rel_info err_info;
	struct hal_srng *srng;
	struct sk_buff *msdu;
	struct sk_buff_head msdu_list, scatter_msdu_list;
	struct ath12k_skb_rxcb *rxcb;
	void *rx_desc;
	int num_buffs_reaped[ATH12K_MAX_DEVICES] = {};
	int total_num_buffs_reaped = 0;
	struct ath12k_rx_desc_info *desc_info;
	struct ath12k_device_dp_stats *device_stats = &ab->device_stats;
	struct ath12k_hw_link *hw_links = ag->hw_links;
	struct ath12k_base *partner_ab;
	u8 hw_link_id, device_id;
	int ret, pdev_id;
	struct hal_rx_desc *msdu_data;

	__skb_queue_head_init(&msdu_list);
	__skb_queue_head_init(&scatter_msdu_list);

	for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++)
		INIT_LIST_HEAD(&rx_desc_used_list[device_id]);

	srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
	spin_lock_bh(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	while (budget) {
		rx_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng);
		if (!rx_desc)
			break;

		ret = ath12k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
		if (ret) {
			ath12k_warn(ab,
				    "failed to parse rx error in wbm_rel ring desc %d\n",
				    ret);
			continue;
		}

		desc_info = err_info.rx_desc;

		/* retry manual desc retrieval if hw cc is not done */
		if (!desc_info) {
			desc_info = ath12k_dp_get_rx_desc(ab, err_info.cookie);
			if (!desc_info) {
				ath12k_warn(ab, "Invalid cookie in DP WBM rx error descriptor retrieval: 0x%x\n",
					    err_info.cookie);
				continue;
			}
		}

		if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
			ath12k_warn(ab, "WBM RX err, Check HW CC implementation");

		msdu = desc_info->skb;
		desc_info->skb = NULL;

		device_id = desc_info->device_id;
		partner_ab = ath12k_ag_to_ab(ag, device_id);
		if (unlikely(!partner_ab)) {
			dev_kfree_skb_any(msdu);

			/* In case the continuation bit was set in the
			 * previous record, clean up scatter_msdu_list.
			 */
			ath12k_dp_clean_up_skb_list(&scatter_msdu_list);
			continue;
		}

		list_add_tail(&desc_info->list, &rx_desc_used_list[device_id]);

		rxcb = ATH12K_SKB_RXCB(msdu);
		dma_unmap_single(partner_ab->dev, rxcb->paddr,
				 msdu->len + skb_tailroom(msdu),
				 DMA_FROM_DEVICE);

		num_buffs_reaped[device_id]++;
		total_num_buffs_reaped++;

		/* A scattered MSDU consumes budget only once, when its
		 * final (non-continuation) fragment arrives.
		 */
		if (!err_info.continuation)
			budget--;

		if (err_info.push_reason !=
		    HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
			dev_kfree_skb_any(msdu);
			continue;
		}

		msdu_data = (struct hal_rx_desc *)msdu->data;
		rxcb->err_rel_src = err_info.err_rel_src;
		rxcb->err_code = err_info.err_code;
		rxcb->is_first_msdu = err_info.first_msdu;
		rxcb->is_last_msdu = err_info.last_msdu;
		rxcb->is_continuation = err_info.continuation;
		rxcb->rx_desc = msdu_data;

		if (err_info.continuation) {
			__skb_queue_tail(&scatter_msdu_list, msdu);
			continue;
		}

		hw_link_id = ath12k_dp_rx_get_msdu_src_link(partner_ab,
							    msdu_data);
		if (hw_link_id >= ATH12K_GROUP_MAX_RADIO) {
			dev_kfree_skb_any(msdu);

			/* In case the continuation bit was set in the
			 * previous record, clean up scatter_msdu_list.
			 */
			ath12k_dp_clean_up_skb_list(&scatter_msdu_list);
			continue;
		}
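
		/* This MSDU terminates a scattered frame: tag every queued
		 * continuation fragment with the hw link resolved from the
		 * final fragment, then move the whole chain onto msdu_list
		 * so it is delivered (or dropped) as one unit below.
		 */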
		if (!skb_queue_empty(&scatter_msdu_list)) {
			struct sk_buff *msdu;

			skb_queue_walk(&scatter_msdu_list, msdu) {
				rxcb = ATH12K_SKB_RXCB(msdu);
				rxcb->hw_link_id = hw_link_id;
			}

			skb_queue_splice_tail_init(&scatter_msdu_list,
						   &msdu_list);
		}

		rxcb = ATH12K_SKB_RXCB(msdu);
		rxcb->hw_link_id = hw_link_id;
		__skb_queue_tail(&msdu_list, msdu);
	}

	/* In case the continuation bit was set in the last record,
	 * clean up scatter_msdu_list.
	 */
	ath12k_dp_clean_up_skb_list(&scatter_msdu_list);

	ath12k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	if (!total_num_buffs_reaped)
		goto done;

	for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) {
		if (!num_buffs_reaped[device_id])
			continue;

		partner_ab = ath12k_ag_to_ab(ag, device_id);
		rx_ring = &partner_ab->dp.rx_refill_buf_ring;

		ath12k_dp_rx_bufs_replenish(ab, rx_ring,
					    &rx_desc_used_list[device_id],
					    num_buffs_reaped[device_id]);
	}

	rcu_read_lock();
	while ((msdu = __skb_dequeue(&msdu_list))) {
		rxcb = ATH12K_SKB_RXCB(msdu);
		hw_link_id = rxcb->hw_link_id;

		device_id = hw_links[hw_link_id].device_id;
		partner_ab = ath12k_ag_to_ab(ag, device_id);
		if (unlikely(!partner_ab)) {
			ath12k_dbg(ab, ATH12K_DBG_DATA,
				   "Unable to process WBM error msdu due to invalid hw link id %d device id %d\n",
				   hw_link_id, device_id);
			dev_kfree_skb_any(msdu);
			continue;
		}

		pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params,
						      hw_links[hw_link_id].pdev_idx);
		ar = partner_ab->pdevs[pdev_id].ar;

		if (!ar || !rcu_dereference(ar->ab->pdevs_active[pdev_id])) {
			dev_kfree_skb_any(msdu);
			continue;
		}

		if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) {
			dev_kfree_skb_any(msdu);
			continue;
		}

		if (rxcb->err_rel_src < HAL_WBM_REL_SRC_MODULE_MAX) {
			device_id = ar->ab->device_id;
			device_stats->rx_wbm_rel_source[rxcb->err_rel_src][device_id]++;
		}

		ath12k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list);
	}
	rcu_read_unlock();
done:
	return total_num_buffs_reaped;
}
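
/* Drain the REO status ring and complete the REO commands waiting on
 * dp->reo_cmd_list. Each status TLV carries the cmd_num of the command
 * it acknowledges; the matching entry is removed from the list and its
 * handler invoked with the parsed status.
 */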
void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct hal_tlv_64_hdr *hdr;
	struct hal_srng *srng;
	struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
	bool found = false;
	u16 tag;
	struct hal_reo_status reo_status;

	srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];

	memset(&reo_status, 0, sizeof(reo_status));

	spin_lock_bh(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	while ((hdr = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
		tag = le64_get_bits(hdr->tl, HAL_SRNG_TLV_HDR_TAG);

		switch (tag) {
		case HAL_REO_GET_QUEUE_STATS_STATUS:
			ath12k_hal_reo_status_queue_stats(ab, hdr,
							  &reo_status);
			break;
		case HAL_REO_FLUSH_QUEUE_STATUS:
			ath12k_hal_reo_flush_queue_status(ab, hdr,
							  &reo_status);
			break;
		case HAL_REO_FLUSH_CACHE_STATUS:
			ath12k_hal_reo_flush_cache_status(ab, hdr,
							  &reo_status);
			break;
		case HAL_REO_UNBLOCK_CACHE_STATUS:
			ath12k_hal_reo_unblk_cache_status(ab, hdr,
							  &reo_status);
			break;
		case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
			ath12k_hal_reo_flush_timeout_list_status(ab, hdr,
								 &reo_status);
			break;
		case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
			ath12k_hal_reo_desc_thresh_reached_status(ab, hdr,
								  &reo_status);
			break;
		case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
			ath12k_hal_reo_update_rx_reo_queue_status(ab, hdr,
								  &reo_status);
			break;
		default:
			ath12k_warn(ab, "Unknown reo status type %d\n", tag);
			continue;
		}

		spin_lock_bh(&dp->reo_cmd_lock);
		list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
			if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
				found = true;
				list_del(&cmd->list);
				break;
			}
		}
		spin_unlock_bh(&dp->reo_cmd_lock);

		if (found) {
			cmd->handler(dp, (void *)&cmd->data,
				     reo_status.uniform_hdr.cmd_status);
			kfree(cmd);
		}

		found = false;
	}

	ath12k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);
}

void ath12k_dp_rx_free(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct dp_srng *srng;
	int i;

	ath12k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);

	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
		if (ab->hw_params->rx_mac_buf_ring)
			ath12k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
		if (!ab->hw_params->rxdma1_enable) {
			srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
			ath12k_dp_srng_cleanup(ab, srng);
		}
	}

	for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++)
		ath12k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);

	ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);

	ath12k_dp_rxdma_buf_free(ab);
}

void ath12k_dp_rx_pdev_free(struct ath12k_base *ab, int mac_id)
{
	struct ath12k *ar = ab->pdevs[mac_id].ar;

	ath12k_dp_rx_pdev_srng_free(ar);
}

int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct htt_rx_ring_tlv_filter tlv_filter = {0};
	u32 ring_id;
	int ret;
	u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;

	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;

	tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING;
	tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR;
	tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST |
				       HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST |
				       HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA;
	tlv_filter.offset_valid = true;
	tlv_filter.rx_packet_offset = hal_rx_desc_sz;

	tlv_filter.rx_mpdu_start_offset =
		ab->hal_rx_ops->rx_desc_get_mpdu_start_offset();
	tlv_filter.rx_msdu_end_offset =
		ab->hal_rx_ops->rx_desc_get_msdu_end_offset();

	if (ath12k_dp_wmask_compaction_rx_tlv_supported(ab)) {
		tlv_filter.rx_mpdu_start_wmask =
			ab->hw_params->hal_ops->rxdma_ring_wmask_rx_mpdu_start();
		tlv_filter.rx_msdu_end_wmask =
			ab->hw_params->hal_ops->rxdma_ring_wmask_rx_msdu_end();
		ath12k_dbg(ab, ATH12K_DBG_DATA,
			   "Configuring compact tlv masks rx_mpdu_start_wmask 0x%x rx_msdu_end_wmask 0x%x\n",
			   tlv_filter.rx_mpdu_start_wmask, tlv_filter.rx_msdu_end_wmask);
	}

	ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, 0,
					       HAL_RXDMA_BUF,
					       DP_RXDMA_REFILL_RING_SIZE,
					       &tlv_filter);

	return ret;
}
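
/* WCN7850 variant of the rxdma ring selection config. Unlike the QCN9274
 * path above, it also subscribes to the packet header TLV via
 * rx_header_offset and programs the filter on each per-MAC buffer ring
 * rather than on the common refill ring.
 */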
int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct htt_rx_ring_tlv_filter tlv_filter = {0};
	u32 ring_id;
	int ret = 0;
	u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
	int i;

	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;

	tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING;
	tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR;
	tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST |
				       HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST |
				       HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA;
	tlv_filter.offset_valid = true;
	tlv_filter.rx_packet_offset = hal_rx_desc_sz;

	tlv_filter.rx_header_offset = offsetof(struct hal_rx_desc_wcn7850, pkt_hdr_tlv);

	tlv_filter.rx_mpdu_start_offset =
		ab->hal_rx_ops->rx_desc_get_mpdu_start_offset();
	tlv_filter.rx_msdu_end_offset =
		ab->hal_rx_ops->rx_desc_get_msdu_end_offset();

	/* TODO: Selectively subscribe to the required qwords within
	 * msdu_end and mpdu_start, set up the mask in the message below,
	 * and modify the rx_desc struct accordingly.
	 */

	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
		ring_id = dp->rx_mac_buf_ring[i].ring_id;
		ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, i,
						       HAL_RXDMA_BUF,
						       DP_RXDMA_REFILL_RING_SIZE,
						       &tlv_filter);
	}

	return ret;
}

int ath12k_dp_rx_htt_setup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	u32 ring_id;
	int i, ret;

	/* TODO: Need to verify the HTT setup for QCN9224 */
	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
	ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, 0, HAL_RXDMA_BUF);
	if (ret) {
		ath12k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
			    ret);
		return ret;
	}

	if (ab->hw_params->rx_mac_buf_ring) {
		for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
			ring_id = dp->rx_mac_buf_ring[i].ring_id;
			ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
							  i, HAL_RXDMA_BUF);
			if (ret) {
				ath12k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n",
					    i, ret);
				return ret;
			}
		}
	}

	for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) {
		ring_id = dp->rxdma_err_dst_ring[i].ring_id;
		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
						  i, HAL_RXDMA_DST);
		if (ret) {
			ath12k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n",
				    i, ret);
			return ret;
		}
	}

	if (ab->hw_params->rxdma1_enable) {
		ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
						  0, HAL_RXDMA_MONITOR_BUF);
		if (ret) {
			ath12k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
				    ret);
			return ret;
		}
	} else {
		for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
			ring_id =
				dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
			ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, i,
							  HAL_RXDMA_MONITOR_STATUS);
			if (ret) {
				ath12k_warn(ab,
					    "failed to configure mon_status_refill_ring%d %d\n",
					    i, ret);
				return ret;
			}
		}
	}

	ret = ab->hw_params->hw_ops->rxdma_ring_sel_config(ab);
	if (ret) {
		ath12k_warn(ab, "failed to setup rxdma ring selection config\n");
		return ret;
	}

	return 0;
}
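
/* Allocate all rx SRNGs owned by the SoC: the common refill ring, the
 * optional per-MAC buffer rings, the rxdma error destination rings and
 * the monitor rings. Buffer attachment happens afterwards in
 * ath12k_dp_rxdma_buf_setup().
 */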
int ath12k_dp_rx_alloc(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct dp_srng *srng;
	int i, ret;

	idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
	spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);

	ret = ath12k_dp_srng_setup(ab,
				   &dp->rx_refill_buf_ring.refill_buf_ring,
				   HAL_RXDMA_BUF, 0, 0,
				   DP_RXDMA_BUF_RING_SIZE);
	if (ret) {
		ath12k_warn(ab, "failed to setup rx_refill_buf_ring\n");
		return ret;
	}

	if (ab->hw_params->rx_mac_buf_ring) {
		for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
			ret = ath12k_dp_srng_setup(ab,
						   &dp->rx_mac_buf_ring[i],
						   HAL_RXDMA_BUF, 1,
						   i, DP_RX_MAC_BUF_RING_SIZE);
			if (ret) {
				ath12k_warn(ab, "failed to setup rx_mac_buf_ring %d\n",
					    i);
				return ret;
			}
		}
	}

	for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) {
		ret = ath12k_dp_srng_setup(ab, &dp->rxdma_err_dst_ring[i],
					   HAL_RXDMA_DST, 0, i,
					   DP_RXDMA_ERR_DST_RING_SIZE);
		if (ret) {
			ath12k_warn(ab, "failed to setup rxdma_err_dst_ring %d\n", i);
			return ret;
		}
	}

	if (ab->hw_params->rxdma1_enable) {
		ret = ath12k_dp_srng_setup(ab,
					   &dp->rxdma_mon_buf_ring.refill_buf_ring,
					   HAL_RXDMA_MONITOR_BUF, 0, 0,
					   DP_RXDMA_MONITOR_BUF_RING_SIZE);
		if (ret) {
			ath12k_warn(ab, "failed to setup HAL_RXDMA_MONITOR_BUF\n");
			return ret;
		}
	} else {
		for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
			idr_init(&dp->rx_mon_status_refill_ring[i].bufs_idr);
			spin_lock_init(&dp->rx_mon_status_refill_ring[i].idr_lock);
		}

		for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
			srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
			ret = ath12k_dp_srng_setup(ab, srng,
						   HAL_RXDMA_MONITOR_STATUS, 0, i,
						   DP_RXDMA_MON_STATUS_RING_SIZE);
			if (ret) {
				ath12k_warn(ab, "failed to setup mon status ring %d\n",
					    i);
				return ret;
			}
		}
	}

	ret = ath12k_dp_rxdma_buf_setup(ab);
	if (ret) {
		ath12k_warn(ab, "failed to setup rxdma ring\n");
		return ret;
	}

	return 0;
}

int ath12k_dp_rx_pdev_alloc(struct ath12k_base *ab, int mac_id)
{
	struct ath12k *ar = ab->pdevs[mac_id].ar;
	struct ath12k_pdev_dp *dp = &ar->dp;
	u32 ring_id;
	int i;
	int ret;

	if (!ab->hw_params->rxdma1_enable)
		goto out;

	ret = ath12k_dp_rx_pdev_srng_alloc(ar);
	if (ret) {
		ath12k_warn(ab, "failed to setup rx srngs\n");
		return ret;
	}

	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
		ring_id = dp->rxdma_mon_dst_ring[i].ring_id;
		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
						  mac_id + i,
						  HAL_RXDMA_MONITOR_DST);
		if (ret) {
			ath12k_warn(ab,
				    "failed to configure rxdma_mon_dst_ring %d %d\n",
				    i, ret);
			return ret;
		}
	}
out:
	return 0;
}

static int ath12k_dp_rx_pdev_mon_status_attach(struct ath12k *ar)
{
	struct ath12k_pdev_dp *dp = &ar->dp;
	struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&dp->mon_data;

	skb_queue_head_init(&pmon->rx_status_q);

	pmon->mon_ppdu_status = DP_PPDU_STATUS_START;

	memset(&pmon->rx_mon_stats, 0,
	       sizeof(pmon->rx_mon_stats));
	return 0;
}
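
/* Per-pdev monitor-mode attach: initialize the status-tracking state
 * and, when rxdma1 is enabled, the MPDU list used while reassembling
 * data from the monitor destination ring.
 */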
int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar)
{
	struct ath12k_pdev_dp *dp = &ar->dp;
	struct ath12k_mon_data *pmon = &dp->mon_data;
	int ret = 0;

	ret = ath12k_dp_rx_pdev_mon_status_attach(ar);
	if (ret) {
		ath12k_warn(ar->ab, "pdev_mon_status_attach() failed\n");
		return ret;
	}

	pmon->mon_last_linkdesc_paddr = 0;
	pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
	spin_lock_init(&pmon->mon_lock);

	if (!ar->ab->hw_params->rxdma1_enable)
		return 0;

	INIT_LIST_HEAD(&pmon->dp_rx_mon_mpdu_list);
	pmon->mon_mpdu = NULL;

	return 0;
}