// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/ieee80211.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <crypto/hash.h>
#include "core.h"
#include "debug.h"
#include "hal_desc.h"
#include "hw.h"
#include "dp_rx.h"
#include "hal_rx.h"
#include "dp_tx.h"
#include "peer.h"
#include "dp_mon.h"
#include "debugfs_htt_stats.h"

#define ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)

static enum hal_encrypt_type ath12k_dp_rx_h_enctype(struct ath12k_base *ab,
						    struct hal_rx_desc *desc)
{
	if (!ab->hal_rx_ops->rx_desc_encrypt_valid(desc))
		return HAL_ENCRYPT_TYPE_OPEN;

	return ab->hal_rx_ops->rx_desc_get_encrypt_type(desc);
}

u8 ath12k_dp_rx_h_decap_type(struct ath12k_base *ab,
			     struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_decap_type(desc);
}

static u8 ath12k_dp_rx_h_mesh_ctl_present(struct ath12k_base *ab,
					  struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_mesh_ctl(desc);
}

static bool ath12k_dp_rx_h_seq_ctrl_valid(struct ath12k_base *ab,
					  struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
}

static bool ath12k_dp_rx_h_fc_valid(struct ath12k_base *ab,
				    struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_mpdu_fc_valid(desc);
}

static bool ath12k_dp_rx_h_more_frags(struct ath12k_base *ab,
				      struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data + ab->hal.hal_desc_sz);
	return ieee80211_has_morefrags(hdr->frame_control);
}

static u16 ath12k_dp_rx_h_frag_no(struct ath12k_base *ab,
				  struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data + ab->hal.hal_desc_sz);
	return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
}

static u16 ath12k_dp_rx_h_seq_no(struct ath12k_base *ab,
				 struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_mpdu_start_seq_no(desc);
}

static bool ath12k_dp_rx_h_msdu_done(struct ath12k_base *ab,
				     struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->dp_rx_h_msdu_done(desc);
}

static bool ath12k_dp_rx_h_l4_cksum_fail(struct ath12k_base *ab,
					 struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->dp_rx_h_l4_cksum_fail(desc);
}

static bool ath12k_dp_rx_h_ip_cksum_fail(struct ath12k_base *ab,
					 struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->dp_rx_h_ip_cksum_fail(desc);
}

static bool ath12k_dp_rx_h_is_decrypted(struct ath12k_base *ab,
					struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->dp_rx_h_is_decrypted(desc);
}

u32 ath12k_dp_rx_h_mpdu_err(struct ath12k_base *ab,
			    struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->dp_rx_h_mpdu_err(desc);
}

static u16 ath12k_dp_rx_h_msdu_len(struct ath12k_base *ab,
				   struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_msdu_len(desc);
}

static u8 ath12k_dp_rx_h_sgi(struct ath12k_base *ab,
			     struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_msdu_sgi(desc);
}

static u8 ath12k_dp_rx_h_rate_mcs(struct ath12k_base *ab,
				  struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_msdu_rate_mcs(desc);
}

static u8 ath12k_dp_rx_h_rx_bw(struct ath12k_base *ab,
			       struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_msdu_rx_bw(desc);
}

static u32 ath12k_dp_rx_h_freq(struct ath12k_base *ab,
			       struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_msdu_freq(desc);
}

static u8 ath12k_dp_rx_h_pkt_type(struct ath12k_base *ab,
				  struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_msdu_pkt_type(desc);
}

static u8 ath12k_dp_rx_h_nss(struct ath12k_base *ab,
			     struct hal_rx_desc *desc)
{
	return hweight8(ab->hal_rx_ops->rx_desc_get_msdu_nss(desc));
}

static u8 ath12k_dp_rx_h_tid(struct ath12k_base *ab,
			     struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_mpdu_tid(desc);
}

static u16 ath12k_dp_rx_h_peer_id(struct ath12k_base *ab,
				  struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_mpdu_peer_id(desc);
}

u8 ath12k_dp_rx_h_l3pad(struct ath12k_base *ab,
			struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_l3_pad_bytes(desc);
}

static bool ath12k_dp_rx_h_first_msdu(struct ath12k_base *ab,
				      struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_first_msdu(desc);
}

static bool ath12k_dp_rx_h_last_msdu(struct ath12k_base *ab,
				     struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_last_msdu(desc);
}

static void ath12k_dp_rx_desc_end_tlv_copy(struct ath12k_base *ab,
					   struct hal_rx_desc *fdesc,
					   struct hal_rx_desc *ldesc)
{
	ab->hal_rx_ops->rx_desc_copy_end_tlv(fdesc, ldesc);
}

static void ath12k_dp_rxdesc_set_msdu_len(struct ath12k_base *ab,
					  struct hal_rx_desc *desc,
					  u16 len)
{
	ab->hal_rx_ops->rx_desc_set_msdu_len(desc, len);
}

u32 ath12k_dp_rxdesc_get_ppduid(struct ath12k_base *ab,
				struct hal_rx_desc *rx_desc)
{
	return ab->hal_rx_ops->rx_desc_get_mpdu_ppdu_id(rx_desc);
}

bool ath12k_dp_rxdesc_mpdu_valid(struct ath12k_base *ab,
				 struct hal_rx_desc *rx_desc)
{
	u32 tlv_tag;

	tlv_tag = ab->hal_rx_ops->rx_desc_get_mpdu_start_tag(rx_desc);

	return tlv_tag == HAL_RX_MPDU_START;
}

static bool ath12k_dp_rx_h_is_da_mcbc(struct ath12k_base *ab,
				      struct hal_rx_desc *desc)
{
	return (ath12k_dp_rx_h_first_msdu(ab, desc) &&
		ab->hal_rx_ops->rx_desc_is_da_mcbc(desc));
}

static bool ath12k_dp_rxdesc_mac_addr2_valid(struct ath12k_base *ab,
					     struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_mac_addr2_valid(desc);
}

static u8 *ath12k_dp_rxdesc_get_mpdu_start_addr2(struct ath12k_base *ab,
						 struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_mpdu_start_addr2(desc);
}

static void ath12k_dp_rx_desc_get_dot11_hdr(struct ath12k_base *ab,
					    struct hal_rx_desc *desc,
					    struct ieee80211_hdr *hdr)
{
	ab->hal_rx_ops->rx_desc_get_dot11_hdr(desc, hdr);
}

static void ath12k_dp_rx_desc_get_crypto_header(struct ath12k_base *ab,
						struct hal_rx_desc *desc,
						u8 *crypto_hdr,
						enum hal_encrypt_type enctype)
{
	ab->hal_rx_ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype);
}

static inline u8 ath12k_dp_rx_get_msdu_src_link(struct ath12k_base *ab,
						struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_msdu_src_link_id(desc);
}

static void ath12k_dp_clean_up_skb_list(struct sk_buff_head *skb_list)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(skb_list)))
		dev_kfree_skb_any(skb);
}

static size_t ath12k_dp_list_cut_nodes(struct list_head *list,
				       struct list_head *head,
				       size_t count)
{
	struct list_head *cur;
	struct ath12k_rx_desc_info *rx_desc;
	size_t nodes = 0;

	if (!count) {
		INIT_LIST_HEAD(list);
		goto out;
	}

	list_for_each(cur, head) {
		if (!count)
			break;

		rx_desc = list_entry(cur, struct ath12k_rx_desc_info, list);
		rx_desc->in_use = true;

		count--;
		nodes++;
	}

	list_cut_before(list, head, cur);
out:
	return nodes;
}

static void ath12k_dp_rx_enqueue_free(struct ath12k_dp *dp,
				      struct list_head *used_list)
{
	struct ath12k_rx_desc_info *rx_desc, *safe;

	/* Reset the use flag */
	list_for_each_entry_safe(rx_desc, safe, used_list, list)
		rx_desc->in_use = false;

	spin_lock_bh(&dp->rx_desc_lock);
	list_splice_tail(used_list, &dp->rx_desc_free_list);
	spin_unlock_bh(&dp->rx_desc_lock);
}

/* Returns number of Rx buffers replenished */
int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
				struct dp_rxdma_ring *rx_ring,
				struct list_head *used_list,
				int req_entries)
{
	struct ath12k_buffer_addr *desc;
	struct hal_srng *srng;
	struct sk_buff *skb;
	int num_free;
	int num_remain;
	u32 cookie;
	dma_addr_t paddr;
	struct ath12k_dp *dp = &ab->dp;
	struct ath12k_rx_desc_info *rx_desc;
	enum hal_rx_buf_return_buf_manager mgr = ab->hw_params->hal_params->rx_buf_rbm;

	req_entries = min(req_entries, rx_ring->bufs_max);

	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	num_free = ath12k_hal_srng_src_num_free(ab, srng, true);
	if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
		req_entries = num_free;

	req_entries = min(num_free, req_entries);
	num_remain = req_entries;

	if (!num_remain)
		goto out;

	/* Get the descriptor from free list */
	if (list_empty(used_list)) {
		spin_lock_bh(&dp->rx_desc_lock);
		req_entries = ath12k_dp_list_cut_nodes(used_list,
						       &dp->rx_desc_free_list,
						       num_remain);
		spin_unlock_bh(&dp->rx_desc_lock);
		num_remain = req_entries;
	}

	while (num_remain > 0) {
		skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
				    DP_RX_BUFFER_ALIGN_SIZE);
		if (!skb)
			break;

		if (!IS_ALIGNED((unsigned long)skb->data,
				DP_RX_BUFFER_ALIGN_SIZE)) {
			skb_pull(skb,
				 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
				 skb->data);
		}

		paddr = dma_map_single(ab->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(ab->dev, paddr))
			goto fail_free_skb;

		rx_desc = list_first_entry_or_null(used_list,
						   struct ath12k_rx_desc_info,
						   list);
		if (!rx_desc)
			goto fail_dma_unmap;

		rx_desc->skb = skb;
		cookie = rx_desc->cookie;

		desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
		if (!desc)
			goto fail_dma_unmap;

		list_del(&rx_desc->list);
		ATH12K_SKB_RXCB(skb)->paddr = paddr;

		num_remain--;

		ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
	}

	goto out;

fail_dma_unmap:
	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
			 DMA_FROM_DEVICE);
fail_free_skb:
	dev_kfree_skb_any(skb);
out:
	ath12k_hal_srng_access_end(ab, srng);

	if (!list_empty(used_list))
		ath12k_dp_rx_enqueue_free(dp, used_list);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;
}

static int ath12k_dp_rxdma_mon_buf_ring_free(struct ath12k_base *ab,
					     struct dp_rxdma_mon_ring *rx_ring)
{
	struct sk_buff *skb;
	int buf_id;

	spin_lock_bh(&rx_ring->idr_lock);
	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
		idr_remove(&rx_ring->bufs_idr, buf_id);
		/* TODO: Understand where internal driver does this dma_unmap
		 * of rxdma_buffer.
		 */
		dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	idr_destroy(&rx_ring->bufs_idr);
	spin_unlock_bh(&rx_ring->idr_lock);

	return 0;
}

static int ath12k_dp_rxdma_buf_free(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	int i;

	ath12k_dp_rxdma_mon_buf_ring_free(ab, &dp->rxdma_mon_buf_ring);

	if (ab->hw_params->rxdma1_enable)
		return 0;

	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++)
		ath12k_dp_rxdma_mon_buf_ring_free(ab,
						  &dp->rx_mon_status_refill_ring[i]);

	return 0;
}

static int ath12k_dp_rxdma_mon_ring_buf_setup(struct ath12k_base *ab,
					      struct dp_rxdma_mon_ring *rx_ring,
					      u32 ringtype)
{
	int num_entries;

	num_entries = rx_ring->refill_buf_ring.size /
		ath12k_hal_srng_get_entrysize(ab, ringtype);

	rx_ring->bufs_max = num_entries;

	if (ringtype == HAL_RXDMA_MONITOR_STATUS)
		ath12k_dp_mon_status_bufs_replenish(ab, rx_ring,
						    num_entries);
	else
		ath12k_dp_mon_buf_replenish(ab, rx_ring, num_entries);

	return 0;
}

static int ath12k_dp_rxdma_ring_buf_setup(struct ath12k_base *ab,
					  struct dp_rxdma_ring *rx_ring)
{
	LIST_HEAD(list);

	rx_ring->bufs_max = rx_ring->refill_buf_ring.size /
		ath12k_hal_srng_get_entrysize(ab, HAL_RXDMA_BUF);

	ath12k_dp_rx_bufs_replenish(ab, rx_ring, &list, 0);

	return 0;
}

static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct dp_rxdma_mon_ring *mon_ring;
	int ret, i;

	ret = ath12k_dp_rxdma_ring_buf_setup(ab, &dp->rx_refill_buf_ring);
	if (ret) {
		ath12k_warn(ab,
			    "failed to setup HAL_RXDMA_BUF\n");
		return ret;
	}

	if (ab->hw_params->rxdma1_enable) {
		ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab,
							 &dp->rxdma_mon_buf_ring,
							 HAL_RXDMA_MONITOR_BUF);
		if (ret)
			ath12k_warn(ab,
				    "failed to setup HAL_RXDMA_MONITOR_BUF\n");
		return ret;
	}

	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
		mon_ring = &dp->rx_mon_status_refill_ring[i];
		ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab, mon_ring,
							 HAL_RXDMA_MONITOR_STATUS);
		if (ret) {
			ath12k_warn(ab,
				    "failed to setup HAL_RXDMA_MONITOR_STATUS\n");
			return ret;
		}
	}

	return 0;
}

static void ath12k_dp_rx_pdev_srng_free(struct ath12k *ar)
{
	struct ath12k_pdev_dp *dp = &ar->dp;
	struct ath12k_base *ab = ar->ab;
	int i;

	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++)
		ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_dst_ring[i]);
}

void ath12k_dp_rx_pdev_reo_cleanup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	int i;

	for (i = 0; i < DP_REO_DST_RING_MAX; i++)
		ath12k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
}

int ath12k_dp_rx_pdev_reo_setup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	int ret;
	int i;

	for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
		ret = ath12k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
					   HAL_REO_DST, i, 0,
					   DP_REO_DST_RING_SIZE);
		if (ret) {
			ath12k_warn(ab, "failed to setup reo_dst_ring\n");
			goto err_reo_cleanup;
		}
	}

	return 0;

err_reo_cleanup:
	ath12k_dp_rx_pdev_reo_cleanup(ab);

	return ret;
}

static int ath12k_dp_rx_pdev_srng_alloc(struct ath12k *ar)
{
	struct ath12k_pdev_dp *dp = &ar->dp;
	struct ath12k_base *ab = ar->ab;
	int i;
	int ret;
	u32 mac_id = dp->mac_id;

	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
		ret = ath12k_dp_srng_setup(ar->ab,
					   &dp->rxdma_mon_dst_ring[i],
					   HAL_RXDMA_MONITOR_DST,
					   0, mac_id + i,
					   DP_RXDMA_MONITOR_DST_RING_SIZE(ab));
		if (ret) {
			ath12k_warn(ar->ab,
				    "failed to setup HAL_RXDMA_MONITOR_DST\n");
			return ret;
		}
	}

	return 0;
}

void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
	struct ath12k_dp_rx_reo_cache_flush_elem *cmd_cache, *tmp_cache;

	spin_lock_bh(&dp->reo_cmd_lock);
	list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
		list_del(&cmd->list);
		dma_unmap_single(ab->dev, cmd->data.qbuf.paddr_aligned,
				 cmd->data.qbuf.size, DMA_BIDIRECTIONAL);
		kfree(cmd->data.qbuf.vaddr);
		kfree(cmd);
	}

	list_for_each_entry_safe(cmd_cache, tmp_cache,
				 &dp->reo_cmd_cache_flush_list, list) {
		list_del(&cmd_cache->list);
		dp->reo_cmd_cache_flush_count--;
		dma_unmap_single(ab->dev, cmd_cache->data.qbuf.paddr_aligned,
				 cmd_cache->data.qbuf.size, DMA_BIDIRECTIONAL);
		kfree(cmd_cache->data.qbuf.vaddr);
		kfree(cmd_cache);
	}
	spin_unlock_bh(&dp->reo_cmd_lock);
}

static void ath12k_dp_reo_cmd_free(struct ath12k_dp *dp, void *ctx,
				   enum hal_reo_cmd_status status)
{
	struct ath12k_dp_rx_tid *rx_tid = ctx;

	if (status != HAL_REO_CMD_SUCCESS)
		ath12k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
			    rx_tid->tid, status);

	dma_unmap_single(dp->ab->dev, rx_tid->qbuf.paddr_aligned, rx_tid->qbuf.size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->qbuf.vaddr);
	rx_tid->qbuf.vaddr = NULL;
}

static int ath12k_dp_reo_cmd_send(struct ath12k_base *ab, struct ath12k_dp_rx_tid *rx_tid,
				  enum hal_reo_cmd_type type,
				  struct ath12k_hal_reo_cmd *cmd,
				  void (*cb)(struct ath12k_dp *dp, void *ctx,
					     enum hal_reo_cmd_status status))
{
	struct ath12k_dp *dp = &ab->dp;
	struct ath12k_dp_rx_reo_cmd *dp_cmd;
	struct hal_srng *cmd_ring;
	int cmd_num;

	cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
	cmd_num = ath12k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);

	/* cmd_num should start from 1, during failure return the error code */
	if (cmd_num < 0)
		return cmd_num;

	/* reo cmd ring descriptors has cmd_num starting from 1 */
	if (cmd_num == 0)
		return -EINVAL;

	if (!cb)
		return 0;

	/* Can this be optimized so that we keep the pending command list only
	 * for tid delete command to free up the resource on the command status
	 * indication?
	 */
	dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC);

	if (!dp_cmd)
		return -ENOMEM;

	memcpy(&dp_cmd->data, rx_tid, sizeof(*rx_tid));
	dp_cmd->cmd_num = cmd_num;
	dp_cmd->handler = cb;

	spin_lock_bh(&dp->reo_cmd_lock);
	list_add_tail(&dp_cmd->list, &dp->reo_cmd_list);
	spin_unlock_bh(&dp->reo_cmd_lock);

	return 0;
}

static void ath12k_dp_reo_cache_flush(struct ath12k_base *ab,
				      struct ath12k_dp_rx_tid *rx_tid)
{
	struct ath12k_hal_reo_cmd cmd = {};
	unsigned long tot_desc_sz, desc_sz;
	int ret;

	tot_desc_sz = rx_tid->qbuf.size;
	desc_sz = ath12k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);

	while (tot_desc_sz > desc_sz) {
		tot_desc_sz -= desc_sz;
		cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned + tot_desc_sz);
		cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
		ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
					     HAL_REO_CMD_FLUSH_CACHE, &cmd,
					     NULL);
		if (ret)
			ath12k_warn(ab,
				    "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
				    rx_tid->tid, ret);
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
	cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
				     HAL_REO_CMD_FLUSH_CACHE,
				     &cmd, ath12k_dp_reo_cmd_free);
	if (ret) {
		ath12k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
			   rx_tid->tid, ret);
		dma_unmap_single(ab->dev, rx_tid->qbuf.paddr_aligned, rx_tid->qbuf.size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->qbuf.vaddr);
		rx_tid->qbuf.vaddr = NULL;
	}
}
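
/* Status handler for the HAL_REO_CMD_UPDATE_RX_QUEUE command issued from
 * ath12k_dp_rx_peer_tid_delete(): on success the queue descriptor is parked on
 * reo_cmd_cache_flush_list, and aged-out entries are flushed from the HW cache
 * before their DMA memory is released.
 */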
static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
				      enum hal_reo_cmd_status status)
{
	struct ath12k_base *ab = dp->ab;
	struct ath12k_dp_rx_tid *rx_tid = ctx;
	struct ath12k_dp_rx_reo_cache_flush_elem *elem, *tmp;

	if (status == HAL_REO_CMD_DRAIN) {
		goto free_desc;
	} else if (status != HAL_REO_CMD_SUCCESS) {
		/* Shouldn't happen! Cleanup in case of other failure? */
		ath12k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
			    rx_tid->tid, status);
		return;
	}

	elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
	if (!elem)
		goto free_desc;

	elem->ts = jiffies;
	memcpy(&elem->data, rx_tid, sizeof(*rx_tid));

	spin_lock_bh(&dp->reo_cmd_lock);
	list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
	dp->reo_cmd_cache_flush_count++;

	/* Flush and invalidate aged REO desc from HW cache */
	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
				 list) {
		if (dp->reo_cmd_cache_flush_count > ATH12K_DP_RX_REO_DESC_FREE_THRES ||
		    time_after(jiffies, elem->ts +
			       msecs_to_jiffies(ATH12K_DP_RX_REO_DESC_FREE_TIMEOUT_MS))) {
			list_del(&elem->list);
			dp->reo_cmd_cache_flush_count--;

			/* Unlock the reo_cmd_lock before using ath12k_dp_reo_cmd_send()
			 * within ath12k_dp_reo_cache_flush. The reo_cmd_cache_flush_list
			 * is used in only two contexts, one is in this function called
			 * from napi and the other in ath12k_dp_free during core destroy.
			 * Before dp_free, the irqs would be disabled and would wait to
			 * synchronize. Hence there wouldn't be any race against add or
			 * delete to this list. Hence unlock-lock is safe here.
			 */
			spin_unlock_bh(&dp->reo_cmd_lock);

			ath12k_dp_reo_cache_flush(ab, &elem->data);
			kfree(elem);
			spin_lock_bh(&dp->reo_cmd_lock);
		}
	}
	spin_unlock_bh(&dp->reo_cmd_lock);

	return;
free_desc:
	dma_unmap_single(ab->dev, rx_tid->qbuf.paddr_aligned, rx_tid->qbuf.size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->qbuf.vaddr);
	rx_tid->qbuf.vaddr = NULL;
}

static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u16 tid,
					  dma_addr_t paddr)
{
	struct ath12k_reo_queue_ref *qref;
	struct ath12k_dp *dp = &ab->dp;
	bool ml_peer = false;

	if (!ab->hw_params->reoq_lut_support)
		return;

	if (peer_id & ATH12K_PEER_ML_ID_VALID) {
		peer_id &= ~ATH12K_PEER_ML_ID_VALID;
		ml_peer = true;
	}

	if (ml_peer)
		qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr +
				(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
	else
		qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
				(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);

	qref->info0 = u32_encode_bits(lower_32_bits(paddr),
				      BUFFER_ADDR_INFO0_ADDR);
	qref->info1 = u32_encode_bits(upper_32_bits(paddr),
				      BUFFER_ADDR_INFO1_ADDR) |
		      u32_encode_bits(tid, DP_REO_QREF_NUM);
	ath12k_hal_reo_shared_qaddr_cache_clear(ab);
}

static void ath12k_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u16 tid)
{
	struct ath12k_reo_queue_ref *qref;
	struct ath12k_dp *dp = &ab->dp;
	bool ml_peer = false;

	if (!ab->hw_params->reoq_lut_support)
		return;

	if (peer_id & ATH12K_PEER_ML_ID_VALID) {
		peer_id &= ~ATH12K_PEER_ML_ID_VALID;
		ml_peer = true;
	}

	if (ml_peer)
		qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr +
				(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
	else
		qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
				(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);

	qref->info0 = u32_encode_bits(0, BUFFER_ADDR_INFO0_ADDR);
	qref->info1 = u32_encode_bits(0, BUFFER_ADDR_INFO1_ADDR) |
		      u32_encode_bits(tid, DP_REO_QREF_NUM);
}

void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
				  struct ath12k_peer *peer, u8 tid)
{
	struct ath12k_hal_reo_cmd cmd = {};
	struct ath12k_dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	int ret;

	if (!rx_tid->active)
		return;

	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
	cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
	cmd.upd0 = HAL_REO_CMD_UPD0_VLD;
	ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
				     HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
				     ath12k_dp_rx_tid_del_func);
	if (ret) {
		ath12k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
			   tid, ret);
		dma_unmap_single(ar->ab->dev, rx_tid->qbuf.paddr_aligned,
				 rx_tid->qbuf.size, DMA_BIDIRECTIONAL);
		kfree(rx_tid->qbuf.vaddr);
		rx_tid->qbuf.vaddr = NULL;
	}

	if (peer->mlo)
		ath12k_peer_rx_tid_qref_reset(ar->ab, peer->ml_id, tid);
	else
		ath12k_peer_rx_tid_qref_reset(ar->ab, peer->peer_id, tid);

	rx_tid->active = false;
}
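
/* Hand an MSDU link descriptor back to the WBM release ring so the buffer
 * manager can reclaim it according to @action.
 */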
int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab,
				  struct ath12k_buffer_addr *buf_addr_info,
				  enum hal_wbm_rel_bm_act action)
{
	struct hal_wbm_release_ring *desc;
	struct ath12k_dp *dp = &ab->dp;
	struct hal_srng *srng;
	int ret = 0;

	srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOBUFS;
		goto exit;
	}

	ath12k_hal_rx_msdu_link_desc_set(ab, desc, buf_addr_info, action);

exit:
	ath12k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return ret;
}

static void ath12k_dp_rx_frags_cleanup(struct ath12k_dp_rx_tid *rx_tid,
				       bool rel_link_desc)
{
	struct ath12k_buffer_addr *buf_addr_info;
	struct ath12k_base *ab = rx_tid->ab;

	lockdep_assert_held(&ab->base_lock);

	if (rx_tid->dst_ring_desc) {
		if (rel_link_desc) {
			buf_addr_info = &rx_tid->dst_ring_desc->buf_addr_info;
			ath12k_dp_rx_link_desc_return(ab, buf_addr_info,
						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
		}
		kfree(rx_tid->dst_ring_desc);
		rx_tid->dst_ring_desc = NULL;
	}

	rx_tid->cur_sn = 0;
	rx_tid->last_frag_no = 0;
	rx_tid->rx_frag_bitmap = 0;
	__skb_queue_purge(&rx_tid->rx_frags);
}

void ath12k_dp_rx_peer_tid_cleanup(struct ath12k *ar, struct ath12k_peer *peer)
{
	struct ath12k_dp_rx_tid *rx_tid;
	int i;

	lockdep_assert_held(&ar->ab->base_lock);

	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
		rx_tid = &peer->rx_tid[i];

		ath12k_dp_rx_peer_tid_delete(ar, peer, i);
		ath12k_dp_rx_frags_cleanup(rx_tid, true);

		spin_unlock_bh(&ar->ab->base_lock);
		timer_delete_sync(&rx_tid->frag_timer);
		spin_lock_bh(&ar->ab->base_lock);
	}
}

static int ath12k_peer_rx_tid_reo_update(struct ath12k *ar,
					 struct ath12k_peer *peer,
					 struct ath12k_dp_rx_tid *rx_tid,
					 u32 ba_win_sz, u16 ssn,
					 bool update_ssn)
{
	struct ath12k_hal_reo_cmd cmd = {};
	int ret;

	cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
	cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
	cmd.ba_window_size = ba_win_sz;

	if (update_ssn) {
		cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
		cmd.upd2 = u32_encode_bits(ssn, HAL_REO_CMD_UPD2_SSN);
	}

	ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
				     HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
				     NULL);
	if (ret) {
		ath12k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
			    rx_tid->tid, ret);
		return ret;
	}

	rx_tid->ba_win_sz = ba_win_sz;

	return 0;
}

static int ath12k_dp_rx_assign_reoq(struct ath12k_base *ab,
				    struct ath12k_sta *ahsta,
				    struct ath12k_dp_rx_tid *rx_tid,
				    u16 ssn, enum hal_pn_type pn_type)
{
	u32 ba_win_sz = rx_tid->ba_win_sz;
	struct ath12k_reoq_buf *buf;
	void *vaddr, *vaddr_aligned;
	dma_addr_t paddr_aligned;
	u8 tid = rx_tid->tid;
	u32 hw_desc_sz;
	int ret;

	buf = &ahsta->reoq_bufs[tid];
	if (!buf->vaddr) {
		/* TODO: Optimize the memory allocation for qos tid based on
		 * the actual BA window size in REO tid update path.
		 */
		if (tid == HAL_DESC_REO_NON_QOS_TID)
			hw_desc_sz = ath12k_hal_reo_qdesc_size(ba_win_sz, tid);
		else
			hw_desc_sz = ath12k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);

		vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
		if (!vaddr)
			return -ENOMEM;

		vaddr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);

		ath12k_hal_reo_qdesc_setup(vaddr_aligned, tid, ba_win_sz,
					   ssn, pn_type);

		paddr_aligned = dma_map_single(ab->dev, vaddr_aligned, hw_desc_sz,
					       DMA_BIDIRECTIONAL);
		ret = dma_mapping_error(ab->dev, paddr_aligned);
		if (ret) {
			kfree(vaddr);
			return ret;
		}

		buf->vaddr = vaddr;
		buf->paddr_aligned = paddr_aligned;
		buf->size = hw_desc_sz;
	}

	rx_tid->qbuf = *buf;
	rx_tid->active = true;

	return 0;
}

int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id,
				u8 tid, u32 ba_win_sz, u16 ssn,
				enum hal_pn_type pn_type)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_dp *dp = &ab->dp;
	struct ath12k_peer *peer;
	struct ath12k_sta *ahsta;
	struct ath12k_dp_rx_tid *rx_tid;
	dma_addr_t paddr_aligned;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath12k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		spin_unlock_bh(&ab->base_lock);
		ath12k_warn(ab, "failed to find the peer to set up rx tid\n");
		return -ENOENT;
	}

	if (ab->hw_params->dp_primary_link_only &&
	    !peer->primary_link) {
		spin_unlock_bh(&ab->base_lock);
		return 0;
	}

	if (ab->hw_params->reoq_lut_support &&
	    (!dp->reoq_lut.vaddr || !dp->ml_reoq_lut.vaddr)) {
		spin_unlock_bh(&ab->base_lock);
		ath12k_warn(ab, "reo qref table is not setup\n");
		return -EINVAL;
	}

	if (peer->peer_id > DP_MAX_PEER_ID || tid > IEEE80211_NUM_TIDS) {
		ath12k_warn(ab, "peer id of peer %d or tid %d doesn't allow reoq setup\n",
			    peer->peer_id, tid);
		spin_unlock_bh(&ab->base_lock);
		return -EINVAL;
	}

	rx_tid = &peer->rx_tid[tid];
	/* Update the tid queue if it is already setup */
	if (rx_tid->active) {
		ret = ath12k_peer_rx_tid_reo_update(ar, peer, rx_tid,
						    ba_win_sz, ssn, true);
		spin_unlock_bh(&ab->base_lock);
		if (ret) {
			ath12k_warn(ab, "failed to update reo for rx tid %d\n", tid);
			return ret;
		}

		if (!ab->hw_params->reoq_lut_support) {
			paddr_aligned = rx_tid->qbuf.paddr_aligned;
			ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
								     peer_mac,
								     paddr_aligned, tid,
								     1, ba_win_sz);
			if (ret) {
				ath12k_warn(ab, "failed to setup peer rx reorder queue for tid %d: %d\n",
					    tid, ret);
				return ret;
			}
		}

		return 0;
	}

	rx_tid->tid = tid;

	rx_tid->ba_win_sz = ba_win_sz;

	ahsta = ath12k_sta_to_ahsta(peer->sta);
	ret = ath12k_dp_rx_assign_reoq(ab, ahsta, rx_tid, ssn, pn_type);
	if (ret) {
		spin_unlock_bh(&ab->base_lock);
		ath12k_warn(ab, "failed to assign reoq buf for rx tid %u\n", tid);
		return ret;
	}

	paddr_aligned = rx_tid->qbuf.paddr_aligned;
	if (ab->hw_params->reoq_lut_support) {
		/* Update the REO queue LUT at the corresponding peer id
		 * and tid with qaddr.
		 */
		if (peer->mlo)
			ath12k_peer_rx_tid_qref_setup(ab, peer->ml_id, tid,
						      paddr_aligned);
		else
			ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid,
						      paddr_aligned);

		spin_unlock_bh(&ab->base_lock);
	} else {
		spin_unlock_bh(&ab->base_lock);
		ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
							     paddr_aligned, tid, 1,
							     ba_win_sz);
	}

	return ret;
}

int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
			     struct ieee80211_ampdu_params *params,
			     u8 link_id)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta);
	struct ath12k_link_sta *arsta;
	int vdev_id;
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
				  ahsta->link[link_id]);
	if (!arsta)
		return -ENOLINK;

	vdev_id = arsta->arvif->vdev_id;

	ret = ath12k_dp_rx_peer_tid_setup(ar, arsta->addr, vdev_id,
					  params->tid, params->buf_size,
					  params->ssn, arsta->ahsta->pn_type);
	if (ret)
		ath12k_warn(ab, "failed to setup rx tid %d\n", ret);

	return ret;
}

int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,
			    struct ieee80211_ampdu_params *params,
			    u8 link_id)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_peer *peer;
	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta);
	struct ath12k_link_sta *arsta;
	int vdev_id;
	bool active;
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
				  ahsta->link[link_id]);
	if (!arsta)
		return -ENOLINK;

	vdev_id = arsta->arvif->vdev_id;

	spin_lock_bh(&ab->base_lock);

	peer = ath12k_peer_find(ab, vdev_id, arsta->addr);
	if (!peer) {
		spin_unlock_bh(&ab->base_lock);
		ath12k_warn(ab, "failed to find the peer to stop rx aggregation\n");
		return -ENOENT;
	}

	active = peer->rx_tid[params->tid].active;

	if (!active) {
		spin_unlock_bh(&ab->base_lock);
		return 0;
	}

	ret = ath12k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
	spin_unlock_bh(&ab->base_lock);
	if (ret) {
		ath12k_warn(ab, "failed to update reo for rx tid %d: %d\n",
			    params->tid, ret);
		return ret;
	}

	return ret;
}

int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif,
				       const u8 *peer_addr,
				       enum set_key_cmd key_cmd,
				       struct ieee80211_key_conf *key)
{
	struct ath12k *ar = arvif->ar;
	struct ath12k_base *ab = ar->ab;
	struct ath12k_hal_reo_cmd cmd = {};
	struct ath12k_peer *peer;
	struct ath12k_dp_rx_tid *rx_tid;
	u8 tid;
	int ret = 0;

	/* NOTE: Enable PN/TSC replay check offload only for unicast frames.
	 * We use mac80211 PN/TSC replay check functionality for bcast/mcast
	 * for now.
	 */
	if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		return 0;

	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.upd0 = HAL_REO_CMD_UPD0_PN |
		   HAL_REO_CMD_UPD0_PN_SIZE |
		   HAL_REO_CMD_UPD0_PN_VALID |
		   HAL_REO_CMD_UPD0_PN_CHECK |
		   HAL_REO_CMD_UPD0_SVLD;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_CCMP_256:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		if (key_cmd == SET_KEY) {
			cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
			cmd.pn_size = 48;
		}
		break;
	default:
		break;
	}

	spin_lock_bh(&ab->base_lock);

	peer = ath12k_peer_find(ab, arvif->vdev_id, peer_addr);
	if (!peer) {
		spin_unlock_bh(&ab->base_lock);
		ath12k_warn(ab, "failed to find the peer %pM to configure pn replay detection\n",
			    peer_addr);
		return -ENOENT;
	}

	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
		rx_tid = &peer->rx_tid[tid];
		if (!rx_tid->active)
			continue;
		cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
		cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
		ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
					     HAL_REO_CMD_UPDATE_RX_QUEUE,
					     &cmd, NULL);
		if (ret) {
			ath12k_warn(ab, "failed to configure rx tid %d queue of peer %pM for pn replay detection %d\n",
				    tid, peer_addr, ret);
			break;
		}
	}

	spin_unlock_bh(&ab->base_lock);

	return ret;
}

static int ath12k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
				      u16 peer_id)
{
	int i;

	for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
		if (ppdu_stats->user_stats[i].is_valid_peer_id) {
			if (peer_id == ppdu_stats->user_stats[i].peer_id)
				return i;
		} else {
			return i;
		}
	}

	return -EINVAL;
}

static int ath12k_htt_tlv_ppdu_stats_parse(struct ath12k_base *ab,
					   u16 tag, u16 len, const void *ptr,
					   void *data)
{
	const struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *ba_status;
	const struct htt_ppdu_stats_usr_cmpltn_cmn *cmplt_cmn;
	const struct htt_ppdu_stats_user_rate *user_rate;
	struct htt_ppdu_stats_info *ppdu_info;
	struct htt_ppdu_user_stats *user_stats;
	int cur_user;
	u16 peer_id;

	ppdu_info = data;

	switch (tag) {
	case HTT_PPDU_STATS_TAG_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_common)) {
			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}
		memcpy(&ppdu_info->ppdu_stats.common, ptr,
		       sizeof(struct htt_ppdu_stats_common));
		break;
	case HTT_PPDU_STATS_TAG_USR_RATE:
		if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}
		user_rate = ptr;
		peer_id = le16_to_cpu(user_rate->sw_peer_id);
		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy(&user_stats->rate, ptr,
		       sizeof(struct htt_ppdu_stats_user_rate));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		cmplt_cmn = ptr;
		peer_id = le16_to_cpu(cmplt_cmn->sw_peer_id);
		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy(&user_stats->cmpltn_cmn, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
		if (len <
		    sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		ba_status = ptr;
		peer_id = le16_to_cpu(ba_status->sw_peer_id);
		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy(&user_stats->ack_ba, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
		user_stats->tlv_flags |= BIT(tag);
		break;
	}
	return 0;
}

int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
			   int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
				       const void *ptr, void *data),
			   void *data)
{
	const struct htt_tlv *tlv;
	const void *begin = ptr;
	u16 tlv_tag, tlv_len;
	int ret = -EINVAL;

	while (len > 0) {
		if (len < sizeof(*tlv)) {
			ath12k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}
		tlv = (struct htt_tlv *)ptr;
		tlv_tag = le32_get_bits(tlv->header, HTT_TLV_TAG);
		tlv_len = le32_get_bits(tlv->header, HTT_TLV_LEN);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		if (tlv_len > len) {
			ath12k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}
		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
		if (ret == -ENOMEM)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}
	return 0;
}

static void
ath12k_update_per_peer_tx_stats(struct ath12k *ar,
				struct htt_ppdu_stats *ppdu_stats, u8 user)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_peer *peer;
	struct ath12k_link_sta *arsta;
	struct htt_ppdu_stats_user_rate *user_rate;
	struct ath12k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
	struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
	struct htt_ppdu_stats_common *common = &ppdu_stats->common;
	int ret;
	u8 flags, mcs, nss, bw, sgi, dcm, ppdu_type, rate_idx = 0;
	u32 v, succ_bytes = 0;
	u16 tones, rate = 0, succ_pkts = 0;
	u32 tx_duration = 0;
	u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
	u16 tx_retry_failed = 0, tx_retry_count = 0;
	bool is_ampdu = false, is_ofdma;

	if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
		return;

	if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON)) {
		is_ampdu =
			HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);
		tx_retry_failed =
			__le16_to_cpu(usr_stats->cmpltn_cmn.mpdu_tried) -
			__le16_to_cpu(usr_stats->cmpltn_cmn.mpdu_success);
		tx_retry_count =
			HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
			HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
	}

	if (usr_stats->tlv_flags &
	    BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
		succ_bytes = le32_to_cpu(usr_stats->ack_ba.success_bytes);
		succ_pkts = le32_get_bits(usr_stats->ack_ba.info,
					  HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M);
		tid = le32_get_bits(usr_stats->ack_ba.info,
				    HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM);
	}

	if (common->fes_duration_us)
		tx_duration = le32_to_cpu(common->fes_duration_us);

	user_rate = &usr_stats->rate;
	flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
	bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
	nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
	mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
	sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
	dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);

	ppdu_type = HTT_USR_RATE_PPDU_TYPE(user_rate->info1);
	is_ofdma = (ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA) ||
		   (ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA);

	/* Note: If host configured fixed rates and in some other special
	 * cases, the broadcast/management frames are sent in different rates.
	 * Firmware rate's control to be skipped for this?
	 */

	if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH12K_HE_MCS_MAX) {
		ath12k_warn(ab, "Invalid HE mcs %d peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH12K_VHT_MCS_MAX) {
		ath12k_warn(ab, "Invalid VHT mcs %d peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH12K_HT_MCS_MAX || nss < 1)) {
		ath12k_warn(ab, "Invalid HT mcs %d nss %d peer stats",
			    mcs, nss);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
		ret = ath12k_mac_hw_ratecode_to_legacy_rate(mcs,
							    flags,
							    &rate_idx,
							    &rate);
		if (ret < 0)
			return;
	}

	rcu_read_lock();
	spin_lock_bh(&ab->base_lock);
	peer = ath12k_peer_find_by_id(ab, usr_stats->peer_id);

	if (!peer || !peer->sta) {
		spin_unlock_bh(&ab->base_lock);
		rcu_read_unlock();
		return;
	}

	arsta = ath12k_peer_get_link_sta(ab, peer);
	if (!arsta) {
		spin_unlock_bh(&ab->base_lock);
		rcu_read_unlock();
		return;
	}

	memset(&arsta->txrate, 0, sizeof(arsta->txrate));

	arsta->txrate.bw = ath12k_mac_bw_to_mac80211_bw(bw);

	switch (flags) {
	case WMI_RATE_PREAMBLE_OFDM:
		arsta->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_CCK:
		arsta->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_HT:
		arsta->txrate.mcs = mcs + 8 * (nss - 1);
		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_VHT:
		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_HE:
		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
		arsta->txrate.he_dcm = dcm;
		arsta->txrate.he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
		tones = le16_to_cpu(user_rate->ru_end) -
			le16_to_cpu(user_rate->ru_start) + 1;
		v = ath12k_he_ru_tones_to_nl80211_he_ru_alloc(tones);
		arsta->txrate.he_ru_alloc = v;
		if (is_ofdma)
			arsta->txrate.bw = RATE_INFO_BW_HE_RU;
		break;
	case WMI_RATE_PREAMBLE_EHT:
		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_EHT_MCS;
		arsta->txrate.he_dcm = dcm;
		arsta->txrate.eht_gi = ath12k_mac_eht_gi_to_nl80211_eht_gi(sgi);
		tones = le16_to_cpu(user_rate->ru_end) -
			le16_to_cpu(user_rate->ru_start) + 1;
		v = ath12k_mac_eht_ru_tones_to_nl80211_eht_ru_alloc(tones);
		arsta->txrate.eht_ru_alloc = v;
		if (is_ofdma)
			arsta->txrate.bw = RATE_INFO_BW_EHT_RU;
		break;
	}

	arsta->tx_retry_failed += tx_retry_failed;
	arsta->tx_retry_count += tx_retry_count;
	arsta->txrate.nss = nss;
	arsta->tx_duration += tx_duration;
	memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));

	/* PPDU stats reported for mgmt packet doesn't have valid tx bytes.
	 * So skip peer stats update for mgmt packets.
	 */
	if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
		memset(peer_stats, 0, sizeof(*peer_stats));
		peer_stats->succ_pkts = succ_pkts;
		peer_stats->succ_bytes = succ_bytes;
		peer_stats->is_ampdu = is_ampdu;
		peer_stats->duration = tx_duration;
		peer_stats->ba_fails =
			HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
			HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
	}

	spin_unlock_bh(&ab->base_lock);
	rcu_read_unlock();
}

static void ath12k_htt_update_ppdu_stats(struct ath12k *ar,
					 struct htt_ppdu_stats *ppdu_stats)
{
	u8 user;

	for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
		ath12k_update_per_peer_tx_stats(ar, ppdu_stats, user);
}

static
struct htt_ppdu_stats_info *ath12k_dp_htt_get_ppdu_desc(struct ath12k *ar,
							u32 ppdu_id)
{
	struct htt_ppdu_stats_info *ppdu_info;

	lockdep_assert_held(&ar->data_lock);
	if (!list_empty(&ar->ppdu_stats_info)) {
		list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
			if (ppdu_info->ppdu_id == ppdu_id)
				return ppdu_info;
		}

		if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
			ppdu_info = list_first_entry(&ar->ppdu_stats_info,
						     typeof(*ppdu_info), list);
			list_del(&ppdu_info->list);
			ar->ppdu_stat_list_depth--;
			ath12k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
			kfree(ppdu_info);
		}
	}

	ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
	if (!ppdu_info)
		return NULL;

	list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
	ar->ppdu_stat_list_depth++;

	return ppdu_info;
}

static void ath12k_copy_to_delay_stats(struct ath12k_peer *peer,
				       struct htt_ppdu_user_stats *usr_stats)
{
	peer->ppdu_stats_delayba.sw_peer_id = le16_to_cpu(usr_stats->rate.sw_peer_id);
	peer->ppdu_stats_delayba.info0 = le32_to_cpu(usr_stats->rate.info0);
	peer->ppdu_stats_delayba.ru_end = le16_to_cpu(usr_stats->rate.ru_end);
	peer->ppdu_stats_delayba.ru_start = le16_to_cpu(usr_stats->rate.ru_start);
	peer->ppdu_stats_delayba.info1 = le32_to_cpu(usr_stats->rate.info1);
	peer->ppdu_stats_delayba.rate_flags = le32_to_cpu(usr_stats->rate.rate_flags);
	peer->ppdu_stats_delayba.resp_rate_flags =
		le32_to_cpu(usr_stats->rate.resp_rate_flags);

	peer->delayba_flag = true;
}

static void ath12k_copy_to_bar(struct ath12k_peer *peer,
			       struct htt_ppdu_user_stats *usr_stats)
{
	usr_stats->rate.sw_peer_id = cpu_to_le16(peer->ppdu_stats_delayba.sw_peer_id);
	usr_stats->rate.info0 = cpu_to_le32(peer->ppdu_stats_delayba.info0);
	usr_stats->rate.ru_end = cpu_to_le16(peer->ppdu_stats_delayba.ru_end);
	usr_stats->rate.ru_start = cpu_to_le16(peer->ppdu_stats_delayba.ru_start);
	usr_stats->rate.info1 = cpu_to_le32(peer->ppdu_stats_delayba.info1);
	usr_stats->rate.rate_flags = cpu_to_le32(peer->ppdu_stats_delayba.rate_flags);
	usr_stats->rate.resp_rate_flags =
		cpu_to_le32(peer->ppdu_stats_delayba.resp_rate_flags);

	peer->delayba_flag = false;
}

static int ath12k_htt_pull_ppdu_stats(struct ath12k_base *ab,
				      struct sk_buff *skb)
{
	struct ath12k_htt_ppdu_stats_msg *msg;
	struct htt_ppdu_stats_info *ppdu_info;
	struct ath12k_peer *peer = NULL;
	struct htt_ppdu_user_stats *usr_stats = NULL;
	u32 peer_id = 0;
	struct ath12k *ar;
	int ret, i;
	u8 pdev_id;
	u32 ppdu_id, len;

	msg = (struct ath12k_htt_ppdu_stats_msg *)skb->data;
	len = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE);
	if (len > (skb->len - struct_size(msg, data, 0))) {
		ath12k_warn(ab,
			    "HTT PPDU STATS event has unexpected payload size %u, should be smaller than %u\n",
			    len, skb->len);
		return -EINVAL;
	}

	pdev_id = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PDEV_ID);
	ppdu_id = le32_to_cpu(msg->ppdu_id);

	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
	if (!ar) {
		ret = -EINVAL;
		goto exit;
	}

	spin_lock_bh(&ar->data_lock);
	ppdu_info = ath12k_dp_htt_get_ppdu_desc(ar, ppdu_id);
	if (!ppdu_info) {
		spin_unlock_bh(&ar->data_lock);
		ret = -EINVAL;
		goto exit;
	}

	ppdu_info->ppdu_id = ppdu_id;
	ret = ath12k_dp_htt_tlv_iter(ab, msg->data, len,
				     ath12k_htt_tlv_ppdu_stats_parse,
				     (void *)ppdu_info);
	if (ret) {
		spin_unlock_bh(&ar->data_lock);
		ath12k_warn(ab, "Failed to parse tlv %d\n", ret);
		goto exit;
	}

	if (ppdu_info->ppdu_stats.common.num_users >= HTT_PPDU_STATS_MAX_USERS) {
		spin_unlock_bh(&ar->data_lock);
		ath12k_warn(ab,
			    "HTT PPDU STATS event has unexpected num_users %u, should be smaller than %u\n",
			    ppdu_info->ppdu_stats.common.num_users,
			    HTT_PPDU_STATS_MAX_USERS);
		ret = -EINVAL;
		goto exit;
	}

	/* back up data rate tlv for all peers */
	if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_DATA &&
	    (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON)) &&
	    ppdu_info->delay_ba) {
		for (i = 0; i < ppdu_info->ppdu_stats.common.num_users; i++) {
			peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id;
			spin_lock_bh(&ab->base_lock);
			peer = ath12k_peer_find_by_id(ab, peer_id);
			if (!peer) {
				spin_unlock_bh(&ab->base_lock);
				continue;
			}

			usr_stats = &ppdu_info->ppdu_stats.user_stats[i];
			if (usr_stats->delay_ba)
				ath12k_copy_to_delay_stats(peer, usr_stats);
			spin_unlock_bh(&ab->base_lock);
		}
	}

	/* restore all peers' data rate tlv to mu-bar tlv */
	if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_BAR &&
	    (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON))) {
		for (i = 0; i < ppdu_info->bar_num_users; i++) {
			peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id;
			spin_lock_bh(&ab->base_lock);
			peer = ath12k_peer_find_by_id(ab, peer_id);
			if (!peer) {
				spin_unlock_bh(&ab->base_lock);
				continue;
			}

			usr_stats = &ppdu_info->ppdu_stats.user_stats[i];
			if (peer->delayba_flag)
				ath12k_copy_to_bar(peer, usr_stats);
			spin_unlock_bh(&ab->base_lock);
		}
	}

	spin_unlock_bh(&ar->data_lock);

exit:
	rcu_read_unlock();

	return ret;
}

static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab,
						struct sk_buff *skb)
{
	struct ath12k_htt_mlo_offset_msg *msg;
	struct ath12k_pdev *pdev;
	struct ath12k *ar;
	u8 pdev_id;

	msg = (struct ath12k_htt_mlo_offset_msg *)skb->data;
	pdev_id = u32_get_bits(__le32_to_cpu(msg->info),
			       HTT_T2H_MLO_OFFSET_INFO_PDEV_ID);

	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
	if (!ar) {
		/* It is possible that the ar is not yet active (started).
		 * The above function will only look for the active pdev
		 * and hence %NULL return is possible. Just silently
		 * discard this message
		 */
		goto exit;
	}

	spin_lock_bh(&ar->data_lock);
	pdev = ar->pdev;

	pdev->timestamp.info = __le32_to_cpu(msg->info);
	pdev->timestamp.sync_timestamp_lo_us = __le32_to_cpu(msg->sync_timestamp_lo_us);
	pdev->timestamp.sync_timestamp_hi_us = __le32_to_cpu(msg->sync_timestamp_hi_us);
	pdev->timestamp.mlo_offset_lo = __le32_to_cpu(msg->mlo_offset_lo);
	pdev->timestamp.mlo_offset_hi = __le32_to_cpu(msg->mlo_offset_hi);
	pdev->timestamp.mlo_offset_clks = __le32_to_cpu(msg->mlo_offset_clks);
	pdev->timestamp.mlo_comp_clks = __le32_to_cpu(msg->mlo_comp_clks);
	pdev->timestamp.mlo_comp_timer = __le32_to_cpu(msg->mlo_comp_timer);

	spin_unlock_bh(&ar->data_lock);
exit:
	rcu_read_unlock();
}

void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab,
				       struct sk_buff *skb)
{
	struct ath12k_dp *dp = &ab->dp;
	struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
	enum htt_t2h_msg_type type;
	u16 peer_id;
	u8 vdev_id;
	u8 mac_addr[ETH_ALEN];
	u16 peer_mac_h16;
	u16 ast_hash = 0;
	u16 hw_peer_id;

	type = le32_get_bits(resp->version_msg.version, HTT_T2H_MSG_TYPE);

	ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type);

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF:
		dp->htt_tgt_ver_major = le32_get_bits(resp->version_msg.version,
						      HTT_T2H_VERSION_CONF_MAJOR);
		dp->htt_tgt_ver_minor = le32_get_bits(resp->version_msg.version,
						      HTT_T2H_VERSION_CONF_MINOR);
		complete(&dp->htt_tgt_version_received);
		break;
	/* TODO: remove unused peer map versions after testing */
	case HTT_T2H_MSG_TYPE_PEER_MAP:
		vdev_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_VDEV_ID);
		peer_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_PEER_ID);
		peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
					     HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
		ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
				       peer_mac_h16, mac_addr);
		ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP2:
		vdev_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_VDEV_ID);
		peer_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_PEER_ID);
		peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
					     HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
		ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
				       peer_mac_h16, mac_addr);
		ast_hash = le32_get_bits(resp->peer_map_ev.info2,
					 HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL);
		hw_peer_id = le32_get_bits(resp->peer_map_ev.info1,
					   HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID);
		ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
				      hw_peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP3:
		vdev_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_VDEV_ID);
		peer_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_PEER_ID);
		peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
					     HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
		ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
				       peer_mac_h16, mac_addr);
		ast_hash = le32_get_bits(resp->peer_map_ev.info2,
					 HTT_T2H_PEER_MAP3_INFO2_AST_HASH_VAL);
		hw_peer_id = le32_get_bits(resp->peer_map_ev.info2,
					   HTT_T2H_PEER_MAP3_INFO2_HW_PEER_ID);
		ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
				      hw_peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
	case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
		peer_id = le32_get_bits(resp->peer_unmap_ev.info,
					HTT_T2H_PEER_UNMAP_INFO_PEER_ID);
		ath12k_peer_unmap_event(ab, peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
		ath12k_htt_pull_ppdu_stats(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
		ath12k_debugfs_htt_ext_stats_handler(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND:
		ath12k_htt_mlo_offset_event_handler(ab, skb);
		break;
	default:
		ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt event %d not handled\n",
			   type);
		break;
	}

	dev_kfree_skb_any(skb);
}

static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar,
				      struct sk_buff_head *msdu_list,
				      struct sk_buff *first, struct sk_buff *last,
				      u8 l3pad_bytes, int msdu_len)
{
	struct ath12k_base *ab = ar->ab;
	struct sk_buff *skb;
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first);
	int buf_first_hdr_len, buf_first_len;
	struct hal_rx_desc *ldesc;
	int space_extra, rem_len, buf_len;
	u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
	bool is_continuation;

	/* As the msdu is spread across multiple rx buffers,
	 * find the offset to the start of msdu for computing
	 * the length of the msdu in the first buffer.
	 */
	buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes;
	buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;

	if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
		skb_put(first, buf_first_hdr_len + msdu_len);
		skb_pull(first, buf_first_hdr_len);
		return 0;
	}

	ldesc = (struct hal_rx_desc *)last->data;
	rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, ldesc);
	rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, ldesc);

	/* MSDU spans over multiple buffers because the length of the MSDU
	 * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data
	 * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
	 */
	skb_put(first, DP_RX_BUFFER_SIZE);
	skb_pull(first, buf_first_hdr_len);

	/* When an MSDU spread over multiple buffers MSDU_END
	 * tlvs are valid only in the last buffer. Copy those tlvs.
1945 */ 1946 ath12k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc); 1947 1948 space_extra = msdu_len - (buf_first_len + skb_tailroom(first)); 1949 if (space_extra > 0 && 1950 (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) { 1951 /* Free up all buffers of the MSDU */ 1952 while ((skb = __skb_dequeue(msdu_list)) != NULL) { 1953 rxcb = ATH12K_SKB_RXCB(skb); 1954 if (!rxcb->is_continuation) { 1955 dev_kfree_skb_any(skb); 1956 break; 1957 } 1958 dev_kfree_skb_any(skb); 1959 } 1960 return -ENOMEM; 1961 } 1962 1963 rem_len = msdu_len - buf_first_len; 1964 while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) { 1965 rxcb = ATH12K_SKB_RXCB(skb); 1966 is_continuation = rxcb->is_continuation; 1967 if (is_continuation) 1968 buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz; 1969 else 1970 buf_len = rem_len; 1971 1972 if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) { 1973 WARN_ON_ONCE(1); 1974 dev_kfree_skb_any(skb); 1975 return -EINVAL; 1976 } 1977 1978 skb_put(skb, buf_len + hal_rx_desc_sz); 1979 skb_pull(skb, hal_rx_desc_sz); 1980 skb_copy_from_linear_data(skb, skb_put(first, buf_len), 1981 buf_len); 1982 dev_kfree_skb_any(skb); 1983 1984 rem_len -= buf_len; 1985 if (!is_continuation) 1986 break; 1987 } 1988 1989 return 0; 1990 } 1991 1992 static struct sk_buff *ath12k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list, 1993 struct sk_buff *first) 1994 { 1995 struct sk_buff *skb; 1996 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first); 1997 1998 if (!rxcb->is_continuation) 1999 return first; 2000 2001 skb_queue_walk(msdu_list, skb) { 2002 rxcb = ATH12K_SKB_RXCB(skb); 2003 if (!rxcb->is_continuation) 2004 return skb; 2005 } 2006 2007 return NULL; 2008 } 2009 2010 static void ath12k_dp_rx_h_csum_offload(struct sk_buff *msdu, 2011 struct ath12k_dp_rx_info *rx_info) 2012 { 2013 msdu->ip_summed = (rx_info->ip_csum_fail || rx_info->l4_csum_fail) ? 
2014 CHECKSUM_NONE : CHECKSUM_UNNECESSARY; 2015 } 2016 2017 int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar, enum hal_encrypt_type enctype) 2018 { 2019 switch (enctype) { 2020 case HAL_ENCRYPT_TYPE_OPEN: 2021 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 2022 case HAL_ENCRYPT_TYPE_TKIP_MIC: 2023 return 0; 2024 case HAL_ENCRYPT_TYPE_CCMP_128: 2025 return IEEE80211_CCMP_MIC_LEN; 2026 case HAL_ENCRYPT_TYPE_CCMP_256: 2027 return IEEE80211_CCMP_256_MIC_LEN; 2028 case HAL_ENCRYPT_TYPE_GCMP_128: 2029 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 2030 return IEEE80211_GCMP_MIC_LEN; 2031 case HAL_ENCRYPT_TYPE_WEP_40: 2032 case HAL_ENCRYPT_TYPE_WEP_104: 2033 case HAL_ENCRYPT_TYPE_WEP_128: 2034 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 2035 case HAL_ENCRYPT_TYPE_WAPI: 2036 break; 2037 } 2038 2039 ath12k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype); 2040 return 0; 2041 } 2042 2043 static int ath12k_dp_rx_crypto_param_len(struct ath12k *ar, 2044 enum hal_encrypt_type enctype) 2045 { 2046 switch (enctype) { 2047 case HAL_ENCRYPT_TYPE_OPEN: 2048 return 0; 2049 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 2050 case HAL_ENCRYPT_TYPE_TKIP_MIC: 2051 return IEEE80211_TKIP_IV_LEN; 2052 case HAL_ENCRYPT_TYPE_CCMP_128: 2053 return IEEE80211_CCMP_HDR_LEN; 2054 case HAL_ENCRYPT_TYPE_CCMP_256: 2055 return IEEE80211_CCMP_256_HDR_LEN; 2056 case HAL_ENCRYPT_TYPE_GCMP_128: 2057 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 2058 return IEEE80211_GCMP_HDR_LEN; 2059 case HAL_ENCRYPT_TYPE_WEP_40: 2060 case HAL_ENCRYPT_TYPE_WEP_104: 2061 case HAL_ENCRYPT_TYPE_WEP_128: 2062 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 2063 case HAL_ENCRYPT_TYPE_WAPI: 2064 break; 2065 } 2066 2067 ath12k_warn(ar->ab, "unsupported encryption type %d\n", enctype); 2068 return 0; 2069 } 2070 2071 static int ath12k_dp_rx_crypto_icv_len(struct ath12k *ar, 2072 enum hal_encrypt_type enctype) 2073 { 2074 switch (enctype) { 2075 case HAL_ENCRYPT_TYPE_OPEN: 2076 case HAL_ENCRYPT_TYPE_CCMP_128: 2077 case HAL_ENCRYPT_TYPE_CCMP_256: 2078 case HAL_ENCRYPT_TYPE_GCMP_128: 2079 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 2080 return 0; 2081 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 2082 case HAL_ENCRYPT_TYPE_TKIP_MIC: 2083 return IEEE80211_TKIP_ICV_LEN; 2084 case HAL_ENCRYPT_TYPE_WEP_40: 2085 case HAL_ENCRYPT_TYPE_WEP_104: 2086 case HAL_ENCRYPT_TYPE_WEP_128: 2087 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 2088 case HAL_ENCRYPT_TYPE_WAPI: 2089 break; 2090 } 2091 2092 ath12k_warn(ar->ab, "unsupported encryption type %d\n", enctype); 2093 return 0; 2094 } 2095 2096 static void ath12k_dp_rx_h_undecap_nwifi(struct ath12k *ar, 2097 struct sk_buff *msdu, 2098 enum hal_encrypt_type enctype, 2099 struct ieee80211_rx_status *status) 2100 { 2101 struct ath12k_base *ab = ar->ab; 2102 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 2103 u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN]; 2104 struct ieee80211_hdr *hdr; 2105 size_t hdr_len; 2106 u8 *crypto_hdr; 2107 u16 qos_ctl; 2108 2109 /* pull decapped header */ 2110 hdr = (struct ieee80211_hdr *)msdu->data; 2111 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2112 skb_pull(msdu, hdr_len); 2113 2114 /* Rebuild qos header */ 2115 hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA); 2116 2117 /* Reset the order bit as the HT_Control header is stripped */ 2118 hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER)); 2119 2120 qos_ctl = rxcb->tid; 2121 2122 if (ath12k_dp_rx_h_mesh_ctl_present(ab, rxcb->rx_desc)) 2123 qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT; 2124 2125 /* TODO: Add other QoS ctl fields when required */ 2126 2127 /* copy decap header before 
overwriting for reuse below */ 2128 memcpy(decap_hdr, hdr, hdr_len); 2129 2130 /* Rebuild crypto header for mac80211 use */ 2131 if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 2132 crypto_hdr = skb_push(msdu, ath12k_dp_rx_crypto_param_len(ar, enctype)); 2133 ath12k_dp_rx_desc_get_crypto_header(ar->ab, 2134 rxcb->rx_desc, crypto_hdr, 2135 enctype); 2136 } 2137 2138 memcpy(skb_push(msdu, 2139 IEEE80211_QOS_CTL_LEN), &qos_ctl, 2140 IEEE80211_QOS_CTL_LEN); 2141 memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len); 2142 } 2143 2144 static void ath12k_dp_rx_h_undecap_raw(struct ath12k *ar, struct sk_buff *msdu, 2145 enum hal_encrypt_type enctype, 2146 struct ieee80211_rx_status *status, 2147 bool decrypted) 2148 { 2149 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 2150 struct ieee80211_hdr *hdr; 2151 size_t hdr_len; 2152 size_t crypto_len; 2153 2154 if (!rxcb->is_first_msdu || 2155 !(rxcb->is_first_msdu && rxcb->is_last_msdu)) { 2156 WARN_ON_ONCE(1); 2157 return; 2158 } 2159 2160 skb_trim(msdu, msdu->len - FCS_LEN); 2161 2162 if (!decrypted) 2163 return; 2164 2165 hdr = (void *)msdu->data; 2166 2167 /* Tail */ 2168 if (status->flag & RX_FLAG_IV_STRIPPED) { 2169 skb_trim(msdu, msdu->len - 2170 ath12k_dp_rx_crypto_mic_len(ar, enctype)); 2171 2172 skb_trim(msdu, msdu->len - 2173 ath12k_dp_rx_crypto_icv_len(ar, enctype)); 2174 } else { 2175 /* MIC */ 2176 if (status->flag & RX_FLAG_MIC_STRIPPED) 2177 skb_trim(msdu, msdu->len - 2178 ath12k_dp_rx_crypto_mic_len(ar, enctype)); 2179 2180 /* ICV */ 2181 if (status->flag & RX_FLAG_ICV_STRIPPED) 2182 skb_trim(msdu, msdu->len - 2183 ath12k_dp_rx_crypto_icv_len(ar, enctype)); 2184 } 2185 2186 /* MMIC */ 2187 if ((status->flag & RX_FLAG_MMIC_STRIPPED) && 2188 !ieee80211_has_morefrags(hdr->frame_control) && 2189 enctype == HAL_ENCRYPT_TYPE_TKIP_MIC) 2190 skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN); 2191 2192 /* Head */ 2193 if (status->flag & RX_FLAG_IV_STRIPPED) { 2194 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2195 crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype); 2196 2197 memmove(msdu->data + crypto_len, msdu->data, hdr_len); 2198 skb_pull(msdu, crypto_len); 2199 } 2200 } 2201 2202 static void ath12k_get_dot11_hdr_from_rx_desc(struct ath12k *ar, 2203 struct sk_buff *msdu, 2204 struct ath12k_skb_rxcb *rxcb, 2205 struct ieee80211_rx_status *status, 2206 enum hal_encrypt_type enctype) 2207 { 2208 struct hal_rx_desc *rx_desc = rxcb->rx_desc; 2209 struct ath12k_base *ab = ar->ab; 2210 size_t hdr_len, crypto_len; 2211 struct ieee80211_hdr hdr; 2212 __le16 qos_ctl; 2213 u8 *crypto_hdr, mesh_ctrl; 2214 2215 ath12k_dp_rx_desc_get_dot11_hdr(ab, rx_desc, &hdr); 2216 hdr_len = ieee80211_hdrlen(hdr.frame_control); 2217 mesh_ctrl = ath12k_dp_rx_h_mesh_ctl_present(ab, rx_desc); 2218 2219 if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 2220 crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype); 2221 crypto_hdr = skb_push(msdu, crypto_len); 2222 ath12k_dp_rx_desc_get_crypto_header(ab, rx_desc, crypto_hdr, enctype); 2223 } 2224 2225 skb_push(msdu, hdr_len); 2226 memcpy(msdu->data, &hdr, min(hdr_len, sizeof(hdr))); 2227 2228 if (rxcb->is_mcbc) 2229 status->flag &= ~RX_FLAG_PN_VALIDATED; 2230 2231 /* Add QOS header */ 2232 if (ieee80211_is_data_qos(hdr.frame_control)) { 2233 struct ieee80211_hdr *qos_ptr = (struct ieee80211_hdr *)msdu->data; 2234 2235 qos_ctl = cpu_to_le16(rxcb->tid & IEEE80211_QOS_CTL_TID_MASK); 2236 if (mesh_ctrl) 2237 qos_ctl |= cpu_to_le16(IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT); 2238 2239 memcpy(ieee80211_get_qos_ctl(qos_ptr), 
&qos_ctl, IEEE80211_QOS_CTL_LEN); 2240 } 2241 } 2242 2243 static void ath12k_dp_rx_h_undecap_eth(struct ath12k *ar, 2244 struct sk_buff *msdu, 2245 enum hal_encrypt_type enctype, 2246 struct ieee80211_rx_status *status) 2247 { 2248 struct ieee80211_hdr *hdr; 2249 struct ethhdr *eth; 2250 u8 da[ETH_ALEN]; 2251 u8 sa[ETH_ALEN]; 2252 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 2253 struct ath12k_dp_rx_rfc1042_hdr rfc = {0xaa, 0xaa, 0x03, {0x00, 0x00, 0x00}}; 2254 2255 eth = (struct ethhdr *)msdu->data; 2256 ether_addr_copy(da, eth->h_dest); 2257 ether_addr_copy(sa, eth->h_source); 2258 rfc.snap_type = eth->h_proto; 2259 skb_pull(msdu, sizeof(*eth)); 2260 memcpy(skb_push(msdu, sizeof(rfc)), &rfc, 2261 sizeof(rfc)); 2262 ath12k_get_dot11_hdr_from_rx_desc(ar, msdu, rxcb, status, enctype); 2263 2264 /* original 802.11 header has a different DA and in 2265 * case of 4addr it may also have different SA 2266 */ 2267 hdr = (struct ieee80211_hdr *)msdu->data; 2268 ether_addr_copy(ieee80211_get_DA(hdr), da); 2269 ether_addr_copy(ieee80211_get_SA(hdr), sa); 2270 } 2271 2272 static void ath12k_dp_rx_h_undecap(struct ath12k *ar, struct sk_buff *msdu, 2273 struct hal_rx_desc *rx_desc, 2274 enum hal_encrypt_type enctype, 2275 struct ieee80211_rx_status *status, 2276 bool decrypted) 2277 { 2278 struct ath12k_base *ab = ar->ab; 2279 u8 decap; 2280 struct ethhdr *ehdr; 2281 2282 decap = ath12k_dp_rx_h_decap_type(ab, rx_desc); 2283 2284 switch (decap) { 2285 case DP_RX_DECAP_TYPE_NATIVE_WIFI: 2286 ath12k_dp_rx_h_undecap_nwifi(ar, msdu, enctype, status); 2287 break; 2288 case DP_RX_DECAP_TYPE_RAW: 2289 ath12k_dp_rx_h_undecap_raw(ar, msdu, enctype, status, 2290 decrypted); 2291 break; 2292 case DP_RX_DECAP_TYPE_ETHERNET2_DIX: 2293 ehdr = (struct ethhdr *)msdu->data; 2294 2295 /* mac80211 allows fast path only for authorized STA */ 2296 if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) { 2297 ATH12K_SKB_RXCB(msdu)->is_eapol = true; 2298 ath12k_dp_rx_h_undecap_eth(ar, msdu, enctype, status); 2299 break; 2300 } 2301 2302 /* PN for mcast packets will be validated in mac80211; 2303 * remove eth header and add 802.11 header. 
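 * The hardware does not perform replay (PN) checks for
 * group-addressed frames, and mac80211 can only do so when it is
 * handed a native 802.11 frame with the crypto header rebuilt,
 * hence the undecap back from the ethernet format here.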
2304 */ 2305 if (ATH12K_SKB_RXCB(msdu)->is_mcbc && decrypted) 2306 ath12k_dp_rx_h_undecap_eth(ar, msdu, enctype, status); 2307 break; 2308 case DP_RX_DECAP_TYPE_8023: 2309 /* TODO: Handle undecap for these formats */ 2310 break; 2311 } 2312 } 2313 2314 struct ath12k_peer * 2315 ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu, 2316 struct ath12k_dp_rx_info *rx_info) 2317 { 2318 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 2319 struct ath12k_peer *peer = NULL; 2320 2321 lockdep_assert_held(&ab->base_lock); 2322 2323 if (rxcb->peer_id) 2324 peer = ath12k_peer_find_by_id(ab, rxcb->peer_id); 2325 2326 if (peer) 2327 return peer; 2328 2329 if (rx_info->addr2_present) 2330 peer = ath12k_peer_find_by_addr(ab, rx_info->addr2); 2331 2332 return peer; 2333 } 2334 2335 static void ath12k_dp_rx_h_mpdu(struct ath12k *ar, 2336 struct sk_buff *msdu, 2337 struct hal_rx_desc *rx_desc, 2338 struct ath12k_dp_rx_info *rx_info) 2339 { 2340 struct ath12k_base *ab = ar->ab; 2341 struct ath12k_skb_rxcb *rxcb; 2342 enum hal_encrypt_type enctype; 2343 bool is_decrypted = false; 2344 struct ieee80211_hdr *hdr; 2345 struct ath12k_peer *peer; 2346 struct ieee80211_rx_status *rx_status = rx_info->rx_status; 2347 u32 err_bitmap; 2348 2349 /* PN for multicast packets will be checked in mac80211 */ 2350 rxcb = ATH12K_SKB_RXCB(msdu); 2351 rxcb->is_mcbc = rx_info->is_mcbc; 2352 2353 if (rxcb->is_mcbc) 2354 rxcb->peer_id = rx_info->peer_id; 2355 2356 spin_lock_bh(&ar->ab->base_lock); 2357 peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu, rx_info); 2358 if (peer) { 2359 /* resetting mcbc bit because mcbc packets are unicast 2360 * packets only for AP as STA sends unicast packets. 2361 */ 2362 rxcb->is_mcbc = rxcb->is_mcbc && !peer->ucast_ra_only; 2363 2364 if (rxcb->is_mcbc) 2365 enctype = peer->sec_type_grp; 2366 else 2367 enctype = peer->sec_type; 2368 } else { 2369 enctype = HAL_ENCRYPT_TYPE_OPEN; 2370 } 2371 spin_unlock_bh(&ar->ab->base_lock); 2372 2373 err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc); 2374 if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap) 2375 is_decrypted = ath12k_dp_rx_h_is_decrypted(ab, rx_desc); 2376 2377 /* Clear per-MPDU flags while leaving per-PPDU flags intact */ 2378 rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC | 2379 RX_FLAG_MMIC_ERROR | 2380 RX_FLAG_DECRYPTED | 2381 RX_FLAG_IV_STRIPPED | 2382 RX_FLAG_MMIC_STRIPPED); 2383 2384 if (err_bitmap & HAL_RX_MPDU_ERR_FCS) 2385 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; 2386 if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) 2387 rx_status->flag |= RX_FLAG_MMIC_ERROR; 2388 2389 if (is_decrypted) { 2390 rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED; 2391 2392 if (rx_info->is_mcbc) 2393 rx_status->flag |= RX_FLAG_MIC_STRIPPED | 2394 RX_FLAG_ICV_STRIPPED; 2395 else 2396 rx_status->flag |= RX_FLAG_IV_STRIPPED | 2397 RX_FLAG_PN_VALIDATED; 2398 } 2399 2400 ath12k_dp_rx_h_csum_offload(msdu, rx_info); 2401 ath12k_dp_rx_h_undecap(ar, msdu, rx_desc, 2402 enctype, rx_status, is_decrypted); 2403 2404 if (!is_decrypted || rx_info->is_mcbc) 2405 return; 2406 2407 if (rx_info->decap_type != DP_RX_DECAP_TYPE_ETHERNET2_DIX) { 2408 hdr = (void *)msdu->data; 2409 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); 2410 } 2411 } 2412 2413 static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info) 2414 { 2415 struct ieee80211_supported_band *sband; 2416 struct ieee80211_rx_status *rx_status = rx_info->rx_status; 2417 enum rx_msdu_start_pkt_type pkt_type = rx_info->pkt_type; 2418 u8 bw = rx_info->bw, 
sgi = rx_info->sgi; 2419 u8 rate_mcs = rx_info->rate_mcs, nss = rx_info->nss; 2420 bool is_cck; 2421 2422 switch (pkt_type) { 2423 case RX_MSDU_START_PKT_TYPE_11A: 2424 case RX_MSDU_START_PKT_TYPE_11B: 2425 is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B); 2426 sband = &ar->mac.sbands[rx_status->band]; 2427 rx_status->rate_idx = ath12k_mac_hw_rate_to_idx(sband, rate_mcs, 2428 is_cck); 2429 break; 2430 case RX_MSDU_START_PKT_TYPE_11N: 2431 rx_status->encoding = RX_ENC_HT; 2432 if (rate_mcs > ATH12K_HT_MCS_MAX) { 2433 ath12k_warn(ar->ab, 2434 "Received with invalid mcs in HT mode %d\n", 2435 rate_mcs); 2436 break; 2437 } 2438 rx_status->rate_idx = rate_mcs + (8 * (nss - 1)); 2439 if (sgi) 2440 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 2441 rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw); 2442 break; 2443 case RX_MSDU_START_PKT_TYPE_11AC: 2444 rx_status->encoding = RX_ENC_VHT; 2445 rx_status->rate_idx = rate_mcs; 2446 if (rate_mcs > ATH12K_VHT_MCS_MAX) { 2447 ath12k_warn(ar->ab, 2448 "Received with invalid mcs in VHT mode %d\n", 2449 rate_mcs); 2450 break; 2451 } 2452 rx_status->nss = nss; 2453 if (sgi) 2454 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 2455 rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw); 2456 break; 2457 case RX_MSDU_START_PKT_TYPE_11AX: 2458 rx_status->rate_idx = rate_mcs; 2459 if (rate_mcs > ATH12K_HE_MCS_MAX) { 2460 ath12k_warn(ar->ab, 2461 "Received with invalid mcs in HE mode %d\n", 2462 rate_mcs); 2463 break; 2464 } 2465 rx_status->encoding = RX_ENC_HE; 2466 rx_status->nss = nss; 2467 rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi); 2468 rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw); 2469 break; 2470 case RX_MSDU_START_PKT_TYPE_11BE: 2471 rx_status->rate_idx = rate_mcs; 2472 2473 if (rate_mcs > ATH12K_EHT_MCS_MAX) { 2474 ath12k_warn(ar->ab, 2475 "Received with invalid mcs in EHT mode %d\n", 2476 rate_mcs); 2477 break; 2478 } 2479 2480 rx_status->encoding = RX_ENC_EHT; 2481 rx_status->nss = nss; 2482 rx_status->eht.gi = ath12k_mac_eht_gi_to_nl80211_eht_gi(sgi); 2483 rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw); 2484 break; 2485 default: 2486 break; 2487 } 2488 } 2489 2490 void ath12k_dp_rx_h_fetch_info(struct ath12k_base *ab, struct hal_rx_desc *rx_desc, 2491 struct ath12k_dp_rx_info *rx_info) 2492 { 2493 rx_info->ip_csum_fail = ath12k_dp_rx_h_ip_cksum_fail(ab, rx_desc); 2494 rx_info->l4_csum_fail = ath12k_dp_rx_h_l4_cksum_fail(ab, rx_desc); 2495 rx_info->is_mcbc = ath12k_dp_rx_h_is_da_mcbc(ab, rx_desc); 2496 rx_info->decap_type = ath12k_dp_rx_h_decap_type(ab, rx_desc); 2497 rx_info->pkt_type = ath12k_dp_rx_h_pkt_type(ab, rx_desc); 2498 rx_info->sgi = ath12k_dp_rx_h_sgi(ab, rx_desc); 2499 rx_info->rate_mcs = ath12k_dp_rx_h_rate_mcs(ab, rx_desc); 2500 rx_info->bw = ath12k_dp_rx_h_rx_bw(ab, rx_desc); 2501 rx_info->nss = ath12k_dp_rx_h_nss(ab, rx_desc); 2502 rx_info->tid = ath12k_dp_rx_h_tid(ab, rx_desc); 2503 rx_info->peer_id = ath12k_dp_rx_h_peer_id(ab, rx_desc); 2504 rx_info->phy_meta_data = ath12k_dp_rx_h_freq(ab, rx_desc); 2505 2506 if (ath12k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)) { 2507 ether_addr_copy(rx_info->addr2, 2508 ath12k_dp_rxdesc_get_mpdu_start_addr2(ab, rx_desc)); 2509 rx_info->addr2_present = true; 2510 } 2511 2512 ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "rx_desc: ", 2513 rx_desc, sizeof(*rx_desc)); 2514 } 2515 2516 void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info) 2517 { 2518 struct ieee80211_rx_status *rx_status = rx_info->rx_status; 2519 u8 channel_num; 2520 u32 center_freq, meta_data; 2521 struct 
ieee80211_channel *channel; 2522 2523 rx_status->freq = 0; 2524 rx_status->rate_idx = 0; 2525 rx_status->nss = 0; 2526 rx_status->encoding = RX_ENC_LEGACY; 2527 rx_status->bw = RATE_INFO_BW_20; 2528 rx_status->enc_flags = 0; 2529 2530 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; 2531 2532 meta_data = rx_info->phy_meta_data; 2533 channel_num = meta_data; 2534 center_freq = meta_data >> 16; 2535 2536 if (center_freq >= ATH12K_MIN_6GHZ_FREQ && 2537 center_freq <= ATH12K_MAX_6GHZ_FREQ) { 2538 rx_status->band = NL80211_BAND_6GHZ; 2539 rx_status->freq = center_freq; 2540 } else if (channel_num >= 1 && channel_num <= 14) { 2541 rx_status->band = NL80211_BAND_2GHZ; 2542 } else if (channel_num >= 36 && channel_num <= 173) { 2543 rx_status->band = NL80211_BAND_5GHZ; 2544 } else { 2545 spin_lock_bh(&ar->data_lock); 2546 channel = ar->rx_channel; 2547 if (channel) { 2548 rx_status->band = channel->band; 2549 channel_num = 2550 ieee80211_frequency_to_channel(channel->center_freq); 2551 } 2552 spin_unlock_bh(&ar->data_lock); 2553 } 2554 2555 if (rx_status->band != NL80211_BAND_6GHZ) 2556 rx_status->freq = ieee80211_channel_to_frequency(channel_num, 2557 rx_status->band); 2558 2559 ath12k_dp_rx_h_rate(ar, rx_info); 2560 } 2561 2562 static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *napi, 2563 struct sk_buff *msdu, 2564 struct ath12k_dp_rx_info *rx_info) 2565 { 2566 struct ath12k_base *ab = ar->ab; 2567 struct ieee80211_rx_status *rx_status; 2568 struct ieee80211_sta *pubsta; 2569 struct ath12k_peer *peer; 2570 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 2571 struct ieee80211_rx_status *status = rx_info->rx_status; 2572 u8 decap = rx_info->decap_type; 2573 bool is_mcbc = rxcb->is_mcbc; 2574 bool is_eapol = rxcb->is_eapol; 2575 2576 spin_lock_bh(&ab->base_lock); 2577 peer = ath12k_dp_rx_h_find_peer(ab, msdu, rx_info); 2578 2579 pubsta = peer ? peer->sta : NULL; 2580 2581 if (pubsta && pubsta->valid_links) { 2582 status->link_valid = 1; 2583 status->link_id = peer->link_id; 2584 } 2585 2586 spin_unlock_bh(&ab->base_lock); 2587 2588 ath12k_dbg(ab, ATH12K_DBG_DATA, 2589 "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", 2590 msdu, 2591 msdu->len, 2592 peer ? peer->addr : NULL, 2593 rxcb->tid, 2594 is_mcbc ? "mcast" : "ucast", 2595 ath12k_dp_rx_h_seq_no(ab, rxcb->rx_desc), 2596 (status->encoding == RX_ENC_LEGACY) ? "legacy" : "", 2597 (status->encoding == RX_ENC_HT) ? "ht" : "", 2598 (status->encoding == RX_ENC_VHT) ? "vht" : "", 2599 (status->encoding == RX_ENC_HE) ? "he" : "", 2600 (status->encoding == RX_ENC_EHT) ? "eht" : "", 2601 (status->bw == RATE_INFO_BW_40) ? "40" : "", 2602 (status->bw == RATE_INFO_BW_80) ? "80" : "", 2603 (status->bw == RATE_INFO_BW_160) ? "160" : "", 2604 (status->bw == RATE_INFO_BW_320) ? "320" : "", 2605 status->enc_flags & RX_ENC_FLAG_SHORT_GI ? 
"sgi " : "", 2606 status->rate_idx, 2607 status->nss, 2608 status->freq, 2609 status->band, status->flag, 2610 !!(status->flag & RX_FLAG_FAILED_FCS_CRC), 2611 !!(status->flag & RX_FLAG_MMIC_ERROR), 2612 !!(status->flag & RX_FLAG_AMSDU_MORE)); 2613 2614 ath12k_dbg_dump(ab, ATH12K_DBG_DP_RX, NULL, "dp rx msdu: ", 2615 msdu->data, msdu->len); 2616 2617 rx_status = IEEE80211_SKB_RXCB(msdu); 2618 *rx_status = *status; 2619 2620 /* TODO: trace rx packet */ 2621 2622 /* PN for multicast packets are not validate in HW, 2623 * so skip 802.3 rx path 2624 * Also, fast_rx expects the STA to be authorized, hence 2625 * eapol packets are sent in slow path. 2626 */ 2627 if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol && 2628 !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED)) 2629 rx_status->flag |= RX_FLAG_8023; 2630 2631 ieee80211_rx_napi(ath12k_ar_to_hw(ar), pubsta, msdu, napi); 2632 } 2633 2634 static bool ath12k_dp_rx_check_nwifi_hdr_len_valid(struct ath12k_base *ab, 2635 struct hal_rx_desc *rx_desc, 2636 struct sk_buff *msdu) 2637 { 2638 struct ieee80211_hdr *hdr; 2639 u8 decap_type; 2640 u32 hdr_len; 2641 2642 decap_type = ath12k_dp_rx_h_decap_type(ab, rx_desc); 2643 if (decap_type != DP_RX_DECAP_TYPE_NATIVE_WIFI) 2644 return true; 2645 2646 hdr = (struct ieee80211_hdr *)msdu->data; 2647 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2648 2649 if ((likely(hdr_len <= DP_MAX_NWIFI_HDR_LEN))) 2650 return true; 2651 2652 ab->device_stats.invalid_rbm++; 2653 WARN_ON_ONCE(1); 2654 return false; 2655 } 2656 2657 static int ath12k_dp_rx_process_msdu(struct ath12k *ar, 2658 struct sk_buff *msdu, 2659 struct sk_buff_head *msdu_list, 2660 struct ath12k_dp_rx_info *rx_info) 2661 { 2662 struct ath12k_base *ab = ar->ab; 2663 struct hal_rx_desc *rx_desc, *lrx_desc; 2664 struct ath12k_skb_rxcb *rxcb; 2665 struct sk_buff *last_buf; 2666 u8 l3_pad_bytes; 2667 u16 msdu_len; 2668 int ret; 2669 u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; 2670 2671 last_buf = ath12k_dp_rx_get_msdu_last_buf(msdu_list, msdu); 2672 if (!last_buf) { 2673 ath12k_warn(ab, 2674 "No valid Rx buffer to access MSDU_END tlv\n"); 2675 ret = -EIO; 2676 goto free_out; 2677 } 2678 2679 rx_desc = (struct hal_rx_desc *)msdu->data; 2680 lrx_desc = (struct hal_rx_desc *)last_buf->data; 2681 if (!ath12k_dp_rx_h_msdu_done(ab, lrx_desc)) { 2682 ath12k_warn(ab, "msdu_done bit in msdu_end is not set\n"); 2683 ret = -EIO; 2684 goto free_out; 2685 } 2686 2687 rxcb = ATH12K_SKB_RXCB(msdu); 2688 rxcb->rx_desc = rx_desc; 2689 msdu_len = ath12k_dp_rx_h_msdu_len(ab, lrx_desc); 2690 l3_pad_bytes = ath12k_dp_rx_h_l3pad(ab, lrx_desc); 2691 2692 if (rxcb->is_frag) { 2693 skb_pull(msdu, hal_rx_desc_sz); 2694 } else if (!rxcb->is_continuation) { 2695 if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) { 2696 ret = -EINVAL; 2697 ath12k_warn(ab, "invalid msdu len %u\n", msdu_len); 2698 ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", rx_desc, 2699 sizeof(*rx_desc)); 2700 goto free_out; 2701 } 2702 skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len); 2703 skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes); 2704 } else { 2705 ret = ath12k_dp_rx_msdu_coalesce(ar, msdu_list, 2706 msdu, last_buf, 2707 l3_pad_bytes, msdu_len); 2708 if (ret) { 2709 ath12k_warn(ab, 2710 "failed to coalesce msdu rx buffer%d\n", ret); 2711 goto free_out; 2712 } 2713 } 2714 2715 if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, rx_desc, msdu))) { 2716 ret = -EINVAL; 2717 goto free_out; 2718 } 2719 2720 ath12k_dp_rx_h_fetch_info(ab, rx_desc, rx_info); 2721 ath12k_dp_rx_h_ppdu(ar, 
rx_info); 2722 ath12k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_info); 2723 2724 rx_info->rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED; 2725 2726 return 0; 2727 2728 free_out: 2729 return ret; 2730 } 2731 2732 static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab, 2733 struct napi_struct *napi, 2734 struct sk_buff_head *msdu_list, 2735 int ring_id) 2736 { 2737 struct ath12k_hw_group *ag = ab->ag; 2738 struct ieee80211_rx_status rx_status = {}; 2739 struct ath12k_skb_rxcb *rxcb; 2740 struct sk_buff *msdu; 2741 struct ath12k *ar; 2742 struct ath12k_hw_link *hw_links = ag->hw_links; 2743 struct ath12k_base *partner_ab; 2744 struct ath12k_dp_rx_info rx_info; 2745 u8 hw_link_id, pdev_id; 2746 int ret; 2747 2748 if (skb_queue_empty(msdu_list)) 2749 return; 2750 2751 rx_info.addr2_present = false; 2752 rx_info.rx_status = &rx_status; 2753 2754 rcu_read_lock(); 2755 2756 while ((msdu = __skb_dequeue(msdu_list))) { 2757 rxcb = ATH12K_SKB_RXCB(msdu); 2758 hw_link_id = rxcb->hw_link_id; 2759 partner_ab = ath12k_ag_to_ab(ag, 2760 hw_links[hw_link_id].device_id); 2761 pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params, 2762 hw_links[hw_link_id].pdev_idx); 2763 ar = partner_ab->pdevs[pdev_id].ar; 2764 if (!rcu_dereference(partner_ab->pdevs_active[pdev_id])) { 2765 dev_kfree_skb_any(msdu); 2766 continue; 2767 } 2768 2769 if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) { 2770 dev_kfree_skb_any(msdu); 2771 continue; 2772 } 2773 2774 ret = ath12k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_info); 2775 if (ret) { 2776 ath12k_dbg(ab, ATH12K_DBG_DATA, 2777 "Unable to process msdu %d", ret); 2778 dev_kfree_skb_any(msdu); 2779 continue; 2780 } 2781 2782 ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_info); 2783 } 2784 2785 rcu_read_unlock(); 2786 } 2787 2788 static u16 ath12k_dp_rx_get_peer_id(struct ath12k_base *ab, 2789 enum ath12k_peer_metadata_version ver, 2790 __le32 peer_metadata) 2791 { 2792 switch (ver) { 2793 default: 2794 ath12k_warn(ab, "Unknown peer metadata version: %d", ver); 2795 fallthrough; 2796 case ATH12K_PEER_METADATA_V0: 2797 return le32_get_bits(peer_metadata, 2798 RX_MPDU_DESC_META_DATA_V0_PEER_ID); 2799 case ATH12K_PEER_METADATA_V1: 2800 return le32_get_bits(peer_metadata, 2801 RX_MPDU_DESC_META_DATA_V1_PEER_ID); 2802 case ATH12K_PEER_METADATA_V1A: 2803 return le32_get_bits(peer_metadata, 2804 RX_MPDU_DESC_META_DATA_V1A_PEER_ID); 2805 case ATH12K_PEER_METADATA_V1B: 2806 return le32_get_bits(peer_metadata, 2807 RX_MPDU_DESC_META_DATA_V1B_PEER_ID); 2808 } 2809 } 2810 2811 int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id, 2812 struct napi_struct *napi, int budget) 2813 { 2814 struct ath12k_hw_group *ag = ab->ag; 2815 struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES]; 2816 struct ath12k_hw_link *hw_links = ag->hw_links; 2817 int num_buffs_reaped[ATH12K_MAX_DEVICES] = {}; 2818 struct ath12k_rx_desc_info *desc_info; 2819 struct ath12k_dp *dp = &ab->dp; 2820 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; 2821 struct hal_reo_dest_ring *desc; 2822 struct ath12k_base *partner_ab; 2823 struct sk_buff_head msdu_list; 2824 struct ath12k_skb_rxcb *rxcb; 2825 int total_msdu_reaped = 0; 2826 u8 hw_link_id, device_id; 2827 struct hal_srng *srng; 2828 struct sk_buff *msdu; 2829 bool done = false; 2830 u64 desc_va; 2831 2832 __skb_queue_head_init(&msdu_list); 2833 2834 for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) 2835 INIT_LIST_HEAD(&rx_desc_used_list[device_id]); 2836 2837 srng = 
&ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id]; 2838 2839 spin_lock_bh(&srng->lock); 2840 2841 try_again: 2842 ath12k_hal_srng_access_begin(ab, srng); 2843 2844 while ((desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) { 2845 struct rx_mpdu_desc *mpdu_info; 2846 struct rx_msdu_desc *msdu_info; 2847 enum hal_reo_dest_ring_push_reason push_reason; 2848 u32 cookie; 2849 2850 cookie = le32_get_bits(desc->buf_addr_info.info1, 2851 BUFFER_ADDR_INFO1_SW_COOKIE); 2852 2853 hw_link_id = le32_get_bits(desc->info0, 2854 HAL_REO_DEST_RING_INFO0_SRC_LINK_ID); 2855 2856 desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 | 2857 le32_to_cpu(desc->buf_va_lo)); 2858 desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va); 2859 2860 device_id = hw_links[hw_link_id].device_id; 2861 partner_ab = ath12k_ag_to_ab(ag, device_id); 2862 if (unlikely(!partner_ab)) { 2863 if (desc_info->skb) { 2864 dev_kfree_skb_any(desc_info->skb); 2865 desc_info->skb = NULL; 2866 } 2867 2868 continue; 2869 } 2870 2871 /* retry manual desc retrieval */ 2872 if (!desc_info) { 2873 desc_info = ath12k_dp_get_rx_desc(partner_ab, cookie); 2874 if (!desc_info) { 2875 ath12k_warn(partner_ab, "Invalid cookie in manual descriptor retrieval: 0x%x\n", 2876 cookie); 2877 continue; 2878 } 2879 } 2880 2881 if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC) 2882 ath12k_warn(ab, "Check HW CC implementation"); 2883 2884 msdu = desc_info->skb; 2885 desc_info->skb = NULL; 2886 2887 list_add_tail(&desc_info->list, &rx_desc_used_list[device_id]); 2888 2889 rxcb = ATH12K_SKB_RXCB(msdu); 2890 dma_unmap_single(partner_ab->dev, rxcb->paddr, 2891 msdu->len + skb_tailroom(msdu), 2892 DMA_FROM_DEVICE); 2893 2894 num_buffs_reaped[device_id]++; 2895 ab->device_stats.reo_rx[ring_id][ab->device_id]++; 2896 2897 push_reason = le32_get_bits(desc->info0, 2898 HAL_REO_DEST_RING_INFO0_PUSH_REASON); 2899 if (push_reason != 2900 HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) { 2901 dev_kfree_skb_any(msdu); 2902 ab->device_stats.hal_reo_error[ring_id]++; 2903 continue; 2904 } 2905 2906 msdu_info = &desc->rx_msdu_info; 2907 mpdu_info = &desc->rx_mpdu_info; 2908 2909 rxcb->is_first_msdu = !!(le32_to_cpu(msdu_info->info0) & 2910 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU); 2911 rxcb->is_last_msdu = !!(le32_to_cpu(msdu_info->info0) & 2912 RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU); 2913 rxcb->is_continuation = !!(le32_to_cpu(msdu_info->info0) & 2914 RX_MSDU_DESC_INFO0_MSDU_CONTINUATION); 2915 rxcb->hw_link_id = hw_link_id; 2916 rxcb->peer_id = ath12k_dp_rx_get_peer_id(ab, dp->peer_metadata_ver, 2917 mpdu_info->peer_meta_data); 2918 rxcb->tid = le32_get_bits(mpdu_info->info0, 2919 RX_MPDU_DESC_INFO0_TID); 2920 2921 __skb_queue_tail(&msdu_list, msdu); 2922 2923 if (!rxcb->is_continuation) { 2924 total_msdu_reaped++; 2925 done = true; 2926 } else { 2927 done = false; 2928 } 2929 2930 if (total_msdu_reaped >= budget) 2931 break; 2932 } 2933 2934 /* Hw might have updated the head pointer after we cached it. 2935 * In this case, even though there are entries in the ring we'll 2936 * get rx_desc NULL. Give the read another try with updated cached 2937 * head pointer so that we can reap complete MPDU in the current 2938 * rx processing. 
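 * Note that "done" is only set once an entry without the MSDU
 * continuation flag has been reaped, so the retry also prevents a
 * partially reaped buffer chain from being left in msdu_list for
 * this poll.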
2939 */ 2940 if (!done && ath12k_hal_srng_dst_num_free(ab, srng, true)) { 2941 ath12k_hal_srng_access_end(ab, srng); 2942 goto try_again; 2943 } 2944 2945 ath12k_hal_srng_access_end(ab, srng); 2946 2947 spin_unlock_bh(&srng->lock); 2948 2949 if (!total_msdu_reaped) 2950 goto exit; 2951 2952 for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) { 2953 if (!num_buffs_reaped[device_id]) 2954 continue; 2955 2956 partner_ab = ath12k_ag_to_ab(ag, device_id); 2957 rx_ring = &partner_ab->dp.rx_refill_buf_ring; 2958 2959 ath12k_dp_rx_bufs_replenish(partner_ab, rx_ring, 2960 &rx_desc_used_list[device_id], 2961 num_buffs_reaped[device_id]); 2962 } 2963 2964 ath12k_dp_rx_process_received_packets(ab, napi, &msdu_list, 2965 ring_id); 2966 2967 exit: 2968 return total_msdu_reaped; 2969 } 2970 2971 static void ath12k_dp_rx_frag_timer(struct timer_list *timer) 2972 { 2973 struct ath12k_dp_rx_tid *rx_tid = timer_container_of(rx_tid, timer, 2974 frag_timer); 2975 2976 spin_lock_bh(&rx_tid->ab->base_lock); 2977 if (rx_tid->last_frag_no && 2978 rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) { 2979 spin_unlock_bh(&rx_tid->ab->base_lock); 2980 return; 2981 } 2982 ath12k_dp_rx_frags_cleanup(rx_tid, true); 2983 spin_unlock_bh(&rx_tid->ab->base_lock); 2984 } 2985 2986 int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id) 2987 { 2988 struct ath12k_base *ab = ar->ab; 2989 struct crypto_shash *tfm; 2990 struct ath12k_peer *peer; 2991 struct ath12k_dp_rx_tid *rx_tid; 2992 int i; 2993 2994 tfm = crypto_alloc_shash("michael_mic", 0, 0); 2995 if (IS_ERR(tfm)) 2996 return PTR_ERR(tfm); 2997 2998 spin_lock_bh(&ab->base_lock); 2999 3000 peer = ath12k_peer_find(ab, vdev_id, peer_mac); 3001 if (!peer) { 3002 spin_unlock_bh(&ab->base_lock); 3003 crypto_free_shash(tfm); 3004 ath12k_warn(ab, "failed to find the peer to set up fragment info\n"); 3005 return -ENOENT; 3006 } 3007 3008 if (!peer->primary_link) { 3009 spin_unlock_bh(&ab->base_lock); 3010 crypto_free_shash(tfm); 3011 return 0; 3012 } 3013 3014 for (i = 0; i <= IEEE80211_NUM_TIDS; i++) { 3015 rx_tid = &peer->rx_tid[i]; 3016 rx_tid->ab = ab; 3017 timer_setup(&rx_tid->frag_timer, ath12k_dp_rx_frag_timer, 0); 3018 skb_queue_head_init(&rx_tid->rx_frags); 3019 } 3020 3021 peer->tfm_mmic = tfm; 3022 peer->dp_setup_done = true; 3023 spin_unlock_bh(&ab->base_lock); 3024 3025 return 0; 3026 } 3027 3028 static int ath12k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key, 3029 struct ieee80211_hdr *hdr, u8 *data, 3030 size_t data_len, u8 *mic) 3031 { 3032 SHASH_DESC_ON_STACK(desc, tfm); 3033 u8 mic_hdr[16] = {}; 3034 u8 tid = 0; 3035 int ret; 3036 3037 if (!tfm) 3038 return -EINVAL; 3039 3040 desc->tfm = tfm; 3041 3042 ret = crypto_shash_setkey(tfm, key, 8); 3043 if (ret) 3044 goto out; 3045 3046 ret = crypto_shash_init(desc); 3047 if (ret) 3048 goto out; 3049 3050 /* TKIP MIC header */ 3051 memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN); 3052 memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN); 3053 if (ieee80211_is_data_qos(hdr->frame_control)) 3054 tid = ieee80211_get_tid(hdr); 3055 mic_hdr[12] = tid; 3056 3057 ret = crypto_shash_update(desc, mic_hdr, 16); 3058 if (ret) 3059 goto out; 3060 ret = crypto_shash_update(desc, data, data_len); 3061 if (ret) 3062 goto out; 3063 ret = crypto_shash_final(desc, mic); 3064 out: 3065 shash_desc_zero(desc); 3066 return ret; 3067 } 3068 3069 static int ath12k_dp_rx_h_verify_tkip_mic(struct ath12k *ar, struct ath12k_peer *peer, 3070 struct sk_buff *msdu) 3071 { 3072 struct 
ath12k_base *ab = ar->ab; 3073 struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data; 3074 struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu); 3075 struct ieee80211_key_conf *key_conf; 3076 struct ieee80211_hdr *hdr; 3077 struct ath12k_dp_rx_info rx_info; 3078 u8 mic[IEEE80211_CCMP_MIC_LEN]; 3079 int head_len, tail_len, ret; 3080 size_t data_len; 3081 u32 hdr_len, hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; 3082 u8 *key, *data; 3083 u8 key_idx; 3084 3085 if (ath12k_dp_rx_h_enctype(ab, rx_desc) != HAL_ENCRYPT_TYPE_TKIP_MIC) 3086 return 0; 3087 3088 rx_info.addr2_present = false; 3089 rx_info.rx_status = rxs; 3090 3091 hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz); 3092 hdr_len = ieee80211_hdrlen(hdr->frame_control); 3093 head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN; 3094 tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN; 3095 3096 if (!is_multicast_ether_addr(hdr->addr1)) 3097 key_idx = peer->ucast_keyidx; 3098 else 3099 key_idx = peer->mcast_keyidx; 3100 3101 key_conf = peer->keys[key_idx]; 3102 3103 data = msdu->data + head_len; 3104 data_len = msdu->len - head_len - tail_len; 3105 key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY]; 3106 3107 ret = ath12k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic); 3108 if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN)) 3109 goto mic_fail; 3110 3111 return 0; 3112 3113 mic_fail: 3114 (ATH12K_SKB_RXCB(msdu))->is_first_msdu = true; 3115 (ATH12K_SKB_RXCB(msdu))->is_last_msdu = true; 3116 3117 ath12k_dp_rx_h_fetch_info(ab, rx_desc, &rx_info); 3118 3119 rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED | 3120 RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED; 3121 skb_pull(msdu, hal_rx_desc_sz); 3122 3123 if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, rx_desc, msdu))) 3124 return -EINVAL; 3125 3126 ath12k_dp_rx_h_ppdu(ar, &rx_info); 3127 ath12k_dp_rx_h_undecap(ar, msdu, rx_desc, 3128 HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true); 3129 ieee80211_rx(ath12k_ar_to_hw(ar), msdu); 3130 return -EINVAL; 3131 } 3132 3133 static void ath12k_dp_rx_h_undecap_frag(struct ath12k *ar, struct sk_buff *msdu, 3134 enum hal_encrypt_type enctype, u32 flags) 3135 { 3136 struct ieee80211_hdr *hdr; 3137 size_t hdr_len; 3138 size_t crypto_len; 3139 u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; 3140 3141 if (!flags) 3142 return; 3143 3144 hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz); 3145 3146 if (flags & RX_FLAG_MIC_STRIPPED) 3147 skb_trim(msdu, msdu->len - 3148 ath12k_dp_rx_crypto_mic_len(ar, enctype)); 3149 3150 if (flags & RX_FLAG_ICV_STRIPPED) 3151 skb_trim(msdu, msdu->len - 3152 ath12k_dp_rx_crypto_icv_len(ar, enctype)); 3153 3154 if (flags & RX_FLAG_IV_STRIPPED) { 3155 hdr_len = ieee80211_hdrlen(hdr->frame_control); 3156 crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype); 3157 3158 memmove(msdu->data + hal_rx_desc_sz + crypto_len, 3159 msdu->data + hal_rx_desc_sz, hdr_len); 3160 skb_pull(msdu, crypto_len); 3161 } 3162 } 3163 3164 static int ath12k_dp_rx_h_defrag(struct ath12k *ar, 3165 struct ath12k_peer *peer, 3166 struct ath12k_dp_rx_tid *rx_tid, 3167 struct sk_buff **defrag_skb) 3168 { 3169 struct ath12k_base *ab = ar->ab; 3170 struct hal_rx_desc *rx_desc; 3171 struct sk_buff *skb, *first_frag, *last_frag; 3172 struct ieee80211_hdr *hdr; 3173 enum hal_encrypt_type enctype; 3174 bool is_decrypted = false; 3175 int msdu_len = 0; 3176 int extra_space; 3177 u32 flags, hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; 3178 3179 first_frag = 
skb_peek(&rx_tid->rx_frags); 3180 last_frag = skb_peek_tail(&rx_tid->rx_frags); 3181 3182 skb_queue_walk(&rx_tid->rx_frags, skb) { 3183 flags = 0; 3184 rx_desc = (struct hal_rx_desc *)skb->data; 3185 hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz); 3186 3187 enctype = ath12k_dp_rx_h_enctype(ab, rx_desc); 3188 if (enctype != HAL_ENCRYPT_TYPE_OPEN) 3189 is_decrypted = ath12k_dp_rx_h_is_decrypted(ab, 3190 rx_desc); 3191 3192 if (is_decrypted) { 3193 if (skb != first_frag) 3194 flags |= RX_FLAG_IV_STRIPPED; 3195 if (skb != last_frag) 3196 flags |= RX_FLAG_ICV_STRIPPED | 3197 RX_FLAG_MIC_STRIPPED; 3198 } 3199 3200 /* RX fragments are always raw packets */ 3201 if (skb != last_frag) 3202 skb_trim(skb, skb->len - FCS_LEN); 3203 ath12k_dp_rx_h_undecap_frag(ar, skb, enctype, flags); 3204 3205 if (skb != first_frag) 3206 skb_pull(skb, hal_rx_desc_sz + 3207 ieee80211_hdrlen(hdr->frame_control)); 3208 msdu_len += skb->len; 3209 } 3210 3211 extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag)); 3212 if (extra_space > 0 && 3213 (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0)) 3214 return -ENOMEM; 3215 3216 __skb_unlink(first_frag, &rx_tid->rx_frags); 3217 while ((skb = __skb_dequeue(&rx_tid->rx_frags))) { 3218 skb_put_data(first_frag, skb->data, skb->len); 3219 dev_kfree_skb_any(skb); 3220 } 3221 3222 hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz); 3223 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS); 3224 ATH12K_SKB_RXCB(first_frag)->is_frag = 1; 3225 3226 if (ath12k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag)) 3227 first_frag = NULL; 3228 3229 *defrag_skb = first_frag; 3230 return 0; 3231 } 3232 3233 static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar, 3234 struct ath12k_dp_rx_tid *rx_tid, 3235 struct sk_buff *defrag_skb) 3236 { 3237 struct ath12k_base *ab = ar->ab; 3238 struct ath12k_dp *dp = &ab->dp; 3239 struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data; 3240 struct hal_reo_entrance_ring *reo_ent_ring; 3241 struct hal_reo_dest_ring *reo_dest_ring; 3242 struct dp_link_desc_bank *link_desc_banks; 3243 struct hal_rx_msdu_link *msdu_link; 3244 struct hal_rx_msdu_details *msdu0; 3245 struct hal_srng *srng; 3246 dma_addr_t link_paddr, buf_paddr; 3247 u32 desc_bank, msdu_info, msdu_ext_info, mpdu_info; 3248 u32 cookie, hal_rx_desc_sz, dest_ring_info0, queue_addr_hi; 3249 int ret; 3250 struct ath12k_rx_desc_info *desc_info; 3251 enum hal_rx_buf_return_buf_manager idle_link_rbm = dp->idle_link_rbm; 3252 u8 dst_ind; 3253 3254 hal_rx_desc_sz = ab->hal.hal_desc_sz; 3255 link_desc_banks = dp->link_desc_banks; 3256 reo_dest_ring = rx_tid->dst_ring_desc; 3257 3258 ath12k_hal_rx_reo_ent_paddr_get(ab, &reo_dest_ring->buf_addr_info, 3259 &link_paddr, &cookie); 3260 desc_bank = u32_get_bits(cookie, DP_LINK_DESC_BANK_MASK); 3261 3262 msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr + 3263 (link_paddr - link_desc_banks[desc_bank].paddr)); 3264 msdu0 = &msdu_link->msdu_link[0]; 3265 msdu_ext_info = le32_to_cpu(msdu0->rx_msdu_ext_info.info0); 3266 dst_ind = u32_get_bits(msdu_ext_info, RX_MSDU_EXT_DESC_INFO0_REO_DEST_IND); 3267 3268 memset(msdu0, 0, sizeof(*msdu0)); 3269 3270 msdu_info = u32_encode_bits(1, RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU) | 3271 u32_encode_bits(1, RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU) | 3272 u32_encode_bits(0, RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) | 3273 u32_encode_bits(defrag_skb->len - hal_rx_desc_sz, 3274 RX_MSDU_DESC_INFO0_MSDU_LENGTH) | 3275 u32_encode_bits(1, 
RX_MSDU_DESC_INFO0_VALID_SA) | 3276 u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_DA); 3277 msdu0->rx_msdu_info.info0 = cpu_to_le32(msdu_info); 3278 msdu0->rx_msdu_ext_info.info0 = cpu_to_le32(msdu_ext_info); 3279 3280 /* change msdu len in hal rx desc */ 3281 ath12k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz); 3282 3283 buf_paddr = dma_map_single(ab->dev, defrag_skb->data, 3284 defrag_skb->len + skb_tailroom(defrag_skb), 3285 DMA_TO_DEVICE); 3286 if (dma_mapping_error(ab->dev, buf_paddr)) 3287 return -ENOMEM; 3288 3289 spin_lock_bh(&dp->rx_desc_lock); 3290 desc_info = list_first_entry_or_null(&dp->rx_desc_free_list, 3291 struct ath12k_rx_desc_info, 3292 list); 3293 if (!desc_info) { 3294 spin_unlock_bh(&dp->rx_desc_lock); 3295 ath12k_warn(ab, "failed to find rx desc for reinject\n"); 3296 ret = -ENOMEM; 3297 goto err_unmap_dma; 3298 } 3299 3300 desc_info->skb = defrag_skb; 3301 desc_info->in_use = true; 3302 3303 list_del(&desc_info->list); 3304 spin_unlock_bh(&dp->rx_desc_lock); 3305 3306 ATH12K_SKB_RXCB(defrag_skb)->paddr = buf_paddr; 3307 3308 ath12k_hal_rx_buf_addr_info_set(&msdu0->buf_addr_info, buf_paddr, 3309 desc_info->cookie, 3310 HAL_RX_BUF_RBM_SW3_BM); 3311 3312 /* Fill mpdu details into reo entrance ring */ 3313 srng = &ab->hal.srng_list[dp->reo_reinject_ring.ring_id]; 3314 3315 spin_lock_bh(&srng->lock); 3316 ath12k_hal_srng_access_begin(ab, srng); 3317 3318 reo_ent_ring = ath12k_hal_srng_src_get_next_entry(ab, srng); 3319 if (!reo_ent_ring) { 3320 ath12k_hal_srng_access_end(ab, srng); 3321 spin_unlock_bh(&srng->lock); 3322 ret = -ENOSPC; 3323 goto err_free_desc; 3324 } 3325 memset(reo_ent_ring, 0, sizeof(*reo_ent_ring)); 3326 3327 ath12k_hal_rx_buf_addr_info_set(&reo_ent_ring->buf_addr_info, link_paddr, 3328 cookie, 3329 idle_link_rbm); 3330 3331 mpdu_info = u32_encode_bits(1, RX_MPDU_DESC_INFO0_MSDU_COUNT) | 3332 u32_encode_bits(0, RX_MPDU_DESC_INFO0_FRAG_FLAG) | 3333 u32_encode_bits(1, RX_MPDU_DESC_INFO0_RAW_MPDU) | 3334 u32_encode_bits(1, RX_MPDU_DESC_INFO0_VALID_PN) | 3335 u32_encode_bits(rx_tid->tid, RX_MPDU_DESC_INFO0_TID); 3336 3337 reo_ent_ring->rx_mpdu_info.info0 = cpu_to_le32(mpdu_info); 3338 reo_ent_ring->rx_mpdu_info.peer_meta_data = 3339 reo_dest_ring->rx_mpdu_info.peer_meta_data; 3340 3341 if (ab->hw_params->reoq_lut_support) { 3342 reo_ent_ring->queue_addr_lo = reo_dest_ring->rx_mpdu_info.peer_meta_data; 3343 queue_addr_hi = 0; 3344 } else { 3345 reo_ent_ring->queue_addr_lo = 3346 cpu_to_le32(lower_32_bits(rx_tid->qbuf.paddr_aligned)); 3347 queue_addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned); 3348 } 3349 3350 reo_ent_ring->info0 = le32_encode_bits(queue_addr_hi, 3351 HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI) | 3352 le32_encode_bits(dst_ind, 3353 HAL_REO_ENTR_RING_INFO0_DEST_IND); 3354 3355 reo_ent_ring->info1 = le32_encode_bits(rx_tid->cur_sn, 3356 HAL_REO_ENTR_RING_INFO1_MPDU_SEQ_NUM); 3357 dest_ring_info0 = le32_get_bits(reo_dest_ring->info0, 3358 HAL_REO_DEST_RING_INFO0_SRC_LINK_ID); 3359 reo_ent_ring->info2 = 3360 cpu_to_le32(u32_get_bits(dest_ring_info0, 3361 HAL_REO_ENTR_RING_INFO2_SRC_LINK_ID)); 3362 3363 ath12k_hal_srng_access_end(ab, srng); 3364 spin_unlock_bh(&srng->lock); 3365 3366 return 0; 3367 3368 err_free_desc: 3369 spin_lock_bh(&dp->rx_desc_lock); 3370 desc_info->in_use = false; 3371 desc_info->skb = NULL; 3372 list_add_tail(&desc_info->list, &dp->rx_desc_free_list); 3373 spin_unlock_bh(&dp->rx_desc_lock); 3374 err_unmap_dma: 3375 dma_unmap_single(ab->dev, buf_paddr, defrag_skb->len + skb_tailroom(defrag_skb), 3376 
DMA_TO_DEVICE); 3377 return ret; 3378 } 3379 3380 static int ath12k_dp_rx_h_cmp_frags(struct ath12k_base *ab, 3381 struct sk_buff *a, struct sk_buff *b) 3382 { 3383 int frag1, frag2; 3384 3385 frag1 = ath12k_dp_rx_h_frag_no(ab, a); 3386 frag2 = ath12k_dp_rx_h_frag_no(ab, b); 3387 3388 return frag1 - frag2; 3389 } 3390 3391 static void ath12k_dp_rx_h_sort_frags(struct ath12k_base *ab, 3392 struct sk_buff_head *frag_list, 3393 struct sk_buff *cur_frag) 3394 { 3395 struct sk_buff *skb; 3396 int cmp; 3397 3398 skb_queue_walk(frag_list, skb) { 3399 cmp = ath12k_dp_rx_h_cmp_frags(ab, skb, cur_frag); 3400 if (cmp < 0) 3401 continue; 3402 __skb_queue_before(frag_list, skb, cur_frag); 3403 return; 3404 } 3405 __skb_queue_tail(frag_list, cur_frag); 3406 } 3407 3408 static u64 ath12k_dp_rx_h_get_pn(struct ath12k *ar, struct sk_buff *skb) 3409 { 3410 struct ieee80211_hdr *hdr; 3411 u64 pn = 0; 3412 u8 *ehdr; 3413 u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; 3414 3415 hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz); 3416 ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control); 3417 3418 pn = ehdr[0]; 3419 pn |= (u64)ehdr[1] << 8; 3420 pn |= (u64)ehdr[4] << 16; 3421 pn |= (u64)ehdr[5] << 24; 3422 pn |= (u64)ehdr[6] << 32; 3423 pn |= (u64)ehdr[7] << 40; 3424 3425 return pn; 3426 } 3427 3428 static bool 3429 ath12k_dp_rx_h_defrag_validate_incr_pn(struct ath12k *ar, struct ath12k_dp_rx_tid *rx_tid) 3430 { 3431 struct ath12k_base *ab = ar->ab; 3432 enum hal_encrypt_type encrypt_type; 3433 struct sk_buff *first_frag, *skb; 3434 struct hal_rx_desc *desc; 3435 u64 last_pn; 3436 u64 cur_pn; 3437 3438 first_frag = skb_peek(&rx_tid->rx_frags); 3439 desc = (struct hal_rx_desc *)first_frag->data; 3440 3441 encrypt_type = ath12k_dp_rx_h_enctype(ab, desc); 3442 if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 && 3443 encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 && 3444 encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 && 3445 encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256) 3446 return true; 3447 3448 last_pn = ath12k_dp_rx_h_get_pn(ar, first_frag); 3449 skb_queue_walk(&rx_tid->rx_frags, skb) { 3450 if (skb == first_frag) 3451 continue; 3452 3453 cur_pn = ath12k_dp_rx_h_get_pn(ar, skb); 3454 if (cur_pn != last_pn + 1) 3455 return false; 3456 last_pn = cur_pn; 3457 } 3458 return true; 3459 } 3460 3461 static int ath12k_dp_rx_frag_h_mpdu(struct ath12k *ar, 3462 struct sk_buff *msdu, 3463 struct hal_reo_dest_ring *ring_desc) 3464 { 3465 struct ath12k_base *ab = ar->ab; 3466 struct hal_rx_desc *rx_desc; 3467 struct ath12k_peer *peer; 3468 struct ath12k_dp_rx_tid *rx_tid; 3469 struct sk_buff *defrag_skb = NULL; 3470 u32 peer_id; 3471 u16 seqno, frag_no; 3472 u8 tid; 3473 int ret = 0; 3474 bool more_frags; 3475 3476 rx_desc = (struct hal_rx_desc *)msdu->data; 3477 peer_id = ath12k_dp_rx_h_peer_id(ab, rx_desc); 3478 tid = ath12k_dp_rx_h_tid(ab, rx_desc); 3479 seqno = ath12k_dp_rx_h_seq_no(ab, rx_desc); 3480 frag_no = ath12k_dp_rx_h_frag_no(ab, msdu); 3481 more_frags = ath12k_dp_rx_h_more_frags(ab, msdu); 3482 3483 if (!ath12k_dp_rx_h_seq_ctrl_valid(ab, rx_desc) || 3484 !ath12k_dp_rx_h_fc_valid(ab, rx_desc) || 3485 tid > IEEE80211_NUM_TIDS) 3486 return -EINVAL; 3487 3488 /* received unfragmented packet in reo 3489 * exception ring, this shouldn't happen 3490 * as these packets typically come from 3491 * reo2sw srngs. 
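 * A frame is treated as a fragment only when the fragment number in
 * its sequence control field is non-zero or the More Fragments bit
 * is set; a frame matching neither condition is rejected below.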
3492 */ 3493 if (WARN_ON_ONCE(!frag_no && !more_frags)) 3494 return -EINVAL; 3495 3496 spin_lock_bh(&ab->base_lock); 3497 peer = ath12k_peer_find_by_id(ab, peer_id); 3498 if (!peer) { 3499 ath12k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n", 3500 peer_id); 3501 ret = -ENOENT; 3502 goto out_unlock; 3503 } 3504 3505 if (!peer->dp_setup_done) { 3506 ath12k_warn(ab, "The peer %pM [%d] has uninitialized datapath\n", 3507 peer->addr, peer_id); 3508 ret = -ENOENT; 3509 goto out_unlock; 3510 } 3511 3512 rx_tid = &peer->rx_tid[tid]; 3513 3514 if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) || 3515 skb_queue_empty(&rx_tid->rx_frags)) { 3516 /* Flush stored fragments and start a new sequence */ 3517 ath12k_dp_rx_frags_cleanup(rx_tid, true); 3518 rx_tid->cur_sn = seqno; 3519 } 3520 3521 if (rx_tid->rx_frag_bitmap & BIT(frag_no)) { 3522 /* Fragment already present */ 3523 ret = -EINVAL; 3524 goto out_unlock; 3525 } 3526 3527 if ((!rx_tid->rx_frag_bitmap || frag_no > __fls(rx_tid->rx_frag_bitmap))) 3528 __skb_queue_tail(&rx_tid->rx_frags, msdu); 3529 else 3530 ath12k_dp_rx_h_sort_frags(ab, &rx_tid->rx_frags, msdu); 3531 3532 rx_tid->rx_frag_bitmap |= BIT(frag_no); 3533 if (!more_frags) 3534 rx_tid->last_frag_no = frag_no; 3535 3536 if (frag_no == 0) { 3537 rx_tid->dst_ring_desc = kmemdup(ring_desc, 3538 sizeof(*rx_tid->dst_ring_desc), 3539 GFP_ATOMIC); 3540 if (!rx_tid->dst_ring_desc) { 3541 ret = -ENOMEM; 3542 goto out_unlock; 3543 } 3544 } else { 3545 ath12k_dp_rx_link_desc_return(ab, &ring_desc->buf_addr_info, 3546 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 3547 } 3548 3549 if (!rx_tid->last_frag_no || 3550 rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) { 3551 mod_timer(&rx_tid->frag_timer, jiffies + 3552 ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS); 3553 goto out_unlock; 3554 } 3555 3556 spin_unlock_bh(&ab->base_lock); 3557 timer_delete_sync(&rx_tid->frag_timer); 3558 spin_lock_bh(&ab->base_lock); 3559 3560 peer = ath12k_peer_find_by_id(ab, peer_id); 3561 if (!peer) 3562 goto err_frags_cleanup; 3563 3564 if (!ath12k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid)) 3565 goto err_frags_cleanup; 3566 3567 if (ath12k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb)) 3568 goto err_frags_cleanup; 3569 3570 if (!defrag_skb) 3571 goto err_frags_cleanup; 3572 3573 if (ath12k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb)) 3574 goto err_frags_cleanup; 3575 3576 ath12k_dp_rx_frags_cleanup(rx_tid, false); 3577 goto out_unlock; 3578 3579 err_frags_cleanup: 3580 dev_kfree_skb_any(defrag_skb); 3581 ath12k_dp_rx_frags_cleanup(rx_tid, true); 3582 out_unlock: 3583 spin_unlock_bh(&ab->base_lock); 3584 return ret; 3585 } 3586 3587 static int 3588 ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc, 3589 struct list_head *used_list, 3590 bool drop, u32 cookie) 3591 { 3592 struct ath12k_base *ab = ar->ab; 3593 struct sk_buff *msdu; 3594 struct ath12k_skb_rxcb *rxcb; 3595 struct hal_rx_desc *rx_desc; 3596 u16 msdu_len; 3597 u32 hal_rx_desc_sz = ab->hal.hal_desc_sz; 3598 struct ath12k_rx_desc_info *desc_info; 3599 u64 desc_va; 3600 3601 desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 | 3602 le32_to_cpu(desc->buf_va_lo)); 3603 desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va); 3604 3605 /* retry manual desc retrieval */ 3606 if (!desc_info) { 3607 desc_info = ath12k_dp_get_rx_desc(ab, cookie); 3608 if (!desc_info) { 3609 ath12k_warn(ab, "Invalid cookie in DP rx error descriptor retrieval: 0x%x\n", 3610 cookie); 3611 return -EINVAL; 
3612 } 3613 } 3614 3615 if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC) 3616 ath12k_warn(ab, " RX Exception, Check HW CC implementation"); 3617 3618 msdu = desc_info->skb; 3619 desc_info->skb = NULL; 3620 3621 list_add_tail(&desc_info->list, used_list); 3622 3623 rxcb = ATH12K_SKB_RXCB(msdu); 3624 dma_unmap_single(ar->ab->dev, rxcb->paddr, 3625 msdu->len + skb_tailroom(msdu), 3626 DMA_FROM_DEVICE); 3627 3628 if (drop) { 3629 dev_kfree_skb_any(msdu); 3630 return 0; 3631 } 3632 3633 rcu_read_lock(); 3634 if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) { 3635 dev_kfree_skb_any(msdu); 3636 goto exit; 3637 } 3638 3639 if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) { 3640 dev_kfree_skb_any(msdu); 3641 goto exit; 3642 } 3643 3644 rx_desc = (struct hal_rx_desc *)msdu->data; 3645 msdu_len = ath12k_dp_rx_h_msdu_len(ar->ab, rx_desc); 3646 if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) { 3647 ath12k_warn(ar->ab, "invalid msdu leng %u", msdu_len); 3648 ath12k_dbg_dump(ar->ab, ATH12K_DBG_DATA, NULL, "", rx_desc, 3649 sizeof(*rx_desc)); 3650 dev_kfree_skb_any(msdu); 3651 goto exit; 3652 } 3653 3654 skb_put(msdu, hal_rx_desc_sz + msdu_len); 3655 3656 if (ath12k_dp_rx_frag_h_mpdu(ar, msdu, desc)) { 3657 dev_kfree_skb_any(msdu); 3658 ath12k_dp_rx_link_desc_return(ar->ab, &desc->buf_addr_info, 3659 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 3660 } 3661 exit: 3662 rcu_read_unlock(); 3663 return 0; 3664 } 3665 3666 int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi, 3667 int budget) 3668 { 3669 struct ath12k_hw_group *ag = ab->ag; 3670 struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES]; 3671 u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC]; 3672 int num_buffs_reaped[ATH12K_MAX_DEVICES] = {}; 3673 struct dp_link_desc_bank *link_desc_banks; 3674 enum hal_rx_buf_return_buf_manager rbm; 3675 struct hal_rx_msdu_link *link_desc_va; 3676 int tot_n_bufs_reaped, quota, ret, i; 3677 struct hal_reo_dest_ring *reo_desc; 3678 struct dp_rxdma_ring *rx_ring; 3679 struct dp_srng *reo_except; 3680 struct ath12k_hw_link *hw_links = ag->hw_links; 3681 struct ath12k_base *partner_ab; 3682 u8 hw_link_id, device_id; 3683 u32 desc_bank, num_msdus; 3684 struct hal_srng *srng; 3685 struct ath12k *ar; 3686 dma_addr_t paddr; 3687 bool is_frag; 3688 bool drop; 3689 int pdev_id; 3690 3691 tot_n_bufs_reaped = 0; 3692 quota = budget; 3693 3694 for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) 3695 INIT_LIST_HEAD(&rx_desc_used_list[device_id]); 3696 3697 reo_except = &ab->dp.reo_except_ring; 3698 3699 srng = &ab->hal.srng_list[reo_except->ring_id]; 3700 3701 spin_lock_bh(&srng->lock); 3702 3703 ath12k_hal_srng_access_begin(ab, srng); 3704 3705 while (budget && 3706 (reo_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) { 3707 drop = false; 3708 ab->device_stats.err_ring_pkts++; 3709 3710 ret = ath12k_hal_desc_reo_parse_err(ab, reo_desc, &paddr, 3711 &desc_bank); 3712 if (ret) { 3713 ath12k_warn(ab, "failed to parse error reo desc %d\n", 3714 ret); 3715 continue; 3716 } 3717 3718 hw_link_id = le32_get_bits(reo_desc->info0, 3719 HAL_REO_DEST_RING_INFO0_SRC_LINK_ID); 3720 device_id = hw_links[hw_link_id].device_id; 3721 partner_ab = ath12k_ag_to_ab(ag, device_id); 3722 3723 pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params, 3724 hw_links[hw_link_id].pdev_idx); 3725 ar = partner_ab->pdevs[pdev_id].ar; 3726 3727 link_desc_banks = partner_ab->dp.link_desc_banks; 3728 link_desc_va = link_desc_banks[desc_bank].vaddr + 3729 (paddr - link_desc_banks[desc_bank].paddr); 3730 
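/* The link descriptor enumerates the MSDU buffer cookies making up
 * this MPDU along with their return buffer manager (RBM); the RBM is
 * sanity checked below before the buffers are processed.
 */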
3730 ath12k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
3731 &rbm);
3732 if (rbm != partner_ab->dp.idle_link_rbm &&
3733 rbm != HAL_RX_BUF_RBM_SW3_BM &&
3734 rbm != partner_ab->hw_params->hal_params->rx_buf_rbm) {
3735 ab->device_stats.invalid_rbm++;
3736 ath12k_warn(ab, "invalid return buffer manager %d\n", rbm);
3737 ath12k_dp_rx_link_desc_return(partner_ab,
3738 &reo_desc->buf_addr_info,
3739 HAL_WBM_REL_BM_ACT_REL_MSDU);
3740 continue;
3741 }
3742
3743 is_frag = !!(le32_to_cpu(reo_desc->rx_mpdu_info.info0) &
3744 RX_MPDU_DESC_INFO0_FRAG_FLAG);
3745
3746 /* Process only rx fragments with one msdu per link desc below, and drop
3747 * msdus indicated due to error reasons.
3748 * Dynamic fragmentation is not supported for multi-link clients, so drop
3749 * the partner device buffers.
3750 */
3751 if (!is_frag || num_msdus > 1 ||
3752 partner_ab->device_id != ab->device_id) {
3753 drop = true;
3754
3755 /* Return the link desc back to the WBM idle list */
3756 ath12k_dp_rx_link_desc_return(partner_ab,
3757 &reo_desc->buf_addr_info,
3758 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3759 }
3760
3761 for (i = 0; i < num_msdus; i++) {
3762 if (!ath12k_dp_process_rx_err_buf(ar, reo_desc,
3763 &rx_desc_used_list[device_id],
3764 drop,
3765 msdu_cookies[i])) {
3766 num_buffs_reaped[device_id]++;
3767 tot_n_bufs_reaped++;
3768 }
3769 }
3770
3771 if (tot_n_bufs_reaped >= quota) {
3772 tot_n_bufs_reaped = quota;
3773 goto exit;
3774 }
3775
3776 budget = quota - tot_n_bufs_reaped;
3777 }
3778
3779 exit:
3780 ath12k_hal_srng_access_end(ab, srng);
3781
3782 spin_unlock_bh(&srng->lock);
3783
3784 for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) {
3785 if (!num_buffs_reaped[device_id])
3786 continue;
3787
3788 partner_ab = ath12k_ag_to_ab(ag, device_id);
3789 rx_ring = &partner_ab->dp.rx_refill_buf_ring;
3790
3791 ath12k_dp_rx_bufs_replenish(partner_ab, rx_ring,
3792 &rx_desc_used_list[device_id],
3793 num_buffs_reaped[device_id]);
3794 }
3795
3796 return tot_n_bufs_reaped;
3797 }
3798
3799 static void ath12k_dp_rx_null_q_desc_sg_drop(struct ath12k *ar,
3800 int msdu_len,
3801 struct sk_buff_head *msdu_list)
3802 {
3803 struct sk_buff *skb, *tmp;
3804 struct ath12k_skb_rxcb *rxcb;
3805 int n_buffs;
3806
3807 n_buffs = DIV_ROUND_UP(msdu_len,
3808 (DP_RX_BUFFER_SIZE - ar->ab->hal.hal_desc_sz));
3809
3810 skb_queue_walk_safe(msdu_list, skb, tmp) {
3811 rxcb = ATH12K_SKB_RXCB(skb);
3812 if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
3813 rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
3814 if (!n_buffs)
3815 break;
3816 __skb_unlink(skb, msdu_list);
3817 dev_kfree_skb_any(skb);
3818 n_buffs--;
3819 }
3820 }
3821 }
3822
3823 static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu,
3824 struct ath12k_dp_rx_info *rx_info,
3825 struct sk_buff_head *msdu_list)
3826 {
3827 struct ath12k_base *ab = ar->ab;
3828 u16 msdu_len;
3829 struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
3830 u8 l3pad_bytes;
3831 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
3832 u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
3833
3834 msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);
3835
3836 if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) {
3837 /* First buffer will be freed by the caller, so deduct its length */
3838 msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz);
3839 ath12k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
3840 return -EINVAL;
3841 }
3842
3843 /* Even after the above check has cleaned up the sg buffers in the msdu
3844 * list, any msdu received with the continuation flag set must be dropped
3845 * as invalid. This protects against a stray error frame carrying the flag.
3846 */
3847 if (rxcb->is_continuation)
3848 return -EINVAL;
3849
3850 if (!ath12k_dp_rx_h_msdu_done(ab, desc)) {
3851 ath12k_warn(ar->ab,
3852 "msdu_done bit not set in null_q_desc processing\n");
3853 __skb_queue_purge(msdu_list);
3854 return -EIO;
3855 }
3856
3857 /* Handle NULL queue descriptor violations arising out of a missing
3858 * REO queue for a given peer or a given TID. This typically
3859 * happens if a packet is received on a QoS-enabled TID before the
3860 * ADDBA negotiation for that TID completes, which is when the TID queue
3861 * is set up. It may also happen for MC/BC frames if they are not routed
3862 * to the non-QoS TID queue in the absence of any other default TID queue.
3863 * This error can show up in both the REO destination and WBM release rings.
3864 */
3865
3866 if (rxcb->is_frag) {
3867 skb_pull(msdu, hal_rx_desc_sz);
3868 } else {
3869 l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc);
3870
3871 if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
3872 return -EINVAL;
3873
3874 skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
3875 skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
3876 }
3877 if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, desc, msdu)))
3878 return -EINVAL;
3879
3880 ath12k_dp_rx_h_fetch_info(ab, desc, rx_info);
3881 ath12k_dp_rx_h_ppdu(ar, rx_info);
3882 ath12k_dp_rx_h_mpdu(ar, msdu, desc, rx_info);
3883
3884 rxcb->tid = rx_info->tid;
3885
3886 /* Note that the caller still has access to the msdu and will complete
3887 * rx with mac80211, so there is no need to clean up the msdu list here.
3888 */
3889
3890 return 0;
3891 }
3892
3893 static bool ath12k_dp_rx_h_reo_err(struct ath12k *ar, struct sk_buff *msdu,
3894 struct ath12k_dp_rx_info *rx_info,
3895 struct sk_buff_head *msdu_list)
3896 {
3897 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
3898 bool drop = false;
3899
3900 ar->ab->device_stats.reo_error[rxcb->err_code]++;
3901
3902 switch (rxcb->err_code) {
3903 case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
3904 if (ath12k_dp_rx_h_null_q_desc(ar, msdu, rx_info, msdu_list))
3905 drop = true;
3906 break;
3907 case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
3908 /* TODO: Do not drop PN-failed packets in the driver;
3909 * instead, it would be better to drop such packets in mac80211
3910 * after incrementing the replay counters.
3911 */
3912 fallthrough;
3913 default:
3914 /* TODO: Review other errors and report them to mac80211
3915 * as appropriate.
3916 */ 3917 drop = true; 3918 break; 3919 } 3920 3921 return drop; 3922 } 3923 3924 static bool ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu, 3925 struct ath12k_dp_rx_info *rx_info) 3926 { 3927 struct ath12k_base *ab = ar->ab; 3928 u16 msdu_len; 3929 struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; 3930 u8 l3pad_bytes; 3931 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 3932 u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; 3933 3934 rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, desc); 3935 rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, desc); 3936 3937 l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc); 3938 msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc); 3939 3940 if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE) { 3941 ath12k_dbg(ab, ATH12K_DBG_DATA, 3942 "invalid msdu len in tkip mic err %u\n", msdu_len); 3943 ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", desc, 3944 sizeof(*desc)); 3945 return true; 3946 } 3947 3948 skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len); 3949 skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes); 3950 3951 if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, desc, msdu))) 3952 return true; 3953 3954 ath12k_dp_rx_h_ppdu(ar, rx_info); 3955 3956 rx_info->rx_status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR | 3957 RX_FLAG_DECRYPTED); 3958 3959 ath12k_dp_rx_h_undecap(ar, msdu, desc, 3960 HAL_ENCRYPT_TYPE_TKIP_MIC, rx_info->rx_status, false); 3961 return false; 3962 } 3963 3964 static bool ath12k_dp_rx_h_rxdma_err(struct ath12k *ar, struct sk_buff *msdu, 3965 struct ath12k_dp_rx_info *rx_info) 3966 { 3967 struct ath12k_base *ab = ar->ab; 3968 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 3969 struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data; 3970 bool drop = false; 3971 u32 err_bitmap; 3972 3973 ar->ab->device_stats.rxdma_error[rxcb->err_code]++; 3974 3975 switch (rxcb->err_code) { 3976 case HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR: 3977 case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR: 3978 err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc); 3979 if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) { 3980 ath12k_dp_rx_h_fetch_info(ab, rx_desc, rx_info); 3981 drop = ath12k_dp_rx_h_tkip_mic_err(ar, msdu, rx_info); 3982 break; 3983 } 3984 fallthrough; 3985 default: 3986 /* TODO: Review other rxdma error code to check if anything is 3987 * worth reporting to mac80211 3988 */ 3989 drop = true; 3990 break; 3991 } 3992 3993 return drop; 3994 } 3995 3996 static void ath12k_dp_rx_wbm_err(struct ath12k *ar, 3997 struct napi_struct *napi, 3998 struct sk_buff *msdu, 3999 struct sk_buff_head *msdu_list) 4000 { 4001 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 4002 struct ieee80211_rx_status rxs = {}; 4003 struct ath12k_dp_rx_info rx_info; 4004 bool drop = true; 4005 4006 rx_info.addr2_present = false; 4007 rx_info.rx_status = &rxs; 4008 4009 switch (rxcb->err_rel_src) { 4010 case HAL_WBM_REL_SRC_MODULE_REO: 4011 drop = ath12k_dp_rx_h_reo_err(ar, msdu, &rx_info, msdu_list); 4012 break; 4013 case HAL_WBM_REL_SRC_MODULE_RXDMA: 4014 drop = ath12k_dp_rx_h_rxdma_err(ar, msdu, &rx_info); 4015 break; 4016 default: 4017 /* msdu will get freed */ 4018 break; 4019 } 4020 4021 if (drop) { 4022 dev_kfree_skb_any(msdu); 4023 return; 4024 } 4025 4026 rx_info.rx_status->flag |= RX_FLAG_SKIP_MONITOR; 4027 4028 ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_info); 4029 } 4030 4031 int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab, 4032 struct napi_struct *napi, int budget) 4033 { 4034 struct list_head 
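/* Per-device lists of rx descriptors reaped from the WBM error release
 * ring; they are used further below to replenish each partner device's
 * rx refill ring.
 */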
rx_desc_used_list[ATH12K_MAX_DEVICES];
4035 struct ath12k_hw_group *ag = ab->ag;
4036 struct ath12k *ar;
4037 struct ath12k_dp *dp = &ab->dp;
4038 struct dp_rxdma_ring *rx_ring;
4039 struct hal_rx_wbm_rel_info err_info;
4040 struct hal_srng *srng;
4041 struct sk_buff *msdu;
4042 struct sk_buff_head msdu_list, scatter_msdu_list;
4043 struct ath12k_skb_rxcb *rxcb;
4044 void *rx_desc;
4045 int num_buffs_reaped[ATH12K_MAX_DEVICES] = {};
4046 int total_num_buffs_reaped = 0;
4047 struct ath12k_rx_desc_info *desc_info;
4048 struct ath12k_device_dp_stats *device_stats = &ab->device_stats;
4049 struct ath12k_hw_link *hw_links = ag->hw_links;
4050 struct ath12k_base *partner_ab;
4051 u8 hw_link_id, device_id;
4052 int ret, pdev_id;
4053 struct hal_rx_desc *msdu_data;
4054
4055 __skb_queue_head_init(&msdu_list);
4056 __skb_queue_head_init(&scatter_msdu_list);
4057
4058 for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++)
4059 INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
4060
4061 srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
4062 spin_lock_bh(&srng->lock);
4063
4064 ath12k_hal_srng_access_begin(ab, srng);
4065
4066 while (budget) {
4067 rx_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng);
4068 if (!rx_desc)
4069 break;
4070
4071 ret = ath12k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
4072 if (ret) {
4073 ath12k_warn(ab,
4074 "failed to parse rx error in wbm_rel ring desc %d\n",
4075 ret);
4076 continue;
4077 }
4078
4079 desc_info = err_info.rx_desc;
4080
4081 /* retry manual desc retrieval if hw cc is not done */
4082 if (!desc_info) {
4083 desc_info = ath12k_dp_get_rx_desc(ab, err_info.cookie);
4084 if (!desc_info) {
4085 ath12k_warn(ab, "Invalid cookie in DP WBM rx error descriptor retrieval: 0x%x\n",
4086 err_info.cookie);
4087 continue;
4088 }
4089 }
4090
4091 if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
4092 ath12k_warn(ab, "WBM RX err, check HW CC implementation");
4093
4094 msdu = desc_info->skb;
4095 desc_info->skb = NULL;
4096
4097 device_id = desc_info->device_id;
4098 partner_ab = ath12k_ag_to_ab(ag, device_id);
4099 if (unlikely(!partner_ab)) {
4100 dev_kfree_skb_any(msdu);
4101
4102 /* In case the continuation bit was set
4103 * in a previous record, clean up scatter_msdu_list
4104 */
4105 ath12k_dp_clean_up_skb_list(&scatter_msdu_list);
4106 continue;
4107 }
4108
4109 list_add_tail(&desc_info->list, &rx_desc_used_list[device_id]);
4110
4111 rxcb = ATH12K_SKB_RXCB(msdu);
4112 dma_unmap_single(partner_ab->dev, rxcb->paddr,
4113 msdu->len + skb_tailroom(msdu),
4114 DMA_FROM_DEVICE);
4115
4116 num_buffs_reaped[device_id]++;
4117 total_num_buffs_reaped++;
4118
4119 if (!err_info.continuation)
4120 budget--;
4121
4122 if (err_info.push_reason !=
4123 HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
4124 dev_kfree_skb_any(msdu);
4125 continue;
4126 }
4127
4128 msdu_data = (struct hal_rx_desc *)msdu->data;
4129 rxcb->err_rel_src = err_info.err_rel_src;
4130 rxcb->err_code = err_info.err_code;
4131 rxcb->is_first_msdu = err_info.first_msdu;
4132 rxcb->is_last_msdu = err_info.last_msdu;
4133 rxcb->is_continuation = err_info.continuation;
4134 rxcb->rx_desc = msdu_data;
4135
4136 if (err_info.continuation) {
4137 __skb_queue_tail(&scatter_msdu_list, msdu);
4138 continue;
4139 }
4140
4141 hw_link_id = ath12k_dp_rx_get_msdu_src_link(partner_ab,
4142 msdu_data);
4143 if (hw_link_id >= ATH12K_GROUP_MAX_RADIO) {
4144 dev_kfree_skb_any(msdu);
4145
4146 /* In case the continuation bit was set
4147 * in a previous record, clean up scatter_msdu_list
4148 */
4149 ath12k_dp_clean_up_skb_list(&scatter_msdu_list);
4150 continue;
4151 }
4152
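/* Tag any queued scatter segments with the hw link id resolved from the
 * final (non-continuation) msdu and move them onto msdu_list so the whole
 * buffer chain is handed off together.
 */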
4153 if (!skb_queue_empty(&scatter_msdu_list)) {
4154 struct sk_buff *msdu;
4155
4156 skb_queue_walk(&scatter_msdu_list, msdu) {
4157 rxcb = ATH12K_SKB_RXCB(msdu);
4158 rxcb->hw_link_id = hw_link_id;
4159 }
4160
4161 skb_queue_splice_tail_init(&scatter_msdu_list,
4162 &msdu_list);
4163 }
4164
4165 rxcb = ATH12K_SKB_RXCB(msdu);
4166 rxcb->hw_link_id = hw_link_id;
4167 __skb_queue_tail(&msdu_list, msdu);
4168 }
4169
4170 /* In case the continuation bit was set in the
4171 * last record, clean up scatter_msdu_list
4172 */
4173 ath12k_dp_clean_up_skb_list(&scatter_msdu_list);
4174
4175 ath12k_hal_srng_access_end(ab, srng);
4176
4177 spin_unlock_bh(&srng->lock);
4178
4179 if (!total_num_buffs_reaped)
4180 goto done;
4181
4182 for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) {
4183 if (!num_buffs_reaped[device_id])
4184 continue;
4185
4186 partner_ab = ath12k_ag_to_ab(ag, device_id);
4187 rx_ring = &partner_ab->dp.rx_refill_buf_ring;
4188
4189 ath12k_dp_rx_bufs_replenish(ab, rx_ring,
4190 &rx_desc_used_list[device_id],
4191 num_buffs_reaped[device_id]);
4192 }
4193
4194 rcu_read_lock();
4195 while ((msdu = __skb_dequeue(&msdu_list))) {
4196 rxcb = ATH12K_SKB_RXCB(msdu);
4197 hw_link_id = rxcb->hw_link_id;
4198
4199 device_id = hw_links[hw_link_id].device_id;
4200 partner_ab = ath12k_ag_to_ab(ag, device_id);
4201 if (unlikely(!partner_ab)) {
4202 ath12k_dbg(ab, ATH12K_DBG_DATA,
4203 "Unable to process WBM error msdu due to invalid hw link id %d device id %d\n",
4204 hw_link_id, device_id);
4205 dev_kfree_skb_any(msdu);
4206 continue;
4207 }
4208
4209 pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params,
4210 hw_links[hw_link_id].pdev_idx);
4211 ar = partner_ab->pdevs[pdev_id].ar;
4212
4213 if (!ar || !rcu_dereference(ar->ab->pdevs_active[pdev_id])) {
4214 dev_kfree_skb_any(msdu);
4215 continue;
4216 }
4217
4218 if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) {
4219 dev_kfree_skb_any(msdu);
4220 continue;
4221 }
4222
4223 if (rxcb->err_rel_src < HAL_WBM_REL_SRC_MODULE_MAX) {
4224 device_id = ar->ab->device_id;
4225 device_stats->rx_wbm_rel_source[rxcb->err_rel_src][device_id]++;
4226 }
4227
4228 ath12k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list);
4229 }
4230 rcu_read_unlock();
4231 done:
4232 return total_num_buffs_reaped;
4233 }
4234
4235 void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab)
4236 {
4237 struct ath12k_dp *dp = &ab->dp;
4238 struct hal_tlv_64_hdr *hdr;
4239 struct hal_srng *srng;
4240 struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
4241 bool found = false;
4242 u16 tag;
4243 struct hal_reo_status reo_status;
4244
4245 srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];
4246
4247 memset(&reo_status, 0, sizeof(reo_status));
4248
4249 spin_lock_bh(&srng->lock);
4250
4251 ath12k_hal_srng_access_begin(ab, srng);
4252
4253 while ((hdr = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
4254 tag = le64_get_bits(hdr->tl, HAL_SRNG_TLV_HDR_TAG);
4255
4256 switch (tag) {
4257 case HAL_REO_GET_QUEUE_STATS_STATUS:
4258 ath12k_hal_reo_status_queue_stats(ab, hdr,
4259 &reo_status);
4260 break;
4261 case HAL_REO_FLUSH_QUEUE_STATUS:
4262 ath12k_hal_reo_flush_queue_status(ab, hdr,
4263 &reo_status);
4264 break;
4265 case HAL_REO_FLUSH_CACHE_STATUS:
4266 ath12k_hal_reo_flush_cache_status(ab, hdr,
4267 &reo_status);
4268 break;
4269 case HAL_REO_UNBLOCK_CACHE_STATUS:
4270 ath12k_hal_reo_unblk_cache_status(ab, hdr,
4271 &reo_status);
4272 break;
4273 case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
4274
ath12k_hal_reo_flush_timeout_list_status(ab, hdr, 4275 &reo_status); 4276 break; 4277 case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS: 4278 ath12k_hal_reo_desc_thresh_reached_status(ab, hdr, 4279 &reo_status); 4280 break; 4281 case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS: 4282 ath12k_hal_reo_update_rx_reo_queue_status(ab, hdr, 4283 &reo_status); 4284 break; 4285 default: 4286 ath12k_warn(ab, "Unknown reo status type %d\n", tag); 4287 continue; 4288 } 4289 4290 spin_lock_bh(&dp->reo_cmd_lock); 4291 list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) { 4292 if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) { 4293 found = true; 4294 list_del(&cmd->list); 4295 break; 4296 } 4297 } 4298 spin_unlock_bh(&dp->reo_cmd_lock); 4299 4300 if (found) { 4301 cmd->handler(dp, (void *)&cmd->data, 4302 reo_status.uniform_hdr.cmd_status); 4303 kfree(cmd); 4304 } 4305 4306 found = false; 4307 } 4308 4309 ath12k_hal_srng_access_end(ab, srng); 4310 4311 spin_unlock_bh(&srng->lock); 4312 } 4313 4314 void ath12k_dp_rx_free(struct ath12k_base *ab) 4315 { 4316 struct ath12k_dp *dp = &ab->dp; 4317 struct dp_srng *srng; 4318 int i; 4319 4320 ath12k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring); 4321 4322 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { 4323 if (ab->hw_params->rx_mac_buf_ring) 4324 ath12k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]); 4325 if (!ab->hw_params->rxdma1_enable) { 4326 srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring; 4327 ath12k_dp_srng_cleanup(ab, srng); 4328 } 4329 } 4330 4331 for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) 4332 ath12k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]); 4333 4334 ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring); 4335 4336 ath12k_dp_rxdma_buf_free(ab); 4337 } 4338 4339 void ath12k_dp_rx_pdev_free(struct ath12k_base *ab, int mac_id) 4340 { 4341 struct ath12k *ar = ab->pdevs[mac_id].ar; 4342 4343 ath12k_dp_rx_pdev_srng_free(ar); 4344 } 4345 4346 int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab) 4347 { 4348 struct ath12k_dp *dp = &ab->dp; 4349 struct htt_rx_ring_tlv_filter tlv_filter = {}; 4350 u32 ring_id; 4351 int ret; 4352 u32 hal_rx_desc_sz = ab->hal.hal_desc_sz; 4353 4354 ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id; 4355 4356 tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING; 4357 tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR; 4358 tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST | 4359 HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST | 4360 HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA; 4361 tlv_filter.offset_valid = true; 4362 tlv_filter.rx_packet_offset = hal_rx_desc_sz; 4363 4364 tlv_filter.rx_mpdu_start_offset = 4365 ab->hal_rx_ops->rx_desc_get_mpdu_start_offset(); 4366 tlv_filter.rx_msdu_end_offset = 4367 ab->hal_rx_ops->rx_desc_get_msdu_end_offset(); 4368 4369 if (ath12k_dp_wmask_compaction_rx_tlv_supported(ab)) { 4370 tlv_filter.rx_mpdu_start_wmask = 4371 ab->hw_params->hal_ops->rxdma_ring_wmask_rx_mpdu_start(); 4372 tlv_filter.rx_msdu_end_wmask = 4373 ab->hw_params->hal_ops->rxdma_ring_wmask_rx_msdu_end(); 4374 ath12k_dbg(ab, ATH12K_DBG_DATA, 4375 "Configuring compact tlv masks rx_mpdu_start_wmask 0x%x rx_msdu_end_wmask 0x%x\n", 4376 tlv_filter.rx_mpdu_start_wmask, tlv_filter.rx_msdu_end_wmask); 4377 } 4378 4379 ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, 0, 4380 HAL_RXDMA_BUF, 4381 DP_RXDMA_REFILL_RING_SIZE, 4382 &tlv_filter); 4383 4384 return ret; 4385 } 4386 4387 int 
ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab) 4388 { 4389 struct ath12k_dp *dp = &ab->dp; 4390 struct htt_rx_ring_tlv_filter tlv_filter = {}; 4391 u32 ring_id; 4392 int ret = 0; 4393 u32 hal_rx_desc_sz = ab->hal.hal_desc_sz; 4394 int i; 4395 4396 ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id; 4397 4398 tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING; 4399 tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR; 4400 tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST | 4401 HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST | 4402 HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA; 4403 tlv_filter.offset_valid = true; 4404 tlv_filter.rx_packet_offset = hal_rx_desc_sz; 4405 4406 tlv_filter.rx_header_offset = offsetof(struct hal_rx_desc_wcn7850, pkt_hdr_tlv); 4407 4408 tlv_filter.rx_mpdu_start_offset = 4409 ab->hal_rx_ops->rx_desc_get_mpdu_start_offset(); 4410 tlv_filter.rx_msdu_end_offset = 4411 ab->hal_rx_ops->rx_desc_get_msdu_end_offset(); 4412 4413 /* TODO: Selectively subscribe to required qwords within msdu_end 4414 * and mpdu_start and setup the mask in below msg 4415 * and modify the rx_desc struct 4416 */ 4417 4418 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { 4419 ring_id = dp->rx_mac_buf_ring[i].ring_id; 4420 ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, i, 4421 HAL_RXDMA_BUF, 4422 DP_RXDMA_REFILL_RING_SIZE, 4423 &tlv_filter); 4424 } 4425 4426 return ret; 4427 } 4428 4429 int ath12k_dp_rx_htt_setup(struct ath12k_base *ab) 4430 { 4431 struct ath12k_dp *dp = &ab->dp; 4432 u32 ring_id; 4433 int i, ret; 4434 4435 /* TODO: Need to verify the HTT setup for QCN9224 */ 4436 ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id; 4437 ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, 0, HAL_RXDMA_BUF); 4438 if (ret) { 4439 ath12k_warn(ab, "failed to configure rx_refill_buf_ring %d\n", 4440 ret); 4441 return ret; 4442 } 4443 4444 if (ab->hw_params->rx_mac_buf_ring) { 4445 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { 4446 ring_id = dp->rx_mac_buf_ring[i].ring_id; 4447 ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, 4448 i, HAL_RXDMA_BUF); 4449 if (ret) { 4450 ath12k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n", 4451 i, ret); 4452 return ret; 4453 } 4454 } 4455 } 4456 4457 for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) { 4458 ring_id = dp->rxdma_err_dst_ring[i].ring_id; 4459 ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, 4460 i, HAL_RXDMA_DST); 4461 if (ret) { 4462 ath12k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n", 4463 i, ret); 4464 return ret; 4465 } 4466 } 4467 4468 if (ab->hw_params->rxdma1_enable) { 4469 ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id; 4470 ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, 4471 0, HAL_RXDMA_MONITOR_BUF); 4472 if (ret) { 4473 ath12k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n", 4474 ret); 4475 return ret; 4476 } 4477 } else { 4478 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { 4479 ring_id = 4480 dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id; 4481 ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, i, 4482 HAL_RXDMA_MONITOR_STATUS); 4483 if (ret) { 4484 ath12k_warn(ab, 4485 "failed to configure mon_status_refill_ring%d %d\n", 4486 i, ret); 4487 return ret; 4488 } 4489 } 4490 } 4491 4492 ret = ab->hw_params->hw_ops->rxdma_ring_sel_config(ab); 4493 if (ret) { 4494 ath12k_warn(ab, "failed to setup rxdma ring selection config\n"); 4495 return ret; 4496 } 4497 4498 return 0; 4499 } 4500 4501 int 
ath12k_dp_rx_alloc(struct ath12k_base *ab) 4502 { 4503 struct ath12k_dp *dp = &ab->dp; 4504 struct dp_srng *srng; 4505 int i, ret; 4506 4507 idr_init(&dp->rxdma_mon_buf_ring.bufs_idr); 4508 spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock); 4509 4510 ret = ath12k_dp_srng_setup(ab, 4511 &dp->rx_refill_buf_ring.refill_buf_ring, 4512 HAL_RXDMA_BUF, 0, 0, 4513 DP_RXDMA_BUF_RING_SIZE); 4514 if (ret) { 4515 ath12k_warn(ab, "failed to setup rx_refill_buf_ring\n"); 4516 return ret; 4517 } 4518 4519 if (ab->hw_params->rx_mac_buf_ring) { 4520 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { 4521 ret = ath12k_dp_srng_setup(ab, 4522 &dp->rx_mac_buf_ring[i], 4523 HAL_RXDMA_BUF, 1, 4524 i, DP_RX_MAC_BUF_RING_SIZE); 4525 if (ret) { 4526 ath12k_warn(ab, "failed to setup rx_mac_buf_ring %d\n", 4527 i); 4528 return ret; 4529 } 4530 } 4531 } 4532 4533 for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) { 4534 ret = ath12k_dp_srng_setup(ab, &dp->rxdma_err_dst_ring[i], 4535 HAL_RXDMA_DST, 0, i, 4536 DP_RXDMA_ERR_DST_RING_SIZE); 4537 if (ret) { 4538 ath12k_warn(ab, "failed to setup rxdma_err_dst_ring %d\n", i); 4539 return ret; 4540 } 4541 } 4542 4543 if (ab->hw_params->rxdma1_enable) { 4544 ret = ath12k_dp_srng_setup(ab, 4545 &dp->rxdma_mon_buf_ring.refill_buf_ring, 4546 HAL_RXDMA_MONITOR_BUF, 0, 0, 4547 DP_RXDMA_MONITOR_BUF_RING_SIZE(ab)); 4548 if (ret) { 4549 ath12k_warn(ab, "failed to setup HAL_RXDMA_MONITOR_BUF\n"); 4550 return ret; 4551 } 4552 } else { 4553 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { 4554 idr_init(&dp->rx_mon_status_refill_ring[i].bufs_idr); 4555 spin_lock_init(&dp->rx_mon_status_refill_ring[i].idr_lock); 4556 } 4557 4558 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { 4559 srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring; 4560 ret = ath12k_dp_srng_setup(ab, srng, 4561 HAL_RXDMA_MONITOR_STATUS, 0, i, 4562 DP_RXDMA_MON_STATUS_RING_SIZE); 4563 if (ret) { 4564 ath12k_warn(ab, "failed to setup mon status ring %d\n", 4565 i); 4566 return ret; 4567 } 4568 } 4569 } 4570 4571 ret = ath12k_dp_rxdma_buf_setup(ab); 4572 if (ret) { 4573 ath12k_warn(ab, "failed to setup rxdma ring\n"); 4574 return ret; 4575 } 4576 4577 return 0; 4578 } 4579 4580 int ath12k_dp_rx_pdev_alloc(struct ath12k_base *ab, int mac_id) 4581 { 4582 struct ath12k *ar = ab->pdevs[mac_id].ar; 4583 struct ath12k_pdev_dp *dp = &ar->dp; 4584 u32 ring_id; 4585 int i; 4586 int ret; 4587 4588 if (!ab->hw_params->rxdma1_enable) 4589 goto out; 4590 4591 ret = ath12k_dp_rx_pdev_srng_alloc(ar); 4592 if (ret) { 4593 ath12k_warn(ab, "failed to setup rx srngs\n"); 4594 return ret; 4595 } 4596 4597 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { 4598 ring_id = dp->rxdma_mon_dst_ring[i].ring_id; 4599 ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, 4600 mac_id + i, 4601 HAL_RXDMA_MONITOR_DST); 4602 if (ret) { 4603 ath12k_warn(ab, 4604 "failed to configure rxdma_mon_dst_ring %d %d\n", 4605 i, ret); 4606 return ret; 4607 } 4608 } 4609 out: 4610 return 0; 4611 } 4612 4613 static int ath12k_dp_rx_pdev_mon_status_attach(struct ath12k *ar) 4614 { 4615 struct ath12k_pdev_dp *dp = &ar->dp; 4616 struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&dp->mon_data; 4617 4618 skb_queue_head_init(&pmon->rx_status_q); 4619 4620 pmon->mon_ppdu_status = DP_PPDU_STATUS_START; 4621 4622 memset(&pmon->rx_mon_stats, 0, 4623 sizeof(pmon->rx_mon_stats)); 4624 return 0; 4625 } 4626 4627 int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar) 4628 { 4629 struct ath12k_pdev_dp *dp = &ar->dp; 4630 struct ath12k_mon_data 
*pmon = &dp->mon_data; 4631 int ret = 0; 4632 4633 ret = ath12k_dp_rx_pdev_mon_status_attach(ar); 4634 if (ret) { 4635 ath12k_warn(ar->ab, "pdev_mon_status_attach() failed"); 4636 return ret; 4637 } 4638 4639 pmon->mon_last_linkdesc_paddr = 0; 4640 pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1; 4641 spin_lock_init(&pmon->mon_lock); 4642 4643 if (!ar->ab->hw_params->rxdma1_enable) 4644 return 0; 4645 4646 INIT_LIST_HEAD(&pmon->dp_rx_mon_mpdu_list); 4647 pmon->mon_mpdu = NULL; 4648 4649 return 0; 4650 } 4651