// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/ieee80211.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <crypto/hash.h>
#include "core.h"
#include "debug.h"
#include "hal_desc.h"
#include "hw.h"
#include "dp_rx.h"
#include "hal_rx.h"
#include "dp_tx.h"
#include "peer.h"
#include "dp_mon.h"
#include "debugfs_htt_stats.h"

#define ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)

static enum hal_encrypt_type ath12k_dp_rx_h_enctype(struct ath12k_base *ab,
						    struct hal_rx_desc *desc)
{
	if (!ab->hal_rx_ops->rx_desc_encrypt_valid(desc))
		return HAL_ENCRYPT_TYPE_OPEN;

	return ab->hal_rx_ops->rx_desc_get_encrypt_type(desc);
}

u8 ath12k_dp_rx_h_decap_type(struct ath12k_base *ab,
			     struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_decap_type(desc);
}

static u8 ath12k_dp_rx_h_mesh_ctl_present(struct ath12k_base *ab,
					  struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_mesh_ctl(desc);
}

static bool ath12k_dp_rx_h_seq_ctrl_valid(struct ath12k_base *ab,
					  struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
}

static bool ath12k_dp_rx_h_fc_valid(struct ath12k_base *ab,
				    struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_mpdu_fc_valid(desc);
}

static bool ath12k_dp_rx_h_more_frags(struct ath12k_base *ab,
				      struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data + ab->hal.hal_desc_sz);
	return ieee80211_has_morefrags(hdr->frame_control);
}

static u16 ath12k_dp_rx_h_frag_no(struct ath12k_base *ab,
				  struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data + ab->hal.hal_desc_sz);
	return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
}

static u16 ath12k_dp_rx_h_seq_no(struct ath12k_base *ab,
				 struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_mpdu_start_seq_no(desc);
}

static bool ath12k_dp_rx_h_msdu_done(struct ath12k_base *ab,
				     struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->dp_rx_h_msdu_done(desc);
}

static bool ath12k_dp_rx_h_l4_cksum_fail(struct ath12k_base *ab,
					 struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->dp_rx_h_l4_cksum_fail(desc);
}

static bool ath12k_dp_rx_h_ip_cksum_fail(struct ath12k_base *ab,
					 struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->dp_rx_h_ip_cksum_fail(desc);
}

static bool ath12k_dp_rx_h_is_decrypted(struct ath12k_base *ab,
					struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->dp_rx_h_is_decrypted(desc);
}

u32 ath12k_dp_rx_h_mpdu_err(struct ath12k_base *ab,
			    struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->dp_rx_h_mpdu_err(desc);
}

static u16 ath12k_dp_rx_h_msdu_len(struct ath12k_base *ab,
				   struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_msdu_len(desc);
}

static u8 ath12k_dp_rx_h_sgi(struct ath12k_base *ab,
			     struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_msdu_sgi(desc);
}

static u8 ath12k_dp_rx_h_rate_mcs(struct ath12k_base *ab,
				  struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_msdu_rate_mcs(desc);
}

static u8
ath12k_dp_rx_h_rx_bw(struct ath12k_base *ab, 130 struct hal_rx_desc *desc) 131 { 132 return ab->hal_rx_ops->rx_desc_get_msdu_rx_bw(desc); 133 } 134 135 static u32 ath12k_dp_rx_h_freq(struct ath12k_base *ab, 136 struct hal_rx_desc *desc) 137 { 138 return ab->hal_rx_ops->rx_desc_get_msdu_freq(desc); 139 } 140 141 static u8 ath12k_dp_rx_h_pkt_type(struct ath12k_base *ab, 142 struct hal_rx_desc *desc) 143 { 144 return ab->hal_rx_ops->rx_desc_get_msdu_pkt_type(desc); 145 } 146 147 static u8 ath12k_dp_rx_h_nss(struct ath12k_base *ab, 148 struct hal_rx_desc *desc) 149 { 150 return hweight8(ab->hal_rx_ops->rx_desc_get_msdu_nss(desc)); 151 } 152 153 static u8 ath12k_dp_rx_h_tid(struct ath12k_base *ab, 154 struct hal_rx_desc *desc) 155 { 156 return ab->hal_rx_ops->rx_desc_get_mpdu_tid(desc); 157 } 158 159 static u16 ath12k_dp_rx_h_peer_id(struct ath12k_base *ab, 160 struct hal_rx_desc *desc) 161 { 162 return ab->hal_rx_ops->rx_desc_get_mpdu_peer_id(desc); 163 } 164 165 u8 ath12k_dp_rx_h_l3pad(struct ath12k_base *ab, 166 struct hal_rx_desc *desc) 167 { 168 return ab->hal_rx_ops->rx_desc_get_l3_pad_bytes(desc); 169 } 170 171 static bool ath12k_dp_rx_h_first_msdu(struct ath12k_base *ab, 172 struct hal_rx_desc *desc) 173 { 174 return ab->hal_rx_ops->rx_desc_get_first_msdu(desc); 175 } 176 177 static bool ath12k_dp_rx_h_last_msdu(struct ath12k_base *ab, 178 struct hal_rx_desc *desc) 179 { 180 return ab->hal_rx_ops->rx_desc_get_last_msdu(desc); 181 } 182 183 static void ath12k_dp_rx_desc_end_tlv_copy(struct ath12k_base *ab, 184 struct hal_rx_desc *fdesc, 185 struct hal_rx_desc *ldesc) 186 { 187 ab->hal_rx_ops->rx_desc_copy_end_tlv(fdesc, ldesc); 188 } 189 190 static void ath12k_dp_rxdesc_set_msdu_len(struct ath12k_base *ab, 191 struct hal_rx_desc *desc, 192 u16 len) 193 { 194 ab->hal_rx_ops->rx_desc_set_msdu_len(desc, len); 195 } 196 197 static bool ath12k_dp_rx_h_is_da_mcbc(struct ath12k_base *ab, 198 struct hal_rx_desc *desc) 199 { 200 return (ath12k_dp_rx_h_first_msdu(ab, desc) && 201 ab->hal_rx_ops->rx_desc_is_da_mcbc(desc)); 202 } 203 204 static bool ath12k_dp_rxdesc_mac_addr2_valid(struct ath12k_base *ab, 205 struct hal_rx_desc *desc) 206 { 207 return ab->hal_rx_ops->rx_desc_mac_addr2_valid(desc); 208 } 209 210 static u8 *ath12k_dp_rxdesc_get_mpdu_start_addr2(struct ath12k_base *ab, 211 struct hal_rx_desc *desc) 212 { 213 return ab->hal_rx_ops->rx_desc_mpdu_start_addr2(desc); 214 } 215 216 static void ath12k_dp_rx_desc_get_dot11_hdr(struct ath12k_base *ab, 217 struct hal_rx_desc *desc, 218 struct ieee80211_hdr *hdr) 219 { 220 ab->hal_rx_ops->rx_desc_get_dot11_hdr(desc, hdr); 221 } 222 223 static void ath12k_dp_rx_desc_get_crypto_header(struct ath12k_base *ab, 224 struct hal_rx_desc *desc, 225 u8 *crypto_hdr, 226 enum hal_encrypt_type enctype) 227 { 228 ab->hal_rx_ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype); 229 } 230 231 static inline u8 ath12k_dp_rx_get_msdu_src_link(struct ath12k_base *ab, 232 struct hal_rx_desc *desc) 233 { 234 return ab->hal_rx_ops->rx_desc_get_msdu_src_link_id(desc); 235 } 236 237 static void ath12k_dp_clean_up_skb_list(struct sk_buff_head *skb_list) 238 { 239 struct sk_buff *skb; 240 241 while ((skb = __skb_dequeue(skb_list))) 242 dev_kfree_skb_any(skb); 243 } 244 245 static size_t ath12k_dp_list_cut_nodes(struct list_head *list, 246 struct list_head *head, 247 size_t count) 248 { 249 struct list_head *cur; 250 struct ath12k_rx_desc_info *rx_desc; 251 size_t nodes = 0; 252 253 if (!count) { 254 INIT_LIST_HEAD(list); 255 goto out; 256 } 257 258 list_for_each(cur, 
head) { 259 if (!count) 260 break; 261 262 rx_desc = list_entry(cur, struct ath12k_rx_desc_info, list); 263 rx_desc->in_use = true; 264 265 count--; 266 nodes++; 267 } 268 269 list_cut_before(list, head, cur); 270 out: 271 return nodes; 272 } 273 274 static void ath12k_dp_rx_enqueue_free(struct ath12k_dp *dp, 275 struct list_head *used_list) 276 { 277 struct ath12k_rx_desc_info *rx_desc, *safe; 278 279 /* Reset the use flag */ 280 list_for_each_entry_safe(rx_desc, safe, used_list, list) 281 rx_desc->in_use = false; 282 283 spin_lock_bh(&dp->rx_desc_lock); 284 list_splice_tail(used_list, &dp->rx_desc_free_list); 285 spin_unlock_bh(&dp->rx_desc_lock); 286 } 287 288 /* Returns number of Rx buffers replenished */ 289 int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab, 290 struct dp_rxdma_ring *rx_ring, 291 struct list_head *used_list, 292 int req_entries) 293 { 294 struct ath12k_buffer_addr *desc; 295 struct hal_srng *srng; 296 struct sk_buff *skb; 297 int num_free; 298 int num_remain; 299 u32 cookie; 300 dma_addr_t paddr; 301 struct ath12k_dp *dp = &ab->dp; 302 struct ath12k_rx_desc_info *rx_desc; 303 enum hal_rx_buf_return_buf_manager mgr = ab->hw_params->hal_params->rx_buf_rbm; 304 305 req_entries = min(req_entries, rx_ring->bufs_max); 306 307 srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; 308 309 spin_lock_bh(&srng->lock); 310 311 ath12k_hal_srng_access_begin(ab, srng); 312 313 num_free = ath12k_hal_srng_src_num_free(ab, srng, true); 314 if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4)) 315 req_entries = num_free; 316 317 req_entries = min(num_free, req_entries); 318 num_remain = req_entries; 319 320 if (!num_remain) 321 goto out; 322 323 /* Get the descriptor from free list */ 324 if (list_empty(used_list)) { 325 spin_lock_bh(&dp->rx_desc_lock); 326 req_entries = ath12k_dp_list_cut_nodes(used_list, 327 &dp->rx_desc_free_list, 328 num_remain); 329 spin_unlock_bh(&dp->rx_desc_lock); 330 num_remain = req_entries; 331 } 332 333 while (num_remain > 0) { 334 skb = dev_alloc_skb(DP_RX_BUFFER_SIZE + 335 DP_RX_BUFFER_ALIGN_SIZE); 336 if (!skb) 337 break; 338 339 if (!IS_ALIGNED((unsigned long)skb->data, 340 DP_RX_BUFFER_ALIGN_SIZE)) { 341 skb_pull(skb, 342 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) - 343 skb->data); 344 } 345 346 paddr = dma_map_single(ab->dev, skb->data, 347 skb->len + skb_tailroom(skb), 348 DMA_FROM_DEVICE); 349 if (dma_mapping_error(ab->dev, paddr)) 350 goto fail_free_skb; 351 352 rx_desc = list_first_entry_or_null(used_list, 353 struct ath12k_rx_desc_info, 354 list); 355 if (!rx_desc) 356 goto fail_dma_unmap; 357 358 rx_desc->skb = skb; 359 cookie = rx_desc->cookie; 360 361 desc = ath12k_hal_srng_src_get_next_entry(ab, srng); 362 if (!desc) 363 goto fail_dma_unmap; 364 365 list_del(&rx_desc->list); 366 ATH12K_SKB_RXCB(skb)->paddr = paddr; 367 368 num_remain--; 369 370 ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr); 371 } 372 373 goto out; 374 375 fail_dma_unmap: 376 dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), 377 DMA_FROM_DEVICE); 378 fail_free_skb: 379 dev_kfree_skb_any(skb); 380 out: 381 ath12k_hal_srng_access_end(ab, srng); 382 383 if (!list_empty(used_list)) 384 ath12k_dp_rx_enqueue_free(dp, used_list); 385 386 spin_unlock_bh(&srng->lock); 387 388 return req_entries - num_remain; 389 } 390 391 static int ath12k_dp_rxdma_mon_buf_ring_free(struct ath12k_base *ab, 392 struct dp_rxdma_mon_ring *rx_ring) 393 { 394 struct sk_buff *skb; 395 int buf_id; 396 397 spin_lock_bh(&rx_ring->idr_lock); 398 
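	/* Release every buffer still tracked in the IDR: drop the IDR entry,
	 * unmap the DMA address saved in the skb control block and free the
	 * skb before idr_destroy() below tears the tree down.
	 */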
idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) { 399 idr_remove(&rx_ring->bufs_idr, buf_id); 400 /* TODO: Understand where internal driver does this dma_unmap 401 * of rxdma_buffer. 402 */ 403 dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr, 404 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); 405 dev_kfree_skb_any(skb); 406 } 407 408 idr_destroy(&rx_ring->bufs_idr); 409 spin_unlock_bh(&rx_ring->idr_lock); 410 411 return 0; 412 } 413 414 static int ath12k_dp_rxdma_buf_free(struct ath12k_base *ab) 415 { 416 struct ath12k_dp *dp = &ab->dp; 417 418 ath12k_dp_rxdma_mon_buf_ring_free(ab, &dp->rxdma_mon_buf_ring); 419 420 return 0; 421 } 422 423 static int ath12k_dp_rxdma_mon_ring_buf_setup(struct ath12k_base *ab, 424 struct dp_rxdma_mon_ring *rx_ring, 425 u32 ringtype) 426 { 427 int num_entries; 428 429 num_entries = rx_ring->refill_buf_ring.size / 430 ath12k_hal_srng_get_entrysize(ab, ringtype); 431 432 rx_ring->bufs_max = num_entries; 433 ath12k_dp_mon_buf_replenish(ab, rx_ring, num_entries); 434 435 return 0; 436 } 437 438 static int ath12k_dp_rxdma_ring_buf_setup(struct ath12k_base *ab, 439 struct dp_rxdma_ring *rx_ring) 440 { 441 LIST_HEAD(list); 442 443 rx_ring->bufs_max = rx_ring->refill_buf_ring.size / 444 ath12k_hal_srng_get_entrysize(ab, HAL_RXDMA_BUF); 445 446 ath12k_dp_rx_bufs_replenish(ab, rx_ring, &list, 0); 447 448 return 0; 449 } 450 451 static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab) 452 { 453 struct ath12k_dp *dp = &ab->dp; 454 int ret; 455 456 ret = ath12k_dp_rxdma_ring_buf_setup(ab, &dp->rx_refill_buf_ring); 457 if (ret) { 458 ath12k_warn(ab, 459 "failed to setup HAL_RXDMA_BUF\n"); 460 return ret; 461 } 462 463 if (ab->hw_params->rxdma1_enable) { 464 ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab, 465 &dp->rxdma_mon_buf_ring, 466 HAL_RXDMA_MONITOR_BUF); 467 if (ret) { 468 ath12k_warn(ab, 469 "failed to setup HAL_RXDMA_MONITOR_BUF\n"); 470 return ret; 471 } 472 } 473 474 return 0; 475 } 476 477 static void ath12k_dp_rx_pdev_srng_free(struct ath12k *ar) 478 { 479 struct ath12k_pdev_dp *dp = &ar->dp; 480 struct ath12k_base *ab = ar->ab; 481 int i; 482 483 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) 484 ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_dst_ring[i]); 485 } 486 487 void ath12k_dp_rx_pdev_reo_cleanup(struct ath12k_base *ab) 488 { 489 struct ath12k_dp *dp = &ab->dp; 490 int i; 491 492 for (i = 0; i < DP_REO_DST_RING_MAX; i++) 493 ath12k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]); 494 } 495 496 int ath12k_dp_rx_pdev_reo_setup(struct ath12k_base *ab) 497 { 498 struct ath12k_dp *dp = &ab->dp; 499 int ret; 500 int i; 501 502 for (i = 0; i < DP_REO_DST_RING_MAX; i++) { 503 ret = ath12k_dp_srng_setup(ab, &dp->reo_dst_ring[i], 504 HAL_REO_DST, i, 0, 505 DP_REO_DST_RING_SIZE); 506 if (ret) { 507 ath12k_warn(ab, "failed to setup reo_dst_ring\n"); 508 goto err_reo_cleanup; 509 } 510 } 511 512 return 0; 513 514 err_reo_cleanup: 515 ath12k_dp_rx_pdev_reo_cleanup(ab); 516 517 return ret; 518 } 519 520 static int ath12k_dp_rx_pdev_srng_alloc(struct ath12k *ar) 521 { 522 struct ath12k_pdev_dp *dp = &ar->dp; 523 struct ath12k_base *ab = ar->ab; 524 int i; 525 int ret; 526 u32 mac_id = dp->mac_id; 527 528 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { 529 ret = ath12k_dp_srng_setup(ar->ab, 530 &dp->rxdma_mon_dst_ring[i], 531 HAL_RXDMA_MONITOR_DST, 532 0, mac_id + i, 533 DP_RXDMA_MONITOR_DST_RING_SIZE); 534 if (ret) { 535 ath12k_warn(ar->ab, 536 "failed to setup HAL_RXDMA_MONITOR_DST\n"); 537 return ret; 538 } 539 } 540 541 return 0; 542 } 543 544 
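/* Drop all REO commands that are still tracked: entries on reo_cmd_list are
 * commands whose status callback has not run yet, entries on
 * reo_cmd_cache_flush_list are TID delete descriptors queued for a deferred
 * cache flush. Their queue buffers are unmapped and freed here on teardown.
 */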
void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
	struct ath12k_dp_rx_reo_cache_flush_elem *cmd_cache, *tmp_cache;

	spin_lock_bh(&dp->reo_cmd_lock);
	list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
		list_del(&cmd->list);
		dma_unmap_single(ab->dev, cmd->data.qbuf.paddr_aligned,
				 cmd->data.qbuf.size, DMA_BIDIRECTIONAL);
		kfree(cmd->data.qbuf.vaddr);
		kfree(cmd);
	}

	list_for_each_entry_safe(cmd_cache, tmp_cache,
				 &dp->reo_cmd_cache_flush_list, list) {
		list_del(&cmd_cache->list);
		dp->reo_cmd_cache_flush_count--;
		dma_unmap_single(ab->dev, cmd_cache->data.qbuf.paddr_aligned,
				 cmd_cache->data.qbuf.size, DMA_BIDIRECTIONAL);
		kfree(cmd_cache->data.qbuf.vaddr);
		kfree(cmd_cache);
	}
	spin_unlock_bh(&dp->reo_cmd_lock);
}

static void ath12k_dp_reo_cmd_free(struct ath12k_dp *dp, void *ctx,
				   enum hal_reo_cmd_status status)
{
	struct ath12k_dp_rx_tid *rx_tid = ctx;

	if (status != HAL_REO_CMD_SUCCESS)
		ath12k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
			    rx_tid->tid, status);

	dma_unmap_single(dp->ab->dev, rx_tid->qbuf.paddr_aligned, rx_tid->qbuf.size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->qbuf.vaddr);
	rx_tid->qbuf.vaddr = NULL;
}

static int ath12k_dp_reo_cmd_send(struct ath12k_base *ab, struct ath12k_dp_rx_tid *rx_tid,
				  enum hal_reo_cmd_type type,
				  struct ath12k_hal_reo_cmd *cmd,
				  void (*cb)(struct ath12k_dp *dp, void *ctx,
					     enum hal_reo_cmd_status status))
{
	struct ath12k_dp *dp = &ab->dp;
	struct ath12k_dp_rx_reo_cmd *dp_cmd;
	struct hal_srng *cmd_ring;
	int cmd_num;

	cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
	cmd_num = ath12k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);

	/* cmd_num should start from 1, during failure return the error code */
	if (cmd_num < 0)
		return cmd_num;

	/* reo cmd ring descriptors have cmd_num starting from 1 */
	if (cmd_num == 0)
		return -EINVAL;

	if (!cb)
		return 0;

	/* Can this be optimized so that we keep the pending command list only
	 * for tid delete command to free up the resource on the command status
	 * indication?
	 */
	dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC);

	if (!dp_cmd)
		return -ENOMEM;

	memcpy(&dp_cmd->data, rx_tid, sizeof(*rx_tid));
	dp_cmd->cmd_num = cmd_num;
	dp_cmd->handler = cb;

	spin_lock_bh(&dp->reo_cmd_lock);
	list_add_tail(&dp_cmd->list, &dp->reo_cmd_list);
	spin_unlock_bh(&dp->reo_cmd_lock);

	return 0;
}

static void ath12k_dp_reo_cache_flush(struct ath12k_base *ab,
				      struct ath12k_dp_rx_tid *rx_tid)
{
	struct ath12k_hal_reo_cmd cmd = {0};
	unsigned long tot_desc_sz, desc_sz;
	int ret;

	tot_desc_sz = rx_tid->qbuf.size;
	desc_sz = ath12k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);

	while (tot_desc_sz > desc_sz) {
		tot_desc_sz -= desc_sz;
		cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned + tot_desc_sz);
		cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
		ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
					     HAL_REO_CMD_FLUSH_CACHE, &cmd,
					     NULL);
		if (ret)
			ath12k_warn(ab,
				    "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
				    rx_tid->tid, ret);
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
	cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
				     HAL_REO_CMD_FLUSH_CACHE,
				     &cmd, ath12k_dp_reo_cmd_free);
	if (ret) {
		ath12k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
			   rx_tid->tid, ret);
		dma_unmap_single(ab->dev, rx_tid->qbuf.paddr_aligned, rx_tid->qbuf.size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->qbuf.vaddr);
		rx_tid->qbuf.vaddr = NULL;
	}
}

static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
				      enum hal_reo_cmd_status status)
{
	struct ath12k_base *ab = dp->ab;
	struct ath12k_dp_rx_tid *rx_tid = ctx;
	struct ath12k_dp_rx_reo_cache_flush_elem *elem, *tmp;

	if (status == HAL_REO_CMD_DRAIN) {
		goto free_desc;
	} else if (status != HAL_REO_CMD_SUCCESS) {
		/* Shouldn't happen! Cleanup in case of other failure? */
		ath12k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
			    rx_tid->tid, status);
		return;
	}

	elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
	if (!elem)
		goto free_desc;

	elem->ts = jiffies;
	memcpy(&elem->data, rx_tid, sizeof(*rx_tid));

	spin_lock_bh(&dp->reo_cmd_lock);
	list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
	dp->reo_cmd_cache_flush_count++;

	/* Flush and invalidate aged REO desc from HW cache */
	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
				 list) {
		if (dp->reo_cmd_cache_flush_count > ATH12K_DP_RX_REO_DESC_FREE_THRES ||
		    time_after(jiffies, elem->ts +
			       msecs_to_jiffies(ATH12K_DP_RX_REO_DESC_FREE_TIMEOUT_MS))) {
			list_del(&elem->list);
			dp->reo_cmd_cache_flush_count--;

			/* Unlock the reo_cmd_lock before using ath12k_dp_reo_cmd_send()
			 * within ath12k_dp_reo_cache_flush. The reo_cmd_cache_flush_list
			 * is used in only two contexts, one is in this function called
			 * from napi and the other in ath12k_dp_free during core destroy.
			 * Before dp_free, the irqs would be disabled and would wait to
			 * synchronize. Hence there wouldn't be any race against add or
			 * delete to this list. Hence unlock-lock is safe here.
714 */ 715 spin_unlock_bh(&dp->reo_cmd_lock); 716 717 ath12k_dp_reo_cache_flush(ab, &elem->data); 718 kfree(elem); 719 spin_lock_bh(&dp->reo_cmd_lock); 720 } 721 } 722 spin_unlock_bh(&dp->reo_cmd_lock); 723 724 return; 725 free_desc: 726 dma_unmap_single(ab->dev, rx_tid->qbuf.paddr_aligned, rx_tid->qbuf.size, 727 DMA_BIDIRECTIONAL); 728 kfree(rx_tid->qbuf.vaddr); 729 rx_tid->qbuf.vaddr = NULL; 730 } 731 732 static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u16 tid, 733 dma_addr_t paddr) 734 { 735 struct ath12k_reo_queue_ref *qref; 736 struct ath12k_dp *dp = &ab->dp; 737 bool ml_peer = false; 738 739 if (!ab->hw_params->reoq_lut_support) 740 return; 741 742 if (peer_id & ATH12K_PEER_ML_ID_VALID) { 743 peer_id &= ~ATH12K_PEER_ML_ID_VALID; 744 ml_peer = true; 745 } 746 747 if (ml_peer) 748 qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr + 749 (peer_id * (IEEE80211_NUM_TIDS + 1) + tid); 750 else 751 qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr + 752 (peer_id * (IEEE80211_NUM_TIDS + 1) + tid); 753 754 qref->info0 = u32_encode_bits(lower_32_bits(paddr), 755 BUFFER_ADDR_INFO0_ADDR); 756 qref->info1 = u32_encode_bits(upper_32_bits(paddr), 757 BUFFER_ADDR_INFO1_ADDR) | 758 u32_encode_bits(tid, DP_REO_QREF_NUM); 759 ath12k_hal_reo_shared_qaddr_cache_clear(ab); 760 } 761 762 static void ath12k_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u16 tid) 763 { 764 struct ath12k_reo_queue_ref *qref; 765 struct ath12k_dp *dp = &ab->dp; 766 bool ml_peer = false; 767 768 if (!ab->hw_params->reoq_lut_support) 769 return; 770 771 if (peer_id & ATH12K_PEER_ML_ID_VALID) { 772 peer_id &= ~ATH12K_PEER_ML_ID_VALID; 773 ml_peer = true; 774 } 775 776 if (ml_peer) 777 qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr + 778 (peer_id * (IEEE80211_NUM_TIDS + 1) + tid); 779 else 780 qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr + 781 (peer_id * (IEEE80211_NUM_TIDS + 1) + tid); 782 783 qref->info0 = u32_encode_bits(0, BUFFER_ADDR_INFO0_ADDR); 784 qref->info1 = u32_encode_bits(0, BUFFER_ADDR_INFO1_ADDR) | 785 u32_encode_bits(tid, DP_REO_QREF_NUM); 786 } 787 788 void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar, 789 struct ath12k_peer *peer, u8 tid) 790 { 791 struct ath12k_hal_reo_cmd cmd = {0}; 792 struct ath12k_dp_rx_tid *rx_tid = &peer->rx_tid[tid]; 793 int ret; 794 795 if (!rx_tid->active) 796 return; 797 798 cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS; 799 cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned); 800 cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned); 801 cmd.upd0 = HAL_REO_CMD_UPD0_VLD; 802 ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid, 803 HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd, 804 ath12k_dp_rx_tid_del_func); 805 if (ret) { 806 ath12k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n", 807 tid, ret); 808 dma_unmap_single(ar->ab->dev, rx_tid->qbuf.paddr_aligned, 809 rx_tid->qbuf.size, DMA_BIDIRECTIONAL); 810 kfree(rx_tid->qbuf.vaddr); 811 rx_tid->qbuf.vaddr = NULL; 812 } 813 814 if (peer->mlo) 815 ath12k_peer_rx_tid_qref_reset(ar->ab, peer->ml_id, tid); 816 else 817 ath12k_peer_rx_tid_qref_reset(ar->ab, peer->peer_id, tid); 818 819 rx_tid->active = false; 820 } 821 822 /* TODO: it's strange (and ugly) that struct hal_reo_dest_ring is converted 823 * to struct hal_wbm_release_ring, I couldn't figure out the logic behind 824 * that. 
825 */ 826 static int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab, 827 struct hal_reo_dest_ring *ring, 828 enum hal_wbm_rel_bm_act action) 829 { 830 struct hal_wbm_release_ring *link_desc = (struct hal_wbm_release_ring *)ring; 831 struct hal_wbm_release_ring *desc; 832 struct ath12k_dp *dp = &ab->dp; 833 struct hal_srng *srng; 834 int ret = 0; 835 836 srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id]; 837 838 spin_lock_bh(&srng->lock); 839 840 ath12k_hal_srng_access_begin(ab, srng); 841 842 desc = ath12k_hal_srng_src_get_next_entry(ab, srng); 843 if (!desc) { 844 ret = -ENOBUFS; 845 goto exit; 846 } 847 848 ath12k_hal_rx_msdu_link_desc_set(ab, desc, link_desc, action); 849 850 exit: 851 ath12k_hal_srng_access_end(ab, srng); 852 853 spin_unlock_bh(&srng->lock); 854 855 return ret; 856 } 857 858 static void ath12k_dp_rx_frags_cleanup(struct ath12k_dp_rx_tid *rx_tid, 859 bool rel_link_desc) 860 { 861 struct ath12k_base *ab = rx_tid->ab; 862 863 lockdep_assert_held(&ab->base_lock); 864 865 if (rx_tid->dst_ring_desc) { 866 if (rel_link_desc) 867 ath12k_dp_rx_link_desc_return(ab, rx_tid->dst_ring_desc, 868 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 869 kfree(rx_tid->dst_ring_desc); 870 rx_tid->dst_ring_desc = NULL; 871 } 872 873 rx_tid->cur_sn = 0; 874 rx_tid->last_frag_no = 0; 875 rx_tid->rx_frag_bitmap = 0; 876 __skb_queue_purge(&rx_tid->rx_frags); 877 } 878 879 void ath12k_dp_rx_peer_tid_cleanup(struct ath12k *ar, struct ath12k_peer *peer) 880 { 881 struct ath12k_dp_rx_tid *rx_tid; 882 int i; 883 884 lockdep_assert_held(&ar->ab->base_lock); 885 886 for (i = 0; i <= IEEE80211_NUM_TIDS; i++) { 887 rx_tid = &peer->rx_tid[i]; 888 889 ath12k_dp_rx_peer_tid_delete(ar, peer, i); 890 ath12k_dp_rx_frags_cleanup(rx_tid, true); 891 892 spin_unlock_bh(&ar->ab->base_lock); 893 timer_delete_sync(&rx_tid->frag_timer); 894 spin_lock_bh(&ar->ab->base_lock); 895 } 896 } 897 898 static int ath12k_peer_rx_tid_reo_update(struct ath12k *ar, 899 struct ath12k_peer *peer, 900 struct ath12k_dp_rx_tid *rx_tid, 901 u32 ba_win_sz, u16 ssn, 902 bool update_ssn) 903 { 904 struct ath12k_hal_reo_cmd cmd = {0}; 905 int ret; 906 907 cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned); 908 cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned); 909 cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS; 910 cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE; 911 cmd.ba_window_size = ba_win_sz; 912 913 if (update_ssn) { 914 cmd.upd0 |= HAL_REO_CMD_UPD0_SSN; 915 cmd.upd2 = u32_encode_bits(ssn, HAL_REO_CMD_UPD2_SSN); 916 } 917 918 ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid, 919 HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd, 920 NULL); 921 if (ret) { 922 ath12k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n", 923 rx_tid->tid, ret); 924 return ret; 925 } 926 927 rx_tid->ba_win_sz = ba_win_sz; 928 929 return 0; 930 } 931 932 static int ath12k_dp_rx_assign_reoq(struct ath12k_base *ab, 933 struct ath12k_sta *ahsta, 934 struct ath12k_dp_rx_tid *rx_tid, 935 u16 ssn, enum hal_pn_type pn_type) 936 { 937 u32 ba_win_sz = rx_tid->ba_win_sz; 938 struct ath12k_reoq_buf *buf; 939 void *vaddr, *vaddr_aligned; 940 dma_addr_t paddr_aligned; 941 u8 tid = rx_tid->tid; 942 u32 hw_desc_sz; 943 int ret; 944 945 buf = &ahsta->reoq_bufs[tid]; 946 if (!buf->vaddr) { 947 /* TODO: Optimize the memory allocation for qos tid based on 948 * the actual BA window size in REO tid update path. 
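		 * For now every QoS TID queue is sized for DP_BA_WIN_SZ_MAX, so a
		 * later BA window change only updates the REO queue settings and
		 * never needs a reallocation.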
		 */
		if (tid == HAL_DESC_REO_NON_QOS_TID)
			hw_desc_sz = ath12k_hal_reo_qdesc_size(ba_win_sz, tid);
		else
			hw_desc_sz = ath12k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);

		vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
		if (!vaddr)
			return -ENOMEM;

		vaddr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);

		ath12k_hal_reo_qdesc_setup(vaddr_aligned, tid, ba_win_sz,
					   ssn, pn_type);

		paddr_aligned = dma_map_single(ab->dev, vaddr_aligned, hw_desc_sz,
					       DMA_BIDIRECTIONAL);
		ret = dma_mapping_error(ab->dev, paddr_aligned);
		if (ret) {
			kfree(vaddr);
			return ret;
		}

		buf->vaddr = vaddr;
		buf->paddr_aligned = paddr_aligned;
		buf->size = hw_desc_sz;
	}

	rx_tid->qbuf = *buf;
	rx_tid->active = true;

	return 0;
}

int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id,
				u8 tid, u32 ba_win_sz, u16 ssn,
				enum hal_pn_type pn_type)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_dp *dp = &ab->dp;
	struct ath12k_peer *peer;
	struct ath12k_sta *ahsta;
	struct ath12k_dp_rx_tid *rx_tid;
	dma_addr_t paddr_aligned;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath12k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		spin_unlock_bh(&ab->base_lock);
		ath12k_warn(ab, "failed to find the peer to set up rx tid\n");
		return -ENOENT;
	}

	if (ab->hw_params->dp_primary_link_only &&
	    !peer->primary_link) {
		spin_unlock_bh(&ab->base_lock);
		return 0;
	}

	if (ab->hw_params->reoq_lut_support &&
	    (!dp->reoq_lut.vaddr || !dp->ml_reoq_lut.vaddr)) {
		spin_unlock_bh(&ab->base_lock);
		ath12k_warn(ab, "reo qref table is not setup\n");
		return -EINVAL;
	}

	if (peer->peer_id > DP_MAX_PEER_ID || tid > IEEE80211_NUM_TIDS) {
		ath12k_warn(ab, "peer id of peer %d or tid %d doesn't allow reoq setup\n",
			    peer->peer_id, tid);
		spin_unlock_bh(&ab->base_lock);
		return -EINVAL;
	}

	rx_tid = &peer->rx_tid[tid];
	paddr_aligned = rx_tid->qbuf.paddr_aligned;
	/* Update the tid queue if it is already setup */
	if (rx_tid->active) {
		ret = ath12k_peer_rx_tid_reo_update(ar, peer, rx_tid,
						    ba_win_sz, ssn, true);
		spin_unlock_bh(&ab->base_lock);
		if (ret) {
			ath12k_warn(ab, "failed to update reo for rx tid %d\n", tid);
			return ret;
		}

		if (!ab->hw_params->reoq_lut_support) {
			ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
								     peer_mac,
								     paddr_aligned, tid,
								     1, ba_win_sz);
			if (ret) {
				ath12k_warn(ab, "failed to setup peer rx reorder queue for tid %d: %d\n",
					    tid, ret);
				return ret;
			}
		}

		return 0;
	}

	rx_tid->tid = tid;

	rx_tid->ba_win_sz = ba_win_sz;

	ahsta = ath12k_sta_to_ahsta(peer->sta);
	ret = ath12k_dp_rx_assign_reoq(ab, ahsta, rx_tid, ssn, pn_type);
	if (ret) {
		spin_unlock_bh(&ab->base_lock);
		ath12k_warn(ab, "failed to assign reoq buf for rx tid %u\n", tid);
		return ret;
	}

	if (ab->hw_params->reoq_lut_support) {
		/* Update the REO queue LUT at the corresponding peer id
		 * and tid with qaddr.
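		 * Once the LUT entry is programmed the hardware resolves the
		 * queue from (peer_id, tid) on its own, which is why no WMI
		 * reorder queue setup is sent in this branch.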
1066 */ 1067 if (peer->mlo) 1068 ath12k_peer_rx_tid_qref_setup(ab, peer->ml_id, tid, 1069 paddr_aligned); 1070 else 1071 ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid, 1072 paddr_aligned); 1073 1074 spin_unlock_bh(&ab->base_lock); 1075 } else { 1076 spin_unlock_bh(&ab->base_lock); 1077 ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac, 1078 paddr_aligned, tid, 1, 1079 ba_win_sz); 1080 } 1081 1082 return ret; 1083 } 1084 1085 int ath12k_dp_rx_ampdu_start(struct ath12k *ar, 1086 struct ieee80211_ampdu_params *params, 1087 u8 link_id) 1088 { 1089 struct ath12k_base *ab = ar->ab; 1090 struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta); 1091 struct ath12k_link_sta *arsta; 1092 int vdev_id; 1093 int ret; 1094 1095 lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); 1096 1097 arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy, 1098 ahsta->link[link_id]); 1099 if (!arsta) 1100 return -ENOLINK; 1101 1102 vdev_id = arsta->arvif->vdev_id; 1103 1104 ret = ath12k_dp_rx_peer_tid_setup(ar, arsta->addr, vdev_id, 1105 params->tid, params->buf_size, 1106 params->ssn, arsta->ahsta->pn_type); 1107 if (ret) 1108 ath12k_warn(ab, "failed to setup rx tid %d\n", ret); 1109 1110 return ret; 1111 } 1112 1113 int ath12k_dp_rx_ampdu_stop(struct ath12k *ar, 1114 struct ieee80211_ampdu_params *params, 1115 u8 link_id) 1116 { 1117 struct ath12k_base *ab = ar->ab; 1118 struct ath12k_peer *peer; 1119 struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta); 1120 struct ath12k_link_sta *arsta; 1121 int vdev_id; 1122 bool active; 1123 int ret; 1124 1125 lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); 1126 1127 arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy, 1128 ahsta->link[link_id]); 1129 if (!arsta) 1130 return -ENOLINK; 1131 1132 vdev_id = arsta->arvif->vdev_id; 1133 1134 spin_lock_bh(&ab->base_lock); 1135 1136 peer = ath12k_peer_find(ab, vdev_id, arsta->addr); 1137 if (!peer) { 1138 spin_unlock_bh(&ab->base_lock); 1139 ath12k_warn(ab, "failed to find the peer to stop rx aggregation\n"); 1140 return -ENOENT; 1141 } 1142 1143 active = peer->rx_tid[params->tid].active; 1144 1145 if (!active) { 1146 spin_unlock_bh(&ab->base_lock); 1147 return 0; 1148 } 1149 1150 ret = ath12k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false); 1151 spin_unlock_bh(&ab->base_lock); 1152 if (ret) { 1153 ath12k_warn(ab, "failed to update reo for rx tid %d: %d\n", 1154 params->tid, ret); 1155 return ret; 1156 } 1157 1158 return ret; 1159 } 1160 1161 int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif, 1162 const u8 *peer_addr, 1163 enum set_key_cmd key_cmd, 1164 struct ieee80211_key_conf *key) 1165 { 1166 struct ath12k *ar = arvif->ar; 1167 struct ath12k_base *ab = ar->ab; 1168 struct ath12k_hal_reo_cmd cmd = {0}; 1169 struct ath12k_peer *peer; 1170 struct ath12k_dp_rx_tid *rx_tid; 1171 u8 tid; 1172 int ret = 0; 1173 1174 /* NOTE: Enable PN/TSC replay check offload only for unicast frames. 1175 * We use mac80211 PN/TSC replay check functionality for bcast/mcast 1176 * for now. 
1177 */ 1178 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) 1179 return 0; 1180 1181 cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS; 1182 cmd.upd0 = HAL_REO_CMD_UPD0_PN | 1183 HAL_REO_CMD_UPD0_PN_SIZE | 1184 HAL_REO_CMD_UPD0_PN_VALID | 1185 HAL_REO_CMD_UPD0_PN_CHECK | 1186 HAL_REO_CMD_UPD0_SVLD; 1187 1188 switch (key->cipher) { 1189 case WLAN_CIPHER_SUITE_TKIP: 1190 case WLAN_CIPHER_SUITE_CCMP: 1191 case WLAN_CIPHER_SUITE_CCMP_256: 1192 case WLAN_CIPHER_SUITE_GCMP: 1193 case WLAN_CIPHER_SUITE_GCMP_256: 1194 if (key_cmd == SET_KEY) { 1195 cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK; 1196 cmd.pn_size = 48; 1197 } 1198 break; 1199 default: 1200 break; 1201 } 1202 1203 spin_lock_bh(&ab->base_lock); 1204 1205 peer = ath12k_peer_find(ab, arvif->vdev_id, peer_addr); 1206 if (!peer) { 1207 spin_unlock_bh(&ab->base_lock); 1208 ath12k_warn(ab, "failed to find the peer %pM to configure pn replay detection\n", 1209 peer_addr); 1210 return -ENOENT; 1211 } 1212 1213 for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) { 1214 rx_tid = &peer->rx_tid[tid]; 1215 if (!rx_tid->active) 1216 continue; 1217 cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned); 1218 cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned); 1219 ret = ath12k_dp_reo_cmd_send(ab, rx_tid, 1220 HAL_REO_CMD_UPDATE_RX_QUEUE, 1221 &cmd, NULL); 1222 if (ret) { 1223 ath12k_warn(ab, "failed to configure rx tid %d queue of peer %pM for pn replay detection %d\n", 1224 tid, peer_addr, ret); 1225 break; 1226 } 1227 } 1228 1229 spin_unlock_bh(&ab->base_lock); 1230 1231 return ret; 1232 } 1233 1234 static int ath12k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats, 1235 u16 peer_id) 1236 { 1237 int i; 1238 1239 for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) { 1240 if (ppdu_stats->user_stats[i].is_valid_peer_id) { 1241 if (peer_id == ppdu_stats->user_stats[i].peer_id) 1242 return i; 1243 } else { 1244 return i; 1245 } 1246 } 1247 1248 return -EINVAL; 1249 } 1250 1251 static int ath12k_htt_tlv_ppdu_stats_parse(struct ath12k_base *ab, 1252 u16 tag, u16 len, const void *ptr, 1253 void *data) 1254 { 1255 const struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *ba_status; 1256 const struct htt_ppdu_stats_usr_cmpltn_cmn *cmplt_cmn; 1257 const struct htt_ppdu_stats_user_rate *user_rate; 1258 struct htt_ppdu_stats_info *ppdu_info; 1259 struct htt_ppdu_user_stats *user_stats; 1260 int cur_user; 1261 u16 peer_id; 1262 1263 ppdu_info = data; 1264 1265 switch (tag) { 1266 case HTT_PPDU_STATS_TAG_COMMON: 1267 if (len < sizeof(struct htt_ppdu_stats_common)) { 1268 ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n", 1269 len, tag); 1270 return -EINVAL; 1271 } 1272 memcpy(&ppdu_info->ppdu_stats.common, ptr, 1273 sizeof(struct htt_ppdu_stats_common)); 1274 break; 1275 case HTT_PPDU_STATS_TAG_USR_RATE: 1276 if (len < sizeof(struct htt_ppdu_stats_user_rate)) { 1277 ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n", 1278 len, tag); 1279 return -EINVAL; 1280 } 1281 user_rate = ptr; 1282 peer_id = le16_to_cpu(user_rate->sw_peer_id); 1283 cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats, 1284 peer_id); 1285 if (cur_user < 0) 1286 return -EINVAL; 1287 user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; 1288 user_stats->peer_id = peer_id; 1289 user_stats->is_valid_peer_id = true; 1290 memcpy(&user_stats->rate, ptr, 1291 sizeof(struct htt_ppdu_stats_user_rate)); 1292 user_stats->tlv_flags |= BIT(tag); 1293 break; 1294 case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON: 1295 if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) { 1296 ath12k_warn(ab, "Invalid len %d 
for the tag 0x%x\n", 1297 len, tag); 1298 return -EINVAL; 1299 } 1300 1301 cmplt_cmn = ptr; 1302 peer_id = le16_to_cpu(cmplt_cmn->sw_peer_id); 1303 cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats, 1304 peer_id); 1305 if (cur_user < 0) 1306 return -EINVAL; 1307 user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; 1308 user_stats->peer_id = peer_id; 1309 user_stats->is_valid_peer_id = true; 1310 memcpy(&user_stats->cmpltn_cmn, ptr, 1311 sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)); 1312 user_stats->tlv_flags |= BIT(tag); 1313 break; 1314 case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS: 1315 if (len < 1316 sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) { 1317 ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n", 1318 len, tag); 1319 return -EINVAL; 1320 } 1321 1322 ba_status = ptr; 1323 peer_id = le16_to_cpu(ba_status->sw_peer_id); 1324 cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats, 1325 peer_id); 1326 if (cur_user < 0) 1327 return -EINVAL; 1328 user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; 1329 user_stats->peer_id = peer_id; 1330 user_stats->is_valid_peer_id = true; 1331 memcpy(&user_stats->ack_ba, ptr, 1332 sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)); 1333 user_stats->tlv_flags |= BIT(tag); 1334 break; 1335 } 1336 return 0; 1337 } 1338 1339 int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len, 1340 int (*iter)(struct ath12k_base *ar, u16 tag, u16 len, 1341 const void *ptr, void *data), 1342 void *data) 1343 { 1344 const struct htt_tlv *tlv; 1345 const void *begin = ptr; 1346 u16 tlv_tag, tlv_len; 1347 int ret = -EINVAL; 1348 1349 while (len > 0) { 1350 if (len < sizeof(*tlv)) { 1351 ath12k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n", 1352 ptr - begin, len, sizeof(*tlv)); 1353 return -EINVAL; 1354 } 1355 tlv = (struct htt_tlv *)ptr; 1356 tlv_tag = le32_get_bits(tlv->header, HTT_TLV_TAG); 1357 tlv_len = le32_get_bits(tlv->header, HTT_TLV_LEN); 1358 ptr += sizeof(*tlv); 1359 len -= sizeof(*tlv); 1360 1361 if (tlv_len > len) { 1362 ath12k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n", 1363 tlv_tag, ptr - begin, len, tlv_len); 1364 return -EINVAL; 1365 } 1366 ret = iter(ab, tlv_tag, tlv_len, ptr, data); 1367 if (ret == -ENOMEM) 1368 return ret; 1369 1370 ptr += tlv_len; 1371 len -= tlv_len; 1372 } 1373 return 0; 1374 } 1375 1376 static void 1377 ath12k_update_per_peer_tx_stats(struct ath12k *ar, 1378 struct htt_ppdu_stats *ppdu_stats, u8 user) 1379 { 1380 struct ath12k_base *ab = ar->ab; 1381 struct ath12k_peer *peer; 1382 struct ieee80211_sta *sta; 1383 struct ath12k_sta *ahsta; 1384 struct ath12k_link_sta *arsta; 1385 struct htt_ppdu_stats_user_rate *user_rate; 1386 struct ath12k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats; 1387 struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user]; 1388 struct htt_ppdu_stats_common *common = &ppdu_stats->common; 1389 int ret; 1390 u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0; 1391 u32 v, succ_bytes = 0; 1392 u16 tones, rate = 0, succ_pkts = 0; 1393 u32 tx_duration = 0; 1394 u8 tid = HTT_PPDU_STATS_NON_QOS_TID; 1395 bool is_ampdu = false; 1396 1397 if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE))) 1398 return; 1399 1400 if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON)) 1401 is_ampdu = 1402 HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags); 1403 1404 if (usr_stats->tlv_flags & 1405 BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) { 
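		/* ACK/BA completion TLV: per-user success byte count, MSDU count and TID */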
1406 succ_bytes = le32_to_cpu(usr_stats->ack_ba.success_bytes); 1407 succ_pkts = le32_get_bits(usr_stats->ack_ba.info, 1408 HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M); 1409 tid = le32_get_bits(usr_stats->ack_ba.info, 1410 HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM); 1411 } 1412 1413 if (common->fes_duration_us) 1414 tx_duration = le32_to_cpu(common->fes_duration_us); 1415 1416 user_rate = &usr_stats->rate; 1417 flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags); 1418 bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2; 1419 nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1; 1420 mcs = HTT_USR_RATE_MCS(user_rate->rate_flags); 1421 sgi = HTT_USR_RATE_GI(user_rate->rate_flags); 1422 dcm = HTT_USR_RATE_DCM(user_rate->rate_flags); 1423 1424 /* Note: If host configured fixed rates and in some other special 1425 * cases, the broadcast/management frames are sent in different rates. 1426 * Firmware rate's control to be skipped for this? 1427 */ 1428 1429 if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH12K_HE_MCS_MAX) { 1430 ath12k_warn(ab, "Invalid HE mcs %d peer stats", mcs); 1431 return; 1432 } 1433 1434 if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH12K_VHT_MCS_MAX) { 1435 ath12k_warn(ab, "Invalid VHT mcs %d peer stats", mcs); 1436 return; 1437 } 1438 1439 if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH12K_HT_MCS_MAX || nss < 1)) { 1440 ath12k_warn(ab, "Invalid HT mcs %d nss %d peer stats", 1441 mcs, nss); 1442 return; 1443 } 1444 1445 if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) { 1446 ret = ath12k_mac_hw_ratecode_to_legacy_rate(mcs, 1447 flags, 1448 &rate_idx, 1449 &rate); 1450 if (ret < 0) 1451 return; 1452 } 1453 1454 rcu_read_lock(); 1455 spin_lock_bh(&ab->base_lock); 1456 peer = ath12k_peer_find_by_id(ab, usr_stats->peer_id); 1457 1458 if (!peer || !peer->sta) { 1459 spin_unlock_bh(&ab->base_lock); 1460 rcu_read_unlock(); 1461 return; 1462 } 1463 1464 sta = peer->sta; 1465 ahsta = ath12k_sta_to_ahsta(sta); 1466 arsta = &ahsta->deflink; 1467 1468 memset(&arsta->txrate, 0, sizeof(arsta->txrate)); 1469 1470 switch (flags) { 1471 case WMI_RATE_PREAMBLE_OFDM: 1472 arsta->txrate.legacy = rate; 1473 break; 1474 case WMI_RATE_PREAMBLE_CCK: 1475 arsta->txrate.legacy = rate; 1476 break; 1477 case WMI_RATE_PREAMBLE_HT: 1478 arsta->txrate.mcs = mcs + 8 * (nss - 1); 1479 arsta->txrate.flags = RATE_INFO_FLAGS_MCS; 1480 if (sgi) 1481 arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; 1482 break; 1483 case WMI_RATE_PREAMBLE_VHT: 1484 arsta->txrate.mcs = mcs; 1485 arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS; 1486 if (sgi) 1487 arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; 1488 break; 1489 case WMI_RATE_PREAMBLE_HE: 1490 arsta->txrate.mcs = mcs; 1491 arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS; 1492 arsta->txrate.he_dcm = dcm; 1493 arsta->txrate.he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi); 1494 tones = le16_to_cpu(user_rate->ru_end) - 1495 le16_to_cpu(user_rate->ru_start) + 1; 1496 v = ath12k_he_ru_tones_to_nl80211_he_ru_alloc(tones); 1497 arsta->txrate.he_ru_alloc = v; 1498 break; 1499 } 1500 1501 arsta->txrate.nss = nss; 1502 arsta->txrate.bw = ath12k_mac_bw_to_mac80211_bw(bw); 1503 arsta->tx_duration += tx_duration; 1504 memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info)); 1505 1506 /* PPDU stats reported for mgmt packet doesn't have valid tx bytes. 1507 * So skip peer stats update for mgmt packets. 
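	 * Only completions reported with a data TID (tid < HTT_PPDU_STATS_NON_QOS_TID)
	 * feed the per-peer counters below.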
1508 */ 1509 if (tid < HTT_PPDU_STATS_NON_QOS_TID) { 1510 memset(peer_stats, 0, sizeof(*peer_stats)); 1511 peer_stats->succ_pkts = succ_pkts; 1512 peer_stats->succ_bytes = succ_bytes; 1513 peer_stats->is_ampdu = is_ampdu; 1514 peer_stats->duration = tx_duration; 1515 peer_stats->ba_fails = 1516 HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) + 1517 HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags); 1518 } 1519 1520 spin_unlock_bh(&ab->base_lock); 1521 rcu_read_unlock(); 1522 } 1523 1524 static void ath12k_htt_update_ppdu_stats(struct ath12k *ar, 1525 struct htt_ppdu_stats *ppdu_stats) 1526 { 1527 u8 user; 1528 1529 for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++) 1530 ath12k_update_per_peer_tx_stats(ar, ppdu_stats, user); 1531 } 1532 1533 static 1534 struct htt_ppdu_stats_info *ath12k_dp_htt_get_ppdu_desc(struct ath12k *ar, 1535 u32 ppdu_id) 1536 { 1537 struct htt_ppdu_stats_info *ppdu_info; 1538 1539 lockdep_assert_held(&ar->data_lock); 1540 if (!list_empty(&ar->ppdu_stats_info)) { 1541 list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) { 1542 if (ppdu_info->ppdu_id == ppdu_id) 1543 return ppdu_info; 1544 } 1545 1546 if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) { 1547 ppdu_info = list_first_entry(&ar->ppdu_stats_info, 1548 typeof(*ppdu_info), list); 1549 list_del(&ppdu_info->list); 1550 ar->ppdu_stat_list_depth--; 1551 ath12k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats); 1552 kfree(ppdu_info); 1553 } 1554 } 1555 1556 ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC); 1557 if (!ppdu_info) 1558 return NULL; 1559 1560 list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info); 1561 ar->ppdu_stat_list_depth++; 1562 1563 return ppdu_info; 1564 } 1565 1566 static void ath12k_copy_to_delay_stats(struct ath12k_peer *peer, 1567 struct htt_ppdu_user_stats *usr_stats) 1568 { 1569 peer->ppdu_stats_delayba.sw_peer_id = le16_to_cpu(usr_stats->rate.sw_peer_id); 1570 peer->ppdu_stats_delayba.info0 = le32_to_cpu(usr_stats->rate.info0); 1571 peer->ppdu_stats_delayba.ru_end = le16_to_cpu(usr_stats->rate.ru_end); 1572 peer->ppdu_stats_delayba.ru_start = le16_to_cpu(usr_stats->rate.ru_start); 1573 peer->ppdu_stats_delayba.info1 = le32_to_cpu(usr_stats->rate.info1); 1574 peer->ppdu_stats_delayba.rate_flags = le32_to_cpu(usr_stats->rate.rate_flags); 1575 peer->ppdu_stats_delayba.resp_rate_flags = 1576 le32_to_cpu(usr_stats->rate.resp_rate_flags); 1577 1578 peer->delayba_flag = true; 1579 } 1580 1581 static void ath12k_copy_to_bar(struct ath12k_peer *peer, 1582 struct htt_ppdu_user_stats *usr_stats) 1583 { 1584 usr_stats->rate.sw_peer_id = cpu_to_le16(peer->ppdu_stats_delayba.sw_peer_id); 1585 usr_stats->rate.info0 = cpu_to_le32(peer->ppdu_stats_delayba.info0); 1586 usr_stats->rate.ru_end = cpu_to_le16(peer->ppdu_stats_delayba.ru_end); 1587 usr_stats->rate.ru_start = cpu_to_le16(peer->ppdu_stats_delayba.ru_start); 1588 usr_stats->rate.info1 = cpu_to_le32(peer->ppdu_stats_delayba.info1); 1589 usr_stats->rate.rate_flags = cpu_to_le32(peer->ppdu_stats_delayba.rate_flags); 1590 usr_stats->rate.resp_rate_flags = 1591 cpu_to_le32(peer->ppdu_stats_delayba.resp_rate_flags); 1592 1593 peer->delayba_flag = false; 1594 } 1595 1596 static int ath12k_htt_pull_ppdu_stats(struct ath12k_base *ab, 1597 struct sk_buff *skb) 1598 { 1599 struct ath12k_htt_ppdu_stats_msg *msg; 1600 struct htt_ppdu_stats_info *ppdu_info; 1601 struct ath12k_peer *peer = NULL; 1602 struct htt_ppdu_user_stats *usr_stats = NULL; 1603 u32 peer_id = 0; 1604 struct ath12k *ar; 1605 int ret, i; 1606 u8 pdev_id; 
1607 u32 ppdu_id, len; 1608 1609 msg = (struct ath12k_htt_ppdu_stats_msg *)skb->data; 1610 len = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE); 1611 if (len > (skb->len - struct_size(msg, data, 0))) { 1612 ath12k_warn(ab, 1613 "HTT PPDU STATS event has unexpected payload size %u, should be smaller than %u\n", 1614 len, skb->len); 1615 return -EINVAL; 1616 } 1617 1618 pdev_id = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PDEV_ID); 1619 ppdu_id = le32_to_cpu(msg->ppdu_id); 1620 1621 rcu_read_lock(); 1622 ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id); 1623 if (!ar) { 1624 ret = -EINVAL; 1625 goto exit; 1626 } 1627 1628 spin_lock_bh(&ar->data_lock); 1629 ppdu_info = ath12k_dp_htt_get_ppdu_desc(ar, ppdu_id); 1630 if (!ppdu_info) { 1631 spin_unlock_bh(&ar->data_lock); 1632 ret = -EINVAL; 1633 goto exit; 1634 } 1635 1636 ppdu_info->ppdu_id = ppdu_id; 1637 ret = ath12k_dp_htt_tlv_iter(ab, msg->data, len, 1638 ath12k_htt_tlv_ppdu_stats_parse, 1639 (void *)ppdu_info); 1640 if (ret) { 1641 spin_unlock_bh(&ar->data_lock); 1642 ath12k_warn(ab, "Failed to parse tlv %d\n", ret); 1643 goto exit; 1644 } 1645 1646 if (ppdu_info->ppdu_stats.common.num_users >= HTT_PPDU_STATS_MAX_USERS) { 1647 spin_unlock_bh(&ar->data_lock); 1648 ath12k_warn(ab, 1649 "HTT PPDU STATS event has unexpected num_users %u, should be smaller than %u\n", 1650 ppdu_info->ppdu_stats.common.num_users, 1651 HTT_PPDU_STATS_MAX_USERS); 1652 ret = -EINVAL; 1653 goto exit; 1654 } 1655 1656 /* back up data rate tlv for all peers */ 1657 if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_DATA && 1658 (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON)) && 1659 ppdu_info->delay_ba) { 1660 for (i = 0; i < ppdu_info->ppdu_stats.common.num_users; i++) { 1661 peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id; 1662 spin_lock_bh(&ab->base_lock); 1663 peer = ath12k_peer_find_by_id(ab, peer_id); 1664 if (!peer) { 1665 spin_unlock_bh(&ab->base_lock); 1666 continue; 1667 } 1668 1669 usr_stats = &ppdu_info->ppdu_stats.user_stats[i]; 1670 if (usr_stats->delay_ba) 1671 ath12k_copy_to_delay_stats(peer, usr_stats); 1672 spin_unlock_bh(&ab->base_lock); 1673 } 1674 } 1675 1676 /* restore all peers' data rate tlv to mu-bar tlv */ 1677 if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_BAR && 1678 (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON))) { 1679 for (i = 0; i < ppdu_info->bar_num_users; i++) { 1680 peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id; 1681 spin_lock_bh(&ab->base_lock); 1682 peer = ath12k_peer_find_by_id(ab, peer_id); 1683 if (!peer) { 1684 spin_unlock_bh(&ab->base_lock); 1685 continue; 1686 } 1687 1688 usr_stats = &ppdu_info->ppdu_stats.user_stats[i]; 1689 if (peer->delayba_flag) 1690 ath12k_copy_to_bar(peer, usr_stats); 1691 spin_unlock_bh(&ab->base_lock); 1692 } 1693 } 1694 1695 spin_unlock_bh(&ar->data_lock); 1696 1697 exit: 1698 rcu_read_unlock(); 1699 1700 return ret; 1701 } 1702 1703 static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab, 1704 struct sk_buff *skb) 1705 { 1706 struct ath12k_htt_mlo_offset_msg *msg; 1707 struct ath12k_pdev *pdev; 1708 struct ath12k *ar; 1709 u8 pdev_id; 1710 1711 msg = (struct ath12k_htt_mlo_offset_msg *)skb->data; 1712 pdev_id = u32_get_bits(__le32_to_cpu(msg->info), 1713 HTT_T2H_MLO_OFFSET_INFO_PDEV_ID); 1714 1715 rcu_read_lock(); 1716 ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id); 1717 if (!ar) { 1718 /* It is possible that the ar is not yet active (started). 
1719 * The above function will only look for the active pdev 1720 * and hence %NULL return is possible. Just silently 1721 * discard this message 1722 */ 1723 goto exit; 1724 } 1725 1726 spin_lock_bh(&ar->data_lock); 1727 pdev = ar->pdev; 1728 1729 pdev->timestamp.info = __le32_to_cpu(msg->info); 1730 pdev->timestamp.sync_timestamp_lo_us = __le32_to_cpu(msg->sync_timestamp_lo_us); 1731 pdev->timestamp.sync_timestamp_hi_us = __le32_to_cpu(msg->sync_timestamp_hi_us); 1732 pdev->timestamp.mlo_offset_lo = __le32_to_cpu(msg->mlo_offset_lo); 1733 pdev->timestamp.mlo_offset_hi = __le32_to_cpu(msg->mlo_offset_hi); 1734 pdev->timestamp.mlo_offset_clks = __le32_to_cpu(msg->mlo_offset_clks); 1735 pdev->timestamp.mlo_comp_clks = __le32_to_cpu(msg->mlo_comp_clks); 1736 pdev->timestamp.mlo_comp_timer = __le32_to_cpu(msg->mlo_comp_timer); 1737 1738 spin_unlock_bh(&ar->data_lock); 1739 exit: 1740 rcu_read_unlock(); 1741 } 1742 1743 void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab, 1744 struct sk_buff *skb) 1745 { 1746 struct ath12k_dp *dp = &ab->dp; 1747 struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data; 1748 enum htt_t2h_msg_type type; 1749 u16 peer_id; 1750 u8 vdev_id; 1751 u8 mac_addr[ETH_ALEN]; 1752 u16 peer_mac_h16; 1753 u16 ast_hash = 0; 1754 u16 hw_peer_id; 1755 1756 type = le32_get_bits(resp->version_msg.version, HTT_T2H_MSG_TYPE); 1757 1758 ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type); 1759 1760 switch (type) { 1761 case HTT_T2H_MSG_TYPE_VERSION_CONF: 1762 dp->htt_tgt_ver_major = le32_get_bits(resp->version_msg.version, 1763 HTT_T2H_VERSION_CONF_MAJOR); 1764 dp->htt_tgt_ver_minor = le32_get_bits(resp->version_msg.version, 1765 HTT_T2H_VERSION_CONF_MINOR); 1766 complete(&dp->htt_tgt_version_received); 1767 break; 1768 /* TODO: remove unused peer map versions after testing */ 1769 case HTT_T2H_MSG_TYPE_PEER_MAP: 1770 vdev_id = le32_get_bits(resp->peer_map_ev.info, 1771 HTT_T2H_PEER_MAP_INFO_VDEV_ID); 1772 peer_id = le32_get_bits(resp->peer_map_ev.info, 1773 HTT_T2H_PEER_MAP_INFO_PEER_ID); 1774 peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1, 1775 HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16); 1776 ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32), 1777 peer_mac_h16, mac_addr); 1778 ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0); 1779 break; 1780 case HTT_T2H_MSG_TYPE_PEER_MAP2: 1781 vdev_id = le32_get_bits(resp->peer_map_ev.info, 1782 HTT_T2H_PEER_MAP_INFO_VDEV_ID); 1783 peer_id = le32_get_bits(resp->peer_map_ev.info, 1784 HTT_T2H_PEER_MAP_INFO_PEER_ID); 1785 peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1, 1786 HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16); 1787 ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32), 1788 peer_mac_h16, mac_addr); 1789 ast_hash = le32_get_bits(resp->peer_map_ev.info2, 1790 HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL); 1791 hw_peer_id = le32_get_bits(resp->peer_map_ev.info1, 1792 HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID); 1793 ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash, 1794 hw_peer_id); 1795 break; 1796 case HTT_T2H_MSG_TYPE_PEER_MAP3: 1797 vdev_id = le32_get_bits(resp->peer_map_ev.info, 1798 HTT_T2H_PEER_MAP_INFO_VDEV_ID); 1799 peer_id = le32_get_bits(resp->peer_map_ev.info, 1800 HTT_T2H_PEER_MAP_INFO_PEER_ID); 1801 peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1, 1802 HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16); 1803 ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32), 1804 peer_mac_h16, mac_addr); 1805 ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 
ast_hash, 1806 peer_id); 1807 break; 1808 case HTT_T2H_MSG_TYPE_PEER_UNMAP: 1809 case HTT_T2H_MSG_TYPE_PEER_UNMAP2: 1810 peer_id = le32_get_bits(resp->peer_unmap_ev.info, 1811 HTT_T2H_PEER_UNMAP_INFO_PEER_ID); 1812 ath12k_peer_unmap_event(ab, peer_id); 1813 break; 1814 case HTT_T2H_MSG_TYPE_PPDU_STATS_IND: 1815 ath12k_htt_pull_ppdu_stats(ab, skb); 1816 break; 1817 case HTT_T2H_MSG_TYPE_EXT_STATS_CONF: 1818 ath12k_debugfs_htt_ext_stats_handler(ab, skb); 1819 break; 1820 case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND: 1821 ath12k_htt_mlo_offset_event_handler(ab, skb); 1822 break; 1823 default: 1824 ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt event %d not handled\n", 1825 type); 1826 break; 1827 } 1828 1829 dev_kfree_skb_any(skb); 1830 } 1831 1832 static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar, 1833 struct sk_buff_head *msdu_list, 1834 struct sk_buff *first, struct sk_buff *last, 1835 u8 l3pad_bytes, int msdu_len) 1836 { 1837 struct ath12k_base *ab = ar->ab; 1838 struct sk_buff *skb; 1839 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first); 1840 int buf_first_hdr_len, buf_first_len; 1841 struct hal_rx_desc *ldesc; 1842 int space_extra, rem_len, buf_len; 1843 u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; 1844 bool is_continuation; 1845 1846 /* As the msdu is spread across multiple rx buffers, 1847 * find the offset to the start of msdu for computing 1848 * the length of the msdu in the first buffer. 1849 */ 1850 buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes; 1851 buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len; 1852 1853 if (WARN_ON_ONCE(msdu_len <= buf_first_len)) { 1854 skb_put(first, buf_first_hdr_len + msdu_len); 1855 skb_pull(first, buf_first_hdr_len); 1856 return 0; 1857 } 1858 1859 ldesc = (struct hal_rx_desc *)last->data; 1860 rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, ldesc); 1861 rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, ldesc); 1862 1863 /* MSDU spans over multiple buffers because the length of the MSDU 1864 * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data 1865 * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. 1866 */ 1867 skb_put(first, DP_RX_BUFFER_SIZE); 1868 skb_pull(first, buf_first_hdr_len); 1869 1870 /* When an MSDU spread over multiple buffers MSDU_END 1871 * tlvs are valid only in the last buffer. Copy those tlvs. 
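	 * They are copied into the first buffer's descriptor so that later
	 * helpers which look only at rxcb->rx_desc see complete MSDU_END info.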
1872 */ 1873 ath12k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc); 1874 1875 space_extra = msdu_len - (buf_first_len + skb_tailroom(first)); 1876 if (space_extra > 0 && 1877 (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) { 1878 /* Free up all buffers of the MSDU */ 1879 while ((skb = __skb_dequeue(msdu_list)) != NULL) { 1880 rxcb = ATH12K_SKB_RXCB(skb); 1881 if (!rxcb->is_continuation) { 1882 dev_kfree_skb_any(skb); 1883 break; 1884 } 1885 dev_kfree_skb_any(skb); 1886 } 1887 return -ENOMEM; 1888 } 1889 1890 rem_len = msdu_len - buf_first_len; 1891 while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) { 1892 rxcb = ATH12K_SKB_RXCB(skb); 1893 is_continuation = rxcb->is_continuation; 1894 if (is_continuation) 1895 buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz; 1896 else 1897 buf_len = rem_len; 1898 1899 if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) { 1900 WARN_ON_ONCE(1); 1901 dev_kfree_skb_any(skb); 1902 return -EINVAL; 1903 } 1904 1905 skb_put(skb, buf_len + hal_rx_desc_sz); 1906 skb_pull(skb, hal_rx_desc_sz); 1907 skb_copy_from_linear_data(skb, skb_put(first, buf_len), 1908 buf_len); 1909 dev_kfree_skb_any(skb); 1910 1911 rem_len -= buf_len; 1912 if (!is_continuation) 1913 break; 1914 } 1915 1916 return 0; 1917 } 1918 1919 static struct sk_buff *ath12k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list, 1920 struct sk_buff *first) 1921 { 1922 struct sk_buff *skb; 1923 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first); 1924 1925 if (!rxcb->is_continuation) 1926 return first; 1927 1928 skb_queue_walk(msdu_list, skb) { 1929 rxcb = ATH12K_SKB_RXCB(skb); 1930 if (!rxcb->is_continuation) 1931 return skb; 1932 } 1933 1934 return NULL; 1935 } 1936 1937 static void ath12k_dp_rx_h_csum_offload(struct sk_buff *msdu, 1938 struct ath12k_dp_rx_info *rx_info) 1939 { 1940 msdu->ip_summed = (rx_info->ip_csum_fail || rx_info->l4_csum_fail) ? 
1941 CHECKSUM_NONE : CHECKSUM_UNNECESSARY; 1942 } 1943 1944 int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar, enum hal_encrypt_type enctype) 1945 { 1946 switch (enctype) { 1947 case HAL_ENCRYPT_TYPE_OPEN: 1948 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 1949 case HAL_ENCRYPT_TYPE_TKIP_MIC: 1950 return 0; 1951 case HAL_ENCRYPT_TYPE_CCMP_128: 1952 return IEEE80211_CCMP_MIC_LEN; 1953 case HAL_ENCRYPT_TYPE_CCMP_256: 1954 return IEEE80211_CCMP_256_MIC_LEN; 1955 case HAL_ENCRYPT_TYPE_GCMP_128: 1956 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 1957 return IEEE80211_GCMP_MIC_LEN; 1958 case HAL_ENCRYPT_TYPE_WEP_40: 1959 case HAL_ENCRYPT_TYPE_WEP_104: 1960 case HAL_ENCRYPT_TYPE_WEP_128: 1961 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 1962 case HAL_ENCRYPT_TYPE_WAPI: 1963 break; 1964 } 1965 1966 ath12k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype); 1967 return 0; 1968 } 1969 1970 static int ath12k_dp_rx_crypto_param_len(struct ath12k *ar, 1971 enum hal_encrypt_type enctype) 1972 { 1973 switch (enctype) { 1974 case HAL_ENCRYPT_TYPE_OPEN: 1975 return 0; 1976 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 1977 case HAL_ENCRYPT_TYPE_TKIP_MIC: 1978 return IEEE80211_TKIP_IV_LEN; 1979 case HAL_ENCRYPT_TYPE_CCMP_128: 1980 return IEEE80211_CCMP_HDR_LEN; 1981 case HAL_ENCRYPT_TYPE_CCMP_256: 1982 return IEEE80211_CCMP_256_HDR_LEN; 1983 case HAL_ENCRYPT_TYPE_GCMP_128: 1984 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 1985 return IEEE80211_GCMP_HDR_LEN; 1986 case HAL_ENCRYPT_TYPE_WEP_40: 1987 case HAL_ENCRYPT_TYPE_WEP_104: 1988 case HAL_ENCRYPT_TYPE_WEP_128: 1989 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 1990 case HAL_ENCRYPT_TYPE_WAPI: 1991 break; 1992 } 1993 1994 ath12k_warn(ar->ab, "unsupported encryption type %d\n", enctype); 1995 return 0; 1996 } 1997 1998 static int ath12k_dp_rx_crypto_icv_len(struct ath12k *ar, 1999 enum hal_encrypt_type enctype) 2000 { 2001 switch (enctype) { 2002 case HAL_ENCRYPT_TYPE_OPEN: 2003 case HAL_ENCRYPT_TYPE_CCMP_128: 2004 case HAL_ENCRYPT_TYPE_CCMP_256: 2005 case HAL_ENCRYPT_TYPE_GCMP_128: 2006 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 2007 return 0; 2008 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 2009 case HAL_ENCRYPT_TYPE_TKIP_MIC: 2010 return IEEE80211_TKIP_ICV_LEN; 2011 case HAL_ENCRYPT_TYPE_WEP_40: 2012 case HAL_ENCRYPT_TYPE_WEP_104: 2013 case HAL_ENCRYPT_TYPE_WEP_128: 2014 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 2015 case HAL_ENCRYPT_TYPE_WAPI: 2016 break; 2017 } 2018 2019 ath12k_warn(ar->ab, "unsupported encryption type %d\n", enctype); 2020 return 0; 2021 } 2022 2023 static void ath12k_dp_rx_h_undecap_nwifi(struct ath12k *ar, 2024 struct sk_buff *msdu, 2025 enum hal_encrypt_type enctype, 2026 struct ieee80211_rx_status *status) 2027 { 2028 struct ath12k_base *ab = ar->ab; 2029 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 2030 u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN]; 2031 struct ieee80211_hdr *hdr; 2032 size_t hdr_len; 2033 u8 *crypto_hdr; 2034 u16 qos_ctl; 2035 2036 /* pull decapped header */ 2037 hdr = (struct ieee80211_hdr *)msdu->data; 2038 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2039 skb_pull(msdu, hdr_len); 2040 2041 /* Rebuild qos header */ 2042 hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA); 2043 2044 /* Reset the order bit as the HT_Control header is stripped */ 2045 hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER)); 2046 2047 qos_ctl = rxcb->tid; 2048 2049 if (ath12k_dp_rx_h_mesh_ctl_present(ab, rxcb->rx_desc)) 2050 qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT; 2051 2052 /* TODO: Add other QoS ctl fields when required */ 2053 2054 /* copy decap header before 
overwriting for reuse below */ 2055 memcpy(decap_hdr, hdr, hdr_len); 2056 2057 /* Rebuild crypto header for mac80211 use */ 2058 if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 2059 crypto_hdr = skb_push(msdu, ath12k_dp_rx_crypto_param_len(ar, enctype)); 2060 ath12k_dp_rx_desc_get_crypto_header(ar->ab, 2061 rxcb->rx_desc, crypto_hdr, 2062 enctype); 2063 } 2064 2065 memcpy(skb_push(msdu, 2066 IEEE80211_QOS_CTL_LEN), &qos_ctl, 2067 IEEE80211_QOS_CTL_LEN); 2068 memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len); 2069 } 2070 2071 static void ath12k_dp_rx_h_undecap_raw(struct ath12k *ar, struct sk_buff *msdu, 2072 enum hal_encrypt_type enctype, 2073 struct ieee80211_rx_status *status, 2074 bool decrypted) 2075 { 2076 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 2077 struct ieee80211_hdr *hdr; 2078 size_t hdr_len; 2079 size_t crypto_len; 2080 2081 if (!rxcb->is_first_msdu || 2082 !(rxcb->is_first_msdu && rxcb->is_last_msdu)) { 2083 WARN_ON_ONCE(1); 2084 return; 2085 } 2086 2087 skb_trim(msdu, msdu->len - FCS_LEN); 2088 2089 if (!decrypted) 2090 return; 2091 2092 hdr = (void *)msdu->data; 2093 2094 /* Tail */ 2095 if (status->flag & RX_FLAG_IV_STRIPPED) { 2096 skb_trim(msdu, msdu->len - 2097 ath12k_dp_rx_crypto_mic_len(ar, enctype)); 2098 2099 skb_trim(msdu, msdu->len - 2100 ath12k_dp_rx_crypto_icv_len(ar, enctype)); 2101 } else { 2102 /* MIC */ 2103 if (status->flag & RX_FLAG_MIC_STRIPPED) 2104 skb_trim(msdu, msdu->len - 2105 ath12k_dp_rx_crypto_mic_len(ar, enctype)); 2106 2107 /* ICV */ 2108 if (status->flag & RX_FLAG_ICV_STRIPPED) 2109 skb_trim(msdu, msdu->len - 2110 ath12k_dp_rx_crypto_icv_len(ar, enctype)); 2111 } 2112 2113 /* MMIC */ 2114 if ((status->flag & RX_FLAG_MMIC_STRIPPED) && 2115 !ieee80211_has_morefrags(hdr->frame_control) && 2116 enctype == HAL_ENCRYPT_TYPE_TKIP_MIC) 2117 skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN); 2118 2119 /* Head */ 2120 if (status->flag & RX_FLAG_IV_STRIPPED) { 2121 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2122 crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype); 2123 2124 memmove(msdu->data + crypto_len, msdu->data, hdr_len); 2125 skb_pull(msdu, crypto_len); 2126 } 2127 } 2128 2129 static void ath12k_get_dot11_hdr_from_rx_desc(struct ath12k *ar, 2130 struct sk_buff *msdu, 2131 struct ath12k_skb_rxcb *rxcb, 2132 struct ieee80211_rx_status *status, 2133 enum hal_encrypt_type enctype) 2134 { 2135 struct hal_rx_desc *rx_desc = rxcb->rx_desc; 2136 struct ath12k_base *ab = ar->ab; 2137 size_t hdr_len, crypto_len; 2138 struct ieee80211_hdr hdr; 2139 __le16 qos_ctl; 2140 u8 *crypto_hdr, mesh_ctrl; 2141 2142 ath12k_dp_rx_desc_get_dot11_hdr(ab, rx_desc, &hdr); 2143 hdr_len = ieee80211_hdrlen(hdr.frame_control); 2144 mesh_ctrl = ath12k_dp_rx_h_mesh_ctl_present(ab, rx_desc); 2145 2146 if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 2147 crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype); 2148 crypto_hdr = skb_push(msdu, crypto_len); 2149 ath12k_dp_rx_desc_get_crypto_header(ab, rx_desc, crypto_hdr, enctype); 2150 } 2151 2152 skb_push(msdu, hdr_len); 2153 memcpy(msdu->data, &hdr, min(hdr_len, sizeof(hdr))); 2154 2155 if (rxcb->is_mcbc) 2156 status->flag &= ~RX_FLAG_PN_VALIDATED; 2157 2158 /* Add QOS header */ 2159 if (ieee80211_is_data_qos(hdr.frame_control)) { 2160 struct ieee80211_hdr *qos_ptr = (struct ieee80211_hdr *)msdu->data; 2161 2162 qos_ctl = cpu_to_le16(rxcb->tid & IEEE80211_QOS_CTL_TID_MASK); 2163 if (mesh_ctrl) 2164 qos_ctl |= cpu_to_le16(IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT); 2165 2166 memcpy(ieee80211_get_qos_ctl(qos_ptr), 
&qos_ctl, IEEE80211_QOS_CTL_LEN); 2167 } 2168 } 2169 2170 static void ath12k_dp_rx_h_undecap_eth(struct ath12k *ar, 2171 struct sk_buff *msdu, 2172 enum hal_encrypt_type enctype, 2173 struct ieee80211_rx_status *status) 2174 { 2175 struct ieee80211_hdr *hdr; 2176 struct ethhdr *eth; 2177 u8 da[ETH_ALEN]; 2178 u8 sa[ETH_ALEN]; 2179 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 2180 struct ath12k_dp_rx_rfc1042_hdr rfc = {0xaa, 0xaa, 0x03, {0x00, 0x00, 0x00}}; 2181 2182 eth = (struct ethhdr *)msdu->data; 2183 ether_addr_copy(da, eth->h_dest); 2184 ether_addr_copy(sa, eth->h_source); 2185 rfc.snap_type = eth->h_proto; 2186 skb_pull(msdu, sizeof(*eth)); 2187 memcpy(skb_push(msdu, sizeof(rfc)), &rfc, 2188 sizeof(rfc)); 2189 ath12k_get_dot11_hdr_from_rx_desc(ar, msdu, rxcb, status, enctype); 2190 2191 /* original 802.11 header has a different DA and in 2192 * case of 4addr it may also have different SA 2193 */ 2194 hdr = (struct ieee80211_hdr *)msdu->data; 2195 ether_addr_copy(ieee80211_get_DA(hdr), da); 2196 ether_addr_copy(ieee80211_get_SA(hdr), sa); 2197 } 2198 2199 static void ath12k_dp_rx_h_undecap(struct ath12k *ar, struct sk_buff *msdu, 2200 struct hal_rx_desc *rx_desc, 2201 enum hal_encrypt_type enctype, 2202 struct ieee80211_rx_status *status, 2203 bool decrypted) 2204 { 2205 struct ath12k_base *ab = ar->ab; 2206 u8 decap; 2207 struct ethhdr *ehdr; 2208 2209 decap = ath12k_dp_rx_h_decap_type(ab, rx_desc); 2210 2211 switch (decap) { 2212 case DP_RX_DECAP_TYPE_NATIVE_WIFI: 2213 ath12k_dp_rx_h_undecap_nwifi(ar, msdu, enctype, status); 2214 break; 2215 case DP_RX_DECAP_TYPE_RAW: 2216 ath12k_dp_rx_h_undecap_raw(ar, msdu, enctype, status, 2217 decrypted); 2218 break; 2219 case DP_RX_DECAP_TYPE_ETHERNET2_DIX: 2220 ehdr = (struct ethhdr *)msdu->data; 2221 2222 /* mac80211 allows fast path only for authorized STA */ 2223 if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) { 2224 ATH12K_SKB_RXCB(msdu)->is_eapol = true; 2225 ath12k_dp_rx_h_undecap_eth(ar, msdu, enctype, status); 2226 break; 2227 } 2228 2229 /* PN for mcast packets will be validated in mac80211; 2230 * remove eth header and add 802.11 header. 
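 * ath12k_dp_rx_h_undecap_eth() rebuilds the 802.11 header (and, when
 * the IV has not been stripped, the crypto header) from the rx
 * descriptor, and RX_FLAG_PN_VALIDATED is cleared for mcbc frames so
 * that mac80211 runs the replay check on the reconstructed frame.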
2231 */ 2232 if (ATH12K_SKB_RXCB(msdu)->is_mcbc && decrypted) 2233 ath12k_dp_rx_h_undecap_eth(ar, msdu, enctype, status); 2234 break; 2235 case DP_RX_DECAP_TYPE_8023: 2236 /* TODO: Handle undecap for these formats */ 2237 break; 2238 } 2239 } 2240 2241 struct ath12k_peer * 2242 ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu, 2243 struct ath12k_dp_rx_info *rx_info) 2244 { 2245 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 2246 struct ath12k_peer *peer = NULL; 2247 2248 lockdep_assert_held(&ab->base_lock); 2249 2250 if (rxcb->peer_id) 2251 peer = ath12k_peer_find_by_id(ab, rxcb->peer_id); 2252 2253 if (peer) 2254 return peer; 2255 2256 if (rx_info->addr2_present) 2257 peer = ath12k_peer_find_by_addr(ab, rx_info->addr2); 2258 2259 return peer; 2260 } 2261 2262 static void ath12k_dp_rx_h_mpdu(struct ath12k *ar, 2263 struct sk_buff *msdu, 2264 struct hal_rx_desc *rx_desc, 2265 struct ath12k_dp_rx_info *rx_info) 2266 { 2267 struct ath12k_base *ab = ar->ab; 2268 struct ath12k_skb_rxcb *rxcb; 2269 enum hal_encrypt_type enctype; 2270 bool is_decrypted = false; 2271 struct ieee80211_hdr *hdr; 2272 struct ath12k_peer *peer; 2273 struct ieee80211_rx_status *rx_status = rx_info->rx_status; 2274 u32 err_bitmap; 2275 2276 /* PN for multicast packets will be checked in mac80211 */ 2277 rxcb = ATH12K_SKB_RXCB(msdu); 2278 rxcb->is_mcbc = rx_info->is_mcbc; 2279 2280 if (rxcb->is_mcbc) 2281 rxcb->peer_id = rx_info->peer_id; 2282 2283 spin_lock_bh(&ar->ab->base_lock); 2284 peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu, rx_info); 2285 if (peer) { 2286 /* resetting mcbc bit because mcbc packets are unicast 2287 * packets only for AP as STA sends unicast packets. 2288 */ 2289 rxcb->is_mcbc = rxcb->is_mcbc && !peer->ucast_ra_only; 2290 2291 if (rxcb->is_mcbc) 2292 enctype = peer->sec_type_grp; 2293 else 2294 enctype = peer->sec_type; 2295 } else { 2296 enctype = HAL_ENCRYPT_TYPE_OPEN; 2297 } 2298 spin_unlock_bh(&ar->ab->base_lock); 2299 2300 err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc); 2301 if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap) 2302 is_decrypted = ath12k_dp_rx_h_is_decrypted(ab, rx_desc); 2303 2304 /* Clear per-MPDU flags while leaving per-PPDU flags intact */ 2305 rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC | 2306 RX_FLAG_MMIC_ERROR | 2307 RX_FLAG_DECRYPTED | 2308 RX_FLAG_IV_STRIPPED | 2309 RX_FLAG_MMIC_STRIPPED); 2310 2311 if (err_bitmap & HAL_RX_MPDU_ERR_FCS) 2312 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; 2313 if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) 2314 rx_status->flag |= RX_FLAG_MMIC_ERROR; 2315 2316 if (is_decrypted) { 2317 rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED; 2318 2319 if (rx_info->is_mcbc) 2320 rx_status->flag |= RX_FLAG_MIC_STRIPPED | 2321 RX_FLAG_ICV_STRIPPED; 2322 else 2323 rx_status->flag |= RX_FLAG_IV_STRIPPED | 2324 RX_FLAG_PN_VALIDATED; 2325 } 2326 2327 ath12k_dp_rx_h_csum_offload(msdu, rx_info); 2328 ath12k_dp_rx_h_undecap(ar, msdu, rx_desc, 2329 enctype, rx_status, is_decrypted); 2330 2331 if (!is_decrypted || rx_info->is_mcbc) 2332 return; 2333 2334 if (rx_info->decap_type != DP_RX_DECAP_TYPE_ETHERNET2_DIX) { 2335 hdr = (void *)msdu->data; 2336 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); 2337 } 2338 } 2339 2340 static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info) 2341 { 2342 struct ieee80211_supported_band *sband; 2343 struct ieee80211_rx_status *rx_status = rx_info->rx_status; 2344 enum rx_msdu_start_pkt_type pkt_type = rx_info->pkt_type; 2345 u8 bw = rx_info->bw, 
sgi = rx_info->sgi; 2346 u8 rate_mcs = rx_info->rate_mcs, nss = rx_info->nss; 2347 bool is_cck; 2348 2349 switch (pkt_type) { 2350 case RX_MSDU_START_PKT_TYPE_11A: 2351 case RX_MSDU_START_PKT_TYPE_11B: 2352 is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B); 2353 sband = &ar->mac.sbands[rx_status->band]; 2354 rx_status->rate_idx = ath12k_mac_hw_rate_to_idx(sband, rate_mcs, 2355 is_cck); 2356 break; 2357 case RX_MSDU_START_PKT_TYPE_11N: 2358 rx_status->encoding = RX_ENC_HT; 2359 if (rate_mcs > ATH12K_HT_MCS_MAX) { 2360 ath12k_warn(ar->ab, 2361 "Received with invalid mcs in HT mode %d\n", 2362 rate_mcs); 2363 break; 2364 } 2365 rx_status->rate_idx = rate_mcs + (8 * (nss - 1)); 2366 if (sgi) 2367 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 2368 rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw); 2369 break; 2370 case RX_MSDU_START_PKT_TYPE_11AC: 2371 rx_status->encoding = RX_ENC_VHT; 2372 rx_status->rate_idx = rate_mcs; 2373 if (rate_mcs > ATH12K_VHT_MCS_MAX) { 2374 ath12k_warn(ar->ab, 2375 "Received with invalid mcs in VHT mode %d\n", 2376 rate_mcs); 2377 break; 2378 } 2379 rx_status->nss = nss; 2380 if (sgi) 2381 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 2382 rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw); 2383 break; 2384 case RX_MSDU_START_PKT_TYPE_11AX: 2385 rx_status->rate_idx = rate_mcs; 2386 if (rate_mcs > ATH12K_HE_MCS_MAX) { 2387 ath12k_warn(ar->ab, 2388 "Received with invalid mcs in HE mode %d\n", 2389 rate_mcs); 2390 break; 2391 } 2392 rx_status->encoding = RX_ENC_HE; 2393 rx_status->nss = nss; 2394 rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi); 2395 rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw); 2396 break; 2397 case RX_MSDU_START_PKT_TYPE_11BE: 2398 rx_status->rate_idx = rate_mcs; 2399 2400 if (rate_mcs > ATH12K_EHT_MCS_MAX) { 2401 ath12k_warn(ar->ab, 2402 "Received with invalid mcs in EHT mode %d\n", 2403 rate_mcs); 2404 break; 2405 } 2406 2407 rx_status->encoding = RX_ENC_EHT; 2408 rx_status->nss = nss; 2409 rx_status->eht.gi = ath12k_mac_eht_gi_to_nl80211_eht_gi(sgi); 2410 rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw); 2411 break; 2412 default: 2413 break; 2414 } 2415 } 2416 2417 void ath12k_dp_rx_h_fetch_info(struct ath12k_base *ab, struct hal_rx_desc *rx_desc, 2418 struct ath12k_dp_rx_info *rx_info) 2419 { 2420 rx_info->ip_csum_fail = ath12k_dp_rx_h_ip_cksum_fail(ab, rx_desc); 2421 rx_info->l4_csum_fail = ath12k_dp_rx_h_l4_cksum_fail(ab, rx_desc); 2422 rx_info->is_mcbc = ath12k_dp_rx_h_is_da_mcbc(ab, rx_desc); 2423 rx_info->decap_type = ath12k_dp_rx_h_decap_type(ab, rx_desc); 2424 rx_info->pkt_type = ath12k_dp_rx_h_pkt_type(ab, rx_desc); 2425 rx_info->sgi = ath12k_dp_rx_h_sgi(ab, rx_desc); 2426 rx_info->rate_mcs = ath12k_dp_rx_h_rate_mcs(ab, rx_desc); 2427 rx_info->bw = ath12k_dp_rx_h_rx_bw(ab, rx_desc); 2428 rx_info->nss = ath12k_dp_rx_h_nss(ab, rx_desc); 2429 rx_info->tid = ath12k_dp_rx_h_tid(ab, rx_desc); 2430 rx_info->peer_id = ath12k_dp_rx_h_peer_id(ab, rx_desc); 2431 rx_info->phy_meta_data = ath12k_dp_rx_h_freq(ab, rx_desc); 2432 2433 if (ath12k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)) { 2434 ether_addr_copy(rx_info->addr2, 2435 ath12k_dp_rxdesc_get_mpdu_start_addr2(ab, rx_desc)); 2436 rx_info->addr2_present = true; 2437 } 2438 2439 ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "rx_desc: ", 2440 rx_desc, sizeof(*rx_desc)); 2441 } 2442 2443 void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info) 2444 { 2445 struct ieee80211_rx_status *rx_status = rx_info->rx_status; 2446 u8 channel_num; 2447 u32 center_freq, meta_data; 2448 struct 
ieee80211_channel *channel; 2449 2450 rx_status->freq = 0; 2451 rx_status->rate_idx = 0; 2452 rx_status->nss = 0; 2453 rx_status->encoding = RX_ENC_LEGACY; 2454 rx_status->bw = RATE_INFO_BW_20; 2455 rx_status->enc_flags = 0; 2456 2457 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; 2458 2459 meta_data = rx_info->phy_meta_data; 2460 channel_num = meta_data; 2461 center_freq = meta_data >> 16; 2462 2463 if (center_freq >= ATH12K_MIN_6GHZ_FREQ && 2464 center_freq <= ATH12K_MAX_6GHZ_FREQ) { 2465 rx_status->band = NL80211_BAND_6GHZ; 2466 rx_status->freq = center_freq; 2467 } else if (channel_num >= 1 && channel_num <= 14) { 2468 rx_status->band = NL80211_BAND_2GHZ; 2469 } else if (channel_num >= 36 && channel_num <= 173) { 2470 rx_status->band = NL80211_BAND_5GHZ; 2471 } else { 2472 spin_lock_bh(&ar->data_lock); 2473 channel = ar->rx_channel; 2474 if (channel) { 2475 rx_status->band = channel->band; 2476 channel_num = 2477 ieee80211_frequency_to_channel(channel->center_freq); 2478 } 2479 spin_unlock_bh(&ar->data_lock); 2480 } 2481 2482 if (rx_status->band != NL80211_BAND_6GHZ) 2483 rx_status->freq = ieee80211_channel_to_frequency(channel_num, 2484 rx_status->band); 2485 2486 ath12k_dp_rx_h_rate(ar, rx_info); 2487 } 2488 2489 static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *napi, 2490 struct sk_buff *msdu, 2491 struct ath12k_dp_rx_info *rx_info) 2492 { 2493 struct ath12k_base *ab = ar->ab; 2494 static const struct ieee80211_radiotap_he known = { 2495 .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN | 2496 IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN), 2497 .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN), 2498 }; 2499 struct ieee80211_radiotap_he *he; 2500 struct ieee80211_rx_status *rx_status; 2501 struct ieee80211_sta *pubsta; 2502 struct ath12k_peer *peer; 2503 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 2504 struct ieee80211_rx_status *status = rx_info->rx_status; 2505 u8 decap = DP_RX_DECAP_TYPE_RAW; 2506 bool is_mcbc = rxcb->is_mcbc; 2507 bool is_eapol = rxcb->is_eapol; 2508 2509 if (status->encoding == RX_ENC_HE && !(status->flag & RX_FLAG_RADIOTAP_HE) && 2510 !(status->flag & RX_FLAG_SKIP_MONITOR)) { 2511 he = skb_push(msdu, sizeof(known)); 2512 memcpy(he, &known, sizeof(known)); 2513 status->flag |= RX_FLAG_RADIOTAP_HE; 2514 } 2515 2516 if (!(status->flag & RX_FLAG_ONLY_MONITOR)) 2517 decap = rx_info->decap_type; 2518 2519 spin_lock_bh(&ab->base_lock); 2520 peer = ath12k_dp_rx_h_find_peer(ab, msdu, rx_info); 2521 2522 pubsta = peer ? peer->sta : NULL; 2523 2524 if (pubsta && pubsta->valid_links) { 2525 status->link_valid = 1; 2526 status->link_id = peer->link_id; 2527 } 2528 2529 spin_unlock_bh(&ab->base_lock); 2530 2531 ath12k_dbg(ab, ATH12K_DBG_DATA, 2532 "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", 2533 msdu, 2534 msdu->len, 2535 peer ? peer->addr : NULL, 2536 rxcb->tid, 2537 is_mcbc ? "mcast" : "ucast", 2538 ath12k_dp_rx_h_seq_no(ab, rxcb->rx_desc), 2539 (status->encoding == RX_ENC_LEGACY) ? "legacy" : "", 2540 (status->encoding == RX_ENC_HT) ? "ht" : "", 2541 (status->encoding == RX_ENC_VHT) ? "vht" : "", 2542 (status->encoding == RX_ENC_HE) ? "he" : "", 2543 (status->encoding == RX_ENC_EHT) ? "eht" : "", 2544 (status->bw == RATE_INFO_BW_40) ? "40" : "", 2545 (status->bw == RATE_INFO_BW_80) ? "80" : "", 2546 (status->bw == RATE_INFO_BW_160) ? "160" : "", 2547 (status->bw == RATE_INFO_BW_320) ? 
"320" : "", 2548 status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "", 2549 status->rate_idx, 2550 status->nss, 2551 status->freq, 2552 status->band, status->flag, 2553 !!(status->flag & RX_FLAG_FAILED_FCS_CRC), 2554 !!(status->flag & RX_FLAG_MMIC_ERROR), 2555 !!(status->flag & RX_FLAG_AMSDU_MORE)); 2556 2557 ath12k_dbg_dump(ab, ATH12K_DBG_DP_RX, NULL, "dp rx msdu: ", 2558 msdu->data, msdu->len); 2559 2560 rx_status = IEEE80211_SKB_RXCB(msdu); 2561 *rx_status = *status; 2562 2563 /* TODO: trace rx packet */ 2564 2565 /* PN for multicast packets are not validate in HW, 2566 * so skip 802.3 rx path 2567 * Also, fast_rx expects the STA to be authorized, hence 2568 * eapol packets are sent in slow path. 2569 */ 2570 if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol && 2571 !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED)) 2572 rx_status->flag |= RX_FLAG_8023; 2573 2574 ieee80211_rx_napi(ath12k_ar_to_hw(ar), pubsta, msdu, napi); 2575 } 2576 2577 static bool ath12k_dp_rx_check_nwifi_hdr_len_valid(struct ath12k_base *ab, 2578 struct hal_rx_desc *rx_desc, 2579 struct sk_buff *msdu) 2580 { 2581 struct ieee80211_hdr *hdr; 2582 u8 decap_type; 2583 u32 hdr_len; 2584 2585 decap_type = ath12k_dp_rx_h_decap_type(ab, rx_desc); 2586 if (decap_type != DP_RX_DECAP_TYPE_NATIVE_WIFI) 2587 return true; 2588 2589 hdr = (struct ieee80211_hdr *)msdu->data; 2590 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2591 2592 if ((likely(hdr_len <= DP_MAX_NWIFI_HDR_LEN))) 2593 return true; 2594 2595 ab->soc_stats.invalid_rbm++; 2596 WARN_ON_ONCE(1); 2597 return false; 2598 } 2599 2600 static int ath12k_dp_rx_process_msdu(struct ath12k *ar, 2601 struct sk_buff *msdu, 2602 struct sk_buff_head *msdu_list, 2603 struct ath12k_dp_rx_info *rx_info) 2604 { 2605 struct ath12k_base *ab = ar->ab; 2606 struct hal_rx_desc *rx_desc, *lrx_desc; 2607 struct ath12k_skb_rxcb *rxcb; 2608 struct sk_buff *last_buf; 2609 u8 l3_pad_bytes; 2610 u16 msdu_len; 2611 int ret; 2612 u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; 2613 2614 last_buf = ath12k_dp_rx_get_msdu_last_buf(msdu_list, msdu); 2615 if (!last_buf) { 2616 ath12k_warn(ab, 2617 "No valid Rx buffer to access MSDU_END tlv\n"); 2618 ret = -EIO; 2619 goto free_out; 2620 } 2621 2622 rx_desc = (struct hal_rx_desc *)msdu->data; 2623 lrx_desc = (struct hal_rx_desc *)last_buf->data; 2624 if (!ath12k_dp_rx_h_msdu_done(ab, lrx_desc)) { 2625 ath12k_warn(ab, "msdu_done bit in msdu_end is not set\n"); 2626 ret = -EIO; 2627 goto free_out; 2628 } 2629 2630 rxcb = ATH12K_SKB_RXCB(msdu); 2631 rxcb->rx_desc = rx_desc; 2632 msdu_len = ath12k_dp_rx_h_msdu_len(ab, lrx_desc); 2633 l3_pad_bytes = ath12k_dp_rx_h_l3pad(ab, lrx_desc); 2634 2635 if (rxcb->is_frag) { 2636 skb_pull(msdu, hal_rx_desc_sz); 2637 } else if (!rxcb->is_continuation) { 2638 if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) { 2639 ret = -EINVAL; 2640 ath12k_warn(ab, "invalid msdu len %u\n", msdu_len); 2641 ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", rx_desc, 2642 sizeof(*rx_desc)); 2643 goto free_out; 2644 } 2645 skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len); 2646 skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes); 2647 } else { 2648 ret = ath12k_dp_rx_msdu_coalesce(ar, msdu_list, 2649 msdu, last_buf, 2650 l3_pad_bytes, msdu_len); 2651 if (ret) { 2652 ath12k_warn(ab, 2653 "failed to coalesce msdu rx buffer%d\n", ret); 2654 goto free_out; 2655 } 2656 } 2657 2658 if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, rx_desc, msdu))) { 2659 ret = -EINVAL; 2660 goto free_out; 2661 } 2662 2663 
ath12k_dp_rx_h_fetch_info(ab, rx_desc, rx_info); 2664 ath12k_dp_rx_h_ppdu(ar, rx_info); 2665 ath12k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_info); 2666 2667 rx_info->rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED; 2668 2669 return 0; 2670 2671 free_out: 2672 return ret; 2673 } 2674 2675 static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab, 2676 struct napi_struct *napi, 2677 struct sk_buff_head *msdu_list, 2678 int ring_id) 2679 { 2680 struct ath12k_hw_group *ag = ab->ag; 2681 struct ieee80211_rx_status rx_status = {0}; 2682 struct ath12k_skb_rxcb *rxcb; 2683 struct sk_buff *msdu; 2684 struct ath12k *ar; 2685 struct ath12k_hw_link *hw_links = ag->hw_links; 2686 struct ath12k_base *partner_ab; 2687 struct ath12k_dp_rx_info rx_info; 2688 u8 hw_link_id, pdev_id; 2689 int ret; 2690 2691 if (skb_queue_empty(msdu_list)) 2692 return; 2693 2694 rx_info.addr2_present = false; 2695 rx_info.rx_status = &rx_status; 2696 2697 rcu_read_lock(); 2698 2699 while ((msdu = __skb_dequeue(msdu_list))) { 2700 rxcb = ATH12K_SKB_RXCB(msdu); 2701 hw_link_id = rxcb->hw_link_id; 2702 partner_ab = ath12k_ag_to_ab(ag, 2703 hw_links[hw_link_id].device_id); 2704 pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params, 2705 hw_links[hw_link_id].pdev_idx); 2706 ar = partner_ab->pdevs[pdev_id].ar; 2707 if (!rcu_dereference(partner_ab->pdevs_active[pdev_id])) { 2708 dev_kfree_skb_any(msdu); 2709 continue; 2710 } 2711 2712 if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) { 2713 dev_kfree_skb_any(msdu); 2714 continue; 2715 } 2716 2717 ret = ath12k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_info); 2718 if (ret) { 2719 ath12k_dbg(ab, ATH12K_DBG_DATA, 2720 "Unable to process msdu %d", ret); 2721 dev_kfree_skb_any(msdu); 2722 continue; 2723 } 2724 2725 ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_info); 2726 } 2727 2728 rcu_read_unlock(); 2729 } 2730 2731 static u16 ath12k_dp_rx_get_peer_id(struct ath12k_base *ab, 2732 enum ath12k_peer_metadata_version ver, 2733 __le32 peer_metadata) 2734 { 2735 switch (ver) { 2736 default: 2737 ath12k_warn(ab, "Unknown peer metadata version: %d", ver); 2738 fallthrough; 2739 case ATH12K_PEER_METADATA_V0: 2740 return le32_get_bits(peer_metadata, 2741 RX_MPDU_DESC_META_DATA_V0_PEER_ID); 2742 case ATH12K_PEER_METADATA_V1: 2743 return le32_get_bits(peer_metadata, 2744 RX_MPDU_DESC_META_DATA_V1_PEER_ID); 2745 case ATH12K_PEER_METADATA_V1A: 2746 return le32_get_bits(peer_metadata, 2747 RX_MPDU_DESC_META_DATA_V1A_PEER_ID); 2748 case ATH12K_PEER_METADATA_V1B: 2749 return le32_get_bits(peer_metadata, 2750 RX_MPDU_DESC_META_DATA_V1B_PEER_ID); 2751 } 2752 } 2753 2754 int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id, 2755 struct napi_struct *napi, int budget) 2756 { 2757 struct ath12k_hw_group *ag = ab->ag; 2758 struct list_head rx_desc_used_list[ATH12K_MAX_SOCS]; 2759 struct ath12k_hw_link *hw_links = ag->hw_links; 2760 int num_buffs_reaped[ATH12K_MAX_SOCS] = {}; 2761 struct ath12k_rx_desc_info *desc_info; 2762 struct ath12k_dp *dp = &ab->dp; 2763 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; 2764 struct hal_reo_dest_ring *desc; 2765 struct ath12k_base *partner_ab; 2766 struct sk_buff_head msdu_list; 2767 struct ath12k_skb_rxcb *rxcb; 2768 int total_msdu_reaped = 0; 2769 u8 hw_link_id, device_id; 2770 struct hal_srng *srng; 2771 struct sk_buff *msdu; 2772 bool done = false; 2773 u64 desc_va; 2774 2775 __skb_queue_head_init(&msdu_list); 2776 2777 for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) 2778 
INIT_LIST_HEAD(&rx_desc_used_list[device_id]); 2779 2780 srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id]; 2781 2782 spin_lock_bh(&srng->lock); 2783 2784 try_again: 2785 ath12k_hal_srng_access_begin(ab, srng); 2786 2787 while ((desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) { 2788 struct rx_mpdu_desc *mpdu_info; 2789 struct rx_msdu_desc *msdu_info; 2790 enum hal_reo_dest_ring_push_reason push_reason; 2791 u32 cookie; 2792 2793 cookie = le32_get_bits(desc->buf_addr_info.info1, 2794 BUFFER_ADDR_INFO1_SW_COOKIE); 2795 2796 hw_link_id = le32_get_bits(desc->info0, 2797 HAL_REO_DEST_RING_INFO0_SRC_LINK_ID); 2798 2799 desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 | 2800 le32_to_cpu(desc->buf_va_lo)); 2801 desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va); 2802 2803 device_id = hw_links[hw_link_id].device_id; 2804 partner_ab = ath12k_ag_to_ab(ag, device_id); 2805 if (unlikely(!partner_ab)) { 2806 if (desc_info->skb) { 2807 dev_kfree_skb_any(desc_info->skb); 2808 desc_info->skb = NULL; 2809 } 2810 2811 continue; 2812 } 2813 2814 /* retry manual desc retrieval */ 2815 if (!desc_info) { 2816 desc_info = ath12k_dp_get_rx_desc(partner_ab, cookie); 2817 if (!desc_info) { 2818 ath12k_warn(partner_ab, "Invalid cookie in manual descriptor retrieval: 0x%x\n", 2819 cookie); 2820 continue; 2821 } 2822 } 2823 2824 if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC) 2825 ath12k_warn(ab, "Check HW CC implementation"); 2826 2827 msdu = desc_info->skb; 2828 desc_info->skb = NULL; 2829 2830 list_add_tail(&desc_info->list, &rx_desc_used_list[device_id]); 2831 2832 rxcb = ATH12K_SKB_RXCB(msdu); 2833 dma_unmap_single(partner_ab->dev, rxcb->paddr, 2834 msdu->len + skb_tailroom(msdu), 2835 DMA_FROM_DEVICE); 2836 2837 num_buffs_reaped[device_id]++; 2838 2839 push_reason = le32_get_bits(desc->info0, 2840 HAL_REO_DEST_RING_INFO0_PUSH_REASON); 2841 if (push_reason != 2842 HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) { 2843 dev_kfree_skb_any(msdu); 2844 ab->soc_stats.hal_reo_error[ring_id]++; 2845 continue; 2846 } 2847 2848 msdu_info = &desc->rx_msdu_info; 2849 mpdu_info = &desc->rx_mpdu_info; 2850 2851 rxcb->is_first_msdu = !!(le32_to_cpu(msdu_info->info0) & 2852 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU); 2853 rxcb->is_last_msdu = !!(le32_to_cpu(msdu_info->info0) & 2854 RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU); 2855 rxcb->is_continuation = !!(le32_to_cpu(msdu_info->info0) & 2856 RX_MSDU_DESC_INFO0_MSDU_CONTINUATION); 2857 rxcb->hw_link_id = hw_link_id; 2858 rxcb->peer_id = ath12k_dp_rx_get_peer_id(ab, dp->peer_metadata_ver, 2859 mpdu_info->peer_meta_data); 2860 rxcb->tid = le32_get_bits(mpdu_info->info0, 2861 RX_MPDU_DESC_INFO0_TID); 2862 2863 __skb_queue_tail(&msdu_list, msdu); 2864 2865 if (!rxcb->is_continuation) { 2866 total_msdu_reaped++; 2867 done = true; 2868 } else { 2869 done = false; 2870 } 2871 2872 if (total_msdu_reaped >= budget) 2873 break; 2874 } 2875 2876 /* Hw might have updated the head pointer after we cached it. 2877 * In this case, even though there are entries in the ring we'll 2878 * get rx_desc NULL. Give the read another try with updated cached 2879 * head pointer so that we can reap complete MPDU in the current 2880 * rx processing. 
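 * Without the retry the tail MPDU could be handed to the rx path with
 * its continuation buffers still in the ring and would then be dropped
 * for want of its MSDU_END buffer.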
2881 */ 2882 if (!done && ath12k_hal_srng_dst_num_free(ab, srng, true)) { 2883 ath12k_hal_srng_access_end(ab, srng); 2884 goto try_again; 2885 } 2886 2887 ath12k_hal_srng_access_end(ab, srng); 2888 2889 spin_unlock_bh(&srng->lock); 2890 2891 if (!total_msdu_reaped) 2892 goto exit; 2893 2894 for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) { 2895 if (!num_buffs_reaped[device_id]) 2896 continue; 2897 2898 partner_ab = ath12k_ag_to_ab(ag, device_id); 2899 rx_ring = &partner_ab->dp.rx_refill_buf_ring; 2900 2901 ath12k_dp_rx_bufs_replenish(partner_ab, rx_ring, 2902 &rx_desc_used_list[device_id], 2903 num_buffs_reaped[device_id]); 2904 } 2905 2906 ath12k_dp_rx_process_received_packets(ab, napi, &msdu_list, 2907 ring_id); 2908 2909 exit: 2910 return total_msdu_reaped; 2911 } 2912 2913 static void ath12k_dp_rx_frag_timer(struct timer_list *timer) 2914 { 2915 struct ath12k_dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer); 2916 2917 spin_lock_bh(&rx_tid->ab->base_lock); 2918 if (rx_tid->last_frag_no && 2919 rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) { 2920 spin_unlock_bh(&rx_tid->ab->base_lock); 2921 return; 2922 } 2923 ath12k_dp_rx_frags_cleanup(rx_tid, true); 2924 spin_unlock_bh(&rx_tid->ab->base_lock); 2925 } 2926 2927 int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id) 2928 { 2929 struct ath12k_base *ab = ar->ab; 2930 struct crypto_shash *tfm; 2931 struct ath12k_peer *peer; 2932 struct ath12k_dp_rx_tid *rx_tid; 2933 int i; 2934 2935 tfm = crypto_alloc_shash("michael_mic", 0, 0); 2936 if (IS_ERR(tfm)) 2937 return PTR_ERR(tfm); 2938 2939 spin_lock_bh(&ab->base_lock); 2940 2941 peer = ath12k_peer_find(ab, vdev_id, peer_mac); 2942 if (!peer) { 2943 spin_unlock_bh(&ab->base_lock); 2944 crypto_free_shash(tfm); 2945 ath12k_warn(ab, "failed to find the peer to set up fragment info\n"); 2946 return -ENOENT; 2947 } 2948 2949 if (!peer->primary_link) { 2950 spin_unlock_bh(&ab->base_lock); 2951 crypto_free_shash(tfm); 2952 return 0; 2953 } 2954 2955 for (i = 0; i <= IEEE80211_NUM_TIDS; i++) { 2956 rx_tid = &peer->rx_tid[i]; 2957 rx_tid->ab = ab; 2958 timer_setup(&rx_tid->frag_timer, ath12k_dp_rx_frag_timer, 0); 2959 skb_queue_head_init(&rx_tid->rx_frags); 2960 } 2961 2962 peer->tfm_mmic = tfm; 2963 peer->dp_setup_done = true; 2964 spin_unlock_bh(&ab->base_lock); 2965 2966 return 0; 2967 } 2968 2969 static int ath12k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key, 2970 struct ieee80211_hdr *hdr, u8 *data, 2971 size_t data_len, u8 *mic) 2972 { 2973 SHASH_DESC_ON_STACK(desc, tfm); 2974 u8 mic_hdr[16] = {0}; 2975 u8 tid = 0; 2976 int ret; 2977 2978 if (!tfm) 2979 return -EINVAL; 2980 2981 desc->tfm = tfm; 2982 2983 ret = crypto_shash_setkey(tfm, key, 8); 2984 if (ret) 2985 goto out; 2986 2987 ret = crypto_shash_init(desc); 2988 if (ret) 2989 goto out; 2990 2991 /* TKIP MIC header */ 2992 memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN); 2993 memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN); 2994 if (ieee80211_is_data_qos(hdr->frame_control)) 2995 tid = ieee80211_get_tid(hdr); 2996 mic_hdr[12] = tid; 2997 2998 ret = crypto_shash_update(desc, mic_hdr, 16); 2999 if (ret) 3000 goto out; 3001 ret = crypto_shash_update(desc, data, data_len); 3002 if (ret) 3003 goto out; 3004 ret = crypto_shash_final(desc, mic); 3005 out: 3006 shash_desc_zero(desc); 3007 return ret; 3008 } 3009 3010 static int ath12k_dp_rx_h_verify_tkip_mic(struct ath12k *ar, struct ath12k_peer *peer, 3011 struct sk_buff *msdu) 3012 { 3013 struct ath12k_base *ab 
= ar->ab; 3014 struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data; 3015 struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu); 3016 struct ieee80211_key_conf *key_conf; 3017 struct ieee80211_hdr *hdr; 3018 struct ath12k_dp_rx_info rx_info; 3019 u8 mic[IEEE80211_CCMP_MIC_LEN]; 3020 int head_len, tail_len, ret; 3021 size_t data_len; 3022 u32 hdr_len, hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; 3023 u8 *key, *data; 3024 u8 key_idx; 3025 3026 if (ath12k_dp_rx_h_enctype(ab, rx_desc) != HAL_ENCRYPT_TYPE_TKIP_MIC) 3027 return 0; 3028 3029 rx_info.addr2_present = false; 3030 rx_info.rx_status = rxs; 3031 3032 hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz); 3033 hdr_len = ieee80211_hdrlen(hdr->frame_control); 3034 head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN; 3035 tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN; 3036 3037 if (!is_multicast_ether_addr(hdr->addr1)) 3038 key_idx = peer->ucast_keyidx; 3039 else 3040 key_idx = peer->mcast_keyidx; 3041 3042 key_conf = peer->keys[key_idx]; 3043 3044 data = msdu->data + head_len; 3045 data_len = msdu->len - head_len - tail_len; 3046 key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY]; 3047 3048 ret = ath12k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic); 3049 if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN)) 3050 goto mic_fail; 3051 3052 return 0; 3053 3054 mic_fail: 3055 (ATH12K_SKB_RXCB(msdu))->is_first_msdu = true; 3056 (ATH12K_SKB_RXCB(msdu))->is_last_msdu = true; 3057 3058 ath12k_dp_rx_h_fetch_info(ab, rx_desc, &rx_info); 3059 3060 rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED | 3061 RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED; 3062 skb_pull(msdu, hal_rx_desc_sz); 3063 3064 if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, rx_desc, msdu))) 3065 return -EINVAL; 3066 3067 ath12k_dp_rx_h_ppdu(ar, &rx_info); 3068 ath12k_dp_rx_h_undecap(ar, msdu, rx_desc, 3069 HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true); 3070 ieee80211_rx(ath12k_ar_to_hw(ar), msdu); 3071 return -EINVAL; 3072 } 3073 3074 static void ath12k_dp_rx_h_undecap_frag(struct ath12k *ar, struct sk_buff *msdu, 3075 enum hal_encrypt_type enctype, u32 flags) 3076 { 3077 struct ieee80211_hdr *hdr; 3078 size_t hdr_len; 3079 size_t crypto_len; 3080 u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; 3081 3082 if (!flags) 3083 return; 3084 3085 hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz); 3086 3087 if (flags & RX_FLAG_MIC_STRIPPED) 3088 skb_trim(msdu, msdu->len - 3089 ath12k_dp_rx_crypto_mic_len(ar, enctype)); 3090 3091 if (flags & RX_FLAG_ICV_STRIPPED) 3092 skb_trim(msdu, msdu->len - 3093 ath12k_dp_rx_crypto_icv_len(ar, enctype)); 3094 3095 if (flags & RX_FLAG_IV_STRIPPED) { 3096 hdr_len = ieee80211_hdrlen(hdr->frame_control); 3097 crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype); 3098 3099 memmove(msdu->data + hal_rx_desc_sz + crypto_len, 3100 msdu->data + hal_rx_desc_sz, hdr_len); 3101 skb_pull(msdu, crypto_len); 3102 } 3103 } 3104 3105 static int ath12k_dp_rx_h_defrag(struct ath12k *ar, 3106 struct ath12k_peer *peer, 3107 struct ath12k_dp_rx_tid *rx_tid, 3108 struct sk_buff **defrag_skb) 3109 { 3110 struct ath12k_base *ab = ar->ab; 3111 struct hal_rx_desc *rx_desc; 3112 struct sk_buff *skb, *first_frag, *last_frag; 3113 struct ieee80211_hdr *hdr; 3114 enum hal_encrypt_type enctype; 3115 bool is_decrypted = false; 3116 int msdu_len = 0; 3117 int extra_space; 3118 u32 flags, hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; 3119 3120 first_frag = skb_peek(&rx_tid->rx_frags); 3121 
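/* Fragments were queued in fragment number order by
 * ath12k_dp_rx_frag_h_mpdu(), so the head of rx_frags is the first
 * fragment of the MSDU and the tail is the last one.
 */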
last_frag = skb_peek_tail(&rx_tid->rx_frags); 3122 3123 skb_queue_walk(&rx_tid->rx_frags, skb) { 3124 flags = 0; 3125 rx_desc = (struct hal_rx_desc *)skb->data; 3126 hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz); 3127 3128 enctype = ath12k_dp_rx_h_enctype(ab, rx_desc); 3129 if (enctype != HAL_ENCRYPT_TYPE_OPEN) 3130 is_decrypted = ath12k_dp_rx_h_is_decrypted(ab, 3131 rx_desc); 3132 3133 if (is_decrypted) { 3134 if (skb != first_frag) 3135 flags |= RX_FLAG_IV_STRIPPED; 3136 if (skb != last_frag) 3137 flags |= RX_FLAG_ICV_STRIPPED | 3138 RX_FLAG_MIC_STRIPPED; 3139 } 3140 3141 /* RX fragments are always raw packets */ 3142 if (skb != last_frag) 3143 skb_trim(skb, skb->len - FCS_LEN); 3144 ath12k_dp_rx_h_undecap_frag(ar, skb, enctype, flags); 3145 3146 if (skb != first_frag) 3147 skb_pull(skb, hal_rx_desc_sz + 3148 ieee80211_hdrlen(hdr->frame_control)); 3149 msdu_len += skb->len; 3150 } 3151 3152 extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag)); 3153 if (extra_space > 0 && 3154 (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0)) 3155 return -ENOMEM; 3156 3157 __skb_unlink(first_frag, &rx_tid->rx_frags); 3158 while ((skb = __skb_dequeue(&rx_tid->rx_frags))) { 3159 skb_put_data(first_frag, skb->data, skb->len); 3160 dev_kfree_skb_any(skb); 3161 } 3162 3163 hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz); 3164 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS); 3165 ATH12K_SKB_RXCB(first_frag)->is_frag = 1; 3166 3167 if (ath12k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag)) 3168 first_frag = NULL; 3169 3170 *defrag_skb = first_frag; 3171 return 0; 3172 } 3173 3174 static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar, 3175 struct ath12k_dp_rx_tid *rx_tid, 3176 struct sk_buff *defrag_skb) 3177 { 3178 struct ath12k_base *ab = ar->ab; 3179 struct ath12k_dp *dp = &ab->dp; 3180 struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data; 3181 struct hal_reo_entrance_ring *reo_ent_ring; 3182 struct hal_reo_dest_ring *reo_dest_ring; 3183 struct dp_link_desc_bank *link_desc_banks; 3184 struct hal_rx_msdu_link *msdu_link; 3185 struct hal_rx_msdu_details *msdu0; 3186 struct hal_srng *srng; 3187 dma_addr_t link_paddr, buf_paddr; 3188 u32 desc_bank, msdu_info, msdu_ext_info, mpdu_info; 3189 u32 cookie, hal_rx_desc_sz, dest_ring_info0, queue_addr_hi; 3190 int ret; 3191 struct ath12k_rx_desc_info *desc_info; 3192 enum hal_rx_buf_return_buf_manager idle_link_rbm = dp->idle_link_rbm; 3193 u8 dst_ind; 3194 3195 hal_rx_desc_sz = ab->hal.hal_desc_sz; 3196 link_desc_banks = dp->link_desc_banks; 3197 reo_dest_ring = rx_tid->dst_ring_desc; 3198 3199 ath12k_hal_rx_reo_ent_paddr_get(ab, &reo_dest_ring->buf_addr_info, 3200 &link_paddr, &cookie); 3201 desc_bank = u32_get_bits(cookie, DP_LINK_DESC_BANK_MASK); 3202 3203 msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr + 3204 (link_paddr - link_desc_banks[desc_bank].paddr)); 3205 msdu0 = &msdu_link->msdu_link[0]; 3206 msdu_ext_info = le32_to_cpu(msdu0->rx_msdu_ext_info.info0); 3207 dst_ind = u32_get_bits(msdu_ext_info, RX_MSDU_EXT_DESC_INFO0_REO_DEST_IND); 3208 3209 memset(msdu0, 0, sizeof(*msdu0)); 3210 3211 msdu_info = u32_encode_bits(1, RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU) | 3212 u32_encode_bits(1, RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU) | 3213 u32_encode_bits(0, RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) | 3214 u32_encode_bits(defrag_skb->len - hal_rx_desc_sz, 3215 RX_MSDU_DESC_INFO0_MSDU_LENGTH) | 3216 u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_SA) | 3217 
u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_DA); 3218 msdu0->rx_msdu_info.info0 = cpu_to_le32(msdu_info); 3219 msdu0->rx_msdu_ext_info.info0 = cpu_to_le32(msdu_ext_info); 3220 3221 /* change msdu len in hal rx desc */ 3222 ath12k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz); 3223 3224 buf_paddr = dma_map_single(ab->dev, defrag_skb->data, 3225 defrag_skb->len + skb_tailroom(defrag_skb), 3226 DMA_TO_DEVICE); 3227 if (dma_mapping_error(ab->dev, buf_paddr)) 3228 return -ENOMEM; 3229 3230 spin_lock_bh(&dp->rx_desc_lock); 3231 desc_info = list_first_entry_or_null(&dp->rx_desc_free_list, 3232 struct ath12k_rx_desc_info, 3233 list); 3234 if (!desc_info) { 3235 spin_unlock_bh(&dp->rx_desc_lock); 3236 ath12k_warn(ab, "failed to find rx desc for reinject\n"); 3237 ret = -ENOMEM; 3238 goto err_unmap_dma; 3239 } 3240 3241 desc_info->skb = defrag_skb; 3242 desc_info->in_use = true; 3243 3244 list_del(&desc_info->list); 3245 spin_unlock_bh(&dp->rx_desc_lock); 3246 3247 ATH12K_SKB_RXCB(defrag_skb)->paddr = buf_paddr; 3248 3249 ath12k_hal_rx_buf_addr_info_set(&msdu0->buf_addr_info, buf_paddr, 3250 desc_info->cookie, 3251 HAL_RX_BUF_RBM_SW3_BM); 3252 3253 /* Fill mpdu details into reo entrance ring */ 3254 srng = &ab->hal.srng_list[dp->reo_reinject_ring.ring_id]; 3255 3256 spin_lock_bh(&srng->lock); 3257 ath12k_hal_srng_access_begin(ab, srng); 3258 3259 reo_ent_ring = ath12k_hal_srng_src_get_next_entry(ab, srng); 3260 if (!reo_ent_ring) { 3261 ath12k_hal_srng_access_end(ab, srng); 3262 spin_unlock_bh(&srng->lock); 3263 ret = -ENOSPC; 3264 goto err_free_desc; 3265 } 3266 memset(reo_ent_ring, 0, sizeof(*reo_ent_ring)); 3267 3268 ath12k_hal_rx_buf_addr_info_set(&reo_ent_ring->buf_addr_info, link_paddr, 3269 cookie, 3270 idle_link_rbm); 3271 3272 mpdu_info = u32_encode_bits(1, RX_MPDU_DESC_INFO0_MSDU_COUNT) | 3273 u32_encode_bits(0, RX_MPDU_DESC_INFO0_FRAG_FLAG) | 3274 u32_encode_bits(1, RX_MPDU_DESC_INFO0_RAW_MPDU) | 3275 u32_encode_bits(1, RX_MPDU_DESC_INFO0_VALID_PN) | 3276 u32_encode_bits(rx_tid->tid, RX_MPDU_DESC_INFO0_TID); 3277 3278 reo_ent_ring->rx_mpdu_info.info0 = cpu_to_le32(mpdu_info); 3279 reo_ent_ring->rx_mpdu_info.peer_meta_data = 3280 reo_dest_ring->rx_mpdu_info.peer_meta_data; 3281 3282 if (ab->hw_params->reoq_lut_support) { 3283 reo_ent_ring->queue_addr_lo = reo_dest_ring->rx_mpdu_info.peer_meta_data; 3284 queue_addr_hi = 0; 3285 } else { 3286 reo_ent_ring->queue_addr_lo = 3287 cpu_to_le32(lower_32_bits(rx_tid->qbuf.paddr_aligned)); 3288 queue_addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned); 3289 } 3290 3291 reo_ent_ring->info0 = le32_encode_bits(queue_addr_hi, 3292 HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI) | 3293 le32_encode_bits(dst_ind, 3294 HAL_REO_ENTR_RING_INFO0_DEST_IND); 3295 3296 reo_ent_ring->info1 = le32_encode_bits(rx_tid->cur_sn, 3297 HAL_REO_ENTR_RING_INFO1_MPDU_SEQ_NUM); 3298 dest_ring_info0 = le32_get_bits(reo_dest_ring->info0, 3299 HAL_REO_DEST_RING_INFO0_SRC_LINK_ID); 3300 reo_ent_ring->info2 = 3301 cpu_to_le32(u32_get_bits(dest_ring_info0, 3302 HAL_REO_ENTR_RING_INFO2_SRC_LINK_ID)); 3303 3304 ath12k_hal_srng_access_end(ab, srng); 3305 spin_unlock_bh(&srng->lock); 3306 3307 return 0; 3308 3309 err_free_desc: 3310 spin_lock_bh(&dp->rx_desc_lock); 3311 desc_info->in_use = false; 3312 desc_info->skb = NULL; 3313 list_add_tail(&desc_info->list, &dp->rx_desc_free_list); 3314 spin_unlock_bh(&dp->rx_desc_lock); 3315 err_unmap_dma: 3316 dma_unmap_single(ab->dev, buf_paddr, defrag_skb->len + skb_tailroom(defrag_skb), 3317 DMA_TO_DEVICE); 3318 return ret; 3319 
} 3320 3321 static int ath12k_dp_rx_h_cmp_frags(struct ath12k_base *ab, 3322 struct sk_buff *a, struct sk_buff *b) 3323 { 3324 int frag1, frag2; 3325 3326 frag1 = ath12k_dp_rx_h_frag_no(ab, a); 3327 frag2 = ath12k_dp_rx_h_frag_no(ab, b); 3328 3329 return frag1 - frag2; 3330 } 3331 3332 static void ath12k_dp_rx_h_sort_frags(struct ath12k_base *ab, 3333 struct sk_buff_head *frag_list, 3334 struct sk_buff *cur_frag) 3335 { 3336 struct sk_buff *skb; 3337 int cmp; 3338 3339 skb_queue_walk(frag_list, skb) { 3340 cmp = ath12k_dp_rx_h_cmp_frags(ab, skb, cur_frag); 3341 if (cmp < 0) 3342 continue; 3343 __skb_queue_before(frag_list, skb, cur_frag); 3344 return; 3345 } 3346 __skb_queue_tail(frag_list, cur_frag); 3347 } 3348 3349 static u64 ath12k_dp_rx_h_get_pn(struct ath12k *ar, struct sk_buff *skb) 3350 { 3351 struct ieee80211_hdr *hdr; 3352 u64 pn = 0; 3353 u8 *ehdr; 3354 u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; 3355 3356 hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz); 3357 ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control); 3358 3359 pn = ehdr[0]; 3360 pn |= (u64)ehdr[1] << 8; 3361 pn |= (u64)ehdr[4] << 16; 3362 pn |= (u64)ehdr[5] << 24; 3363 pn |= (u64)ehdr[6] << 32; 3364 pn |= (u64)ehdr[7] << 40; 3365 3366 return pn; 3367 } 3368 3369 static bool 3370 ath12k_dp_rx_h_defrag_validate_incr_pn(struct ath12k *ar, struct ath12k_dp_rx_tid *rx_tid) 3371 { 3372 struct ath12k_base *ab = ar->ab; 3373 enum hal_encrypt_type encrypt_type; 3374 struct sk_buff *first_frag, *skb; 3375 struct hal_rx_desc *desc; 3376 u64 last_pn; 3377 u64 cur_pn; 3378 3379 first_frag = skb_peek(&rx_tid->rx_frags); 3380 desc = (struct hal_rx_desc *)first_frag->data; 3381 3382 encrypt_type = ath12k_dp_rx_h_enctype(ab, desc); 3383 if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 && 3384 encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 && 3385 encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 && 3386 encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256) 3387 return true; 3388 3389 last_pn = ath12k_dp_rx_h_get_pn(ar, first_frag); 3390 skb_queue_walk(&rx_tid->rx_frags, skb) { 3391 if (skb == first_frag) 3392 continue; 3393 3394 cur_pn = ath12k_dp_rx_h_get_pn(ar, skb); 3395 if (cur_pn != last_pn + 1) 3396 return false; 3397 last_pn = cur_pn; 3398 } 3399 return true; 3400 } 3401 3402 static int ath12k_dp_rx_frag_h_mpdu(struct ath12k *ar, 3403 struct sk_buff *msdu, 3404 struct hal_reo_dest_ring *ring_desc) 3405 { 3406 struct ath12k_base *ab = ar->ab; 3407 struct hal_rx_desc *rx_desc; 3408 struct ath12k_peer *peer; 3409 struct ath12k_dp_rx_tid *rx_tid; 3410 struct sk_buff *defrag_skb = NULL; 3411 u32 peer_id; 3412 u16 seqno, frag_no; 3413 u8 tid; 3414 int ret = 0; 3415 bool more_frags; 3416 3417 rx_desc = (struct hal_rx_desc *)msdu->data; 3418 peer_id = ath12k_dp_rx_h_peer_id(ab, rx_desc); 3419 tid = ath12k_dp_rx_h_tid(ab, rx_desc); 3420 seqno = ath12k_dp_rx_h_seq_no(ab, rx_desc); 3421 frag_no = ath12k_dp_rx_h_frag_no(ab, msdu); 3422 more_frags = ath12k_dp_rx_h_more_frags(ab, msdu); 3423 3424 if (!ath12k_dp_rx_h_seq_ctrl_valid(ab, rx_desc) || 3425 !ath12k_dp_rx_h_fc_valid(ab, rx_desc) || 3426 tid > IEEE80211_NUM_TIDS) 3427 return -EINVAL; 3428 3429 /* received unfragmented packet in reo 3430 * exception ring, this shouldn't happen 3431 * as these packets typically come from 3432 * reo2sw srngs. 
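 * Returning an error here lets the caller free the buffer and hand the
 * link descriptor back to the WBM idle list.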
3433 */ 3434 if (WARN_ON_ONCE(!frag_no && !more_frags)) 3435 return -EINVAL; 3436 3437 spin_lock_bh(&ab->base_lock); 3438 peer = ath12k_peer_find_by_id(ab, peer_id); 3439 if (!peer) { 3440 ath12k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n", 3441 peer_id); 3442 ret = -ENOENT; 3443 goto out_unlock; 3444 } 3445 3446 if (!peer->dp_setup_done) { 3447 ath12k_warn(ab, "The peer %pM [%d] has uninitialized datapath\n", 3448 peer->addr, peer_id); 3449 ret = -ENOENT; 3450 goto out_unlock; 3451 } 3452 3453 rx_tid = &peer->rx_tid[tid]; 3454 3455 if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) || 3456 skb_queue_empty(&rx_tid->rx_frags)) { 3457 /* Flush stored fragments and start a new sequence */ 3458 ath12k_dp_rx_frags_cleanup(rx_tid, true); 3459 rx_tid->cur_sn = seqno; 3460 } 3461 3462 if (rx_tid->rx_frag_bitmap & BIT(frag_no)) { 3463 /* Fragment already present */ 3464 ret = -EINVAL; 3465 goto out_unlock; 3466 } 3467 3468 if ((!rx_tid->rx_frag_bitmap || frag_no > __fls(rx_tid->rx_frag_bitmap))) 3469 __skb_queue_tail(&rx_tid->rx_frags, msdu); 3470 else 3471 ath12k_dp_rx_h_sort_frags(ab, &rx_tid->rx_frags, msdu); 3472 3473 rx_tid->rx_frag_bitmap |= BIT(frag_no); 3474 if (!more_frags) 3475 rx_tid->last_frag_no = frag_no; 3476 3477 if (frag_no == 0) { 3478 rx_tid->dst_ring_desc = kmemdup(ring_desc, 3479 sizeof(*rx_tid->dst_ring_desc), 3480 GFP_ATOMIC); 3481 if (!rx_tid->dst_ring_desc) { 3482 ret = -ENOMEM; 3483 goto out_unlock; 3484 } 3485 } else { 3486 ath12k_dp_rx_link_desc_return(ab, ring_desc, 3487 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 3488 } 3489 3490 if (!rx_tid->last_frag_no || 3491 rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) { 3492 mod_timer(&rx_tid->frag_timer, jiffies + 3493 ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS); 3494 goto out_unlock; 3495 } 3496 3497 spin_unlock_bh(&ab->base_lock); 3498 timer_delete_sync(&rx_tid->frag_timer); 3499 spin_lock_bh(&ab->base_lock); 3500 3501 peer = ath12k_peer_find_by_id(ab, peer_id); 3502 if (!peer) 3503 goto err_frags_cleanup; 3504 3505 if (!ath12k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid)) 3506 goto err_frags_cleanup; 3507 3508 if (ath12k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb)) 3509 goto err_frags_cleanup; 3510 3511 if (!defrag_skb) 3512 goto err_frags_cleanup; 3513 3514 if (ath12k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb)) 3515 goto err_frags_cleanup; 3516 3517 ath12k_dp_rx_frags_cleanup(rx_tid, false); 3518 goto out_unlock; 3519 3520 err_frags_cleanup: 3521 dev_kfree_skb_any(defrag_skb); 3522 ath12k_dp_rx_frags_cleanup(rx_tid, true); 3523 out_unlock: 3524 spin_unlock_bh(&ab->base_lock); 3525 return ret; 3526 } 3527 3528 static int 3529 ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc, 3530 struct list_head *used_list, 3531 bool drop, u32 cookie) 3532 { 3533 struct ath12k_base *ab = ar->ab; 3534 struct sk_buff *msdu; 3535 struct ath12k_skb_rxcb *rxcb; 3536 struct hal_rx_desc *rx_desc; 3537 u16 msdu_len; 3538 u32 hal_rx_desc_sz = ab->hal.hal_desc_sz; 3539 struct ath12k_rx_desc_info *desc_info; 3540 u64 desc_va; 3541 3542 desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 | 3543 le32_to_cpu(desc->buf_va_lo)); 3544 desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va); 3545 3546 /* retry manual desc retrieval */ 3547 if (!desc_info) { 3548 desc_info = ath12k_dp_get_rx_desc(ab, cookie); 3549 if (!desc_info) { 3550 ath12k_warn(ab, "Invalid cookie in DP rx error descriptor retrieval: 0x%x\n", 3551 cookie); 3552 return -EINVAL; 3553 } 3554 } 
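/* From here on the handling mirrors the normal REO destination path:
 * take the rx descriptor out of circulation, unmap the buffer and then
 * either drop it or try to reassemble the fragment below.
 */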
3555 3556 if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC) 3557 ath12k_warn(ab, " RX Exception, Check HW CC implementation"); 3558 3559 msdu = desc_info->skb; 3560 desc_info->skb = NULL; 3561 3562 list_add_tail(&desc_info->list, used_list); 3563 3564 rxcb = ATH12K_SKB_RXCB(msdu); 3565 dma_unmap_single(ar->ab->dev, rxcb->paddr, 3566 msdu->len + skb_tailroom(msdu), 3567 DMA_FROM_DEVICE); 3568 3569 if (drop) { 3570 dev_kfree_skb_any(msdu); 3571 return 0; 3572 } 3573 3574 rcu_read_lock(); 3575 if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) { 3576 dev_kfree_skb_any(msdu); 3577 goto exit; 3578 } 3579 3580 if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) { 3581 dev_kfree_skb_any(msdu); 3582 goto exit; 3583 } 3584 3585 rx_desc = (struct hal_rx_desc *)msdu->data; 3586 msdu_len = ath12k_dp_rx_h_msdu_len(ar->ab, rx_desc); 3587 if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) { 3588 ath12k_warn(ar->ab, "invalid msdu leng %u", msdu_len); 3589 ath12k_dbg_dump(ar->ab, ATH12K_DBG_DATA, NULL, "", rx_desc, 3590 sizeof(*rx_desc)); 3591 dev_kfree_skb_any(msdu); 3592 goto exit; 3593 } 3594 3595 skb_put(msdu, hal_rx_desc_sz + msdu_len); 3596 3597 if (ath12k_dp_rx_frag_h_mpdu(ar, msdu, desc)) { 3598 dev_kfree_skb_any(msdu); 3599 ath12k_dp_rx_link_desc_return(ar->ab, desc, 3600 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 3601 } 3602 exit: 3603 rcu_read_unlock(); 3604 return 0; 3605 } 3606 3607 int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi, 3608 int budget) 3609 { 3610 struct ath12k_hw_group *ag = ab->ag; 3611 struct list_head rx_desc_used_list[ATH12K_MAX_SOCS]; 3612 u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC]; 3613 int num_buffs_reaped[ATH12K_MAX_SOCS] = {}; 3614 struct dp_link_desc_bank *link_desc_banks; 3615 enum hal_rx_buf_return_buf_manager rbm; 3616 struct hal_rx_msdu_link *link_desc_va; 3617 int tot_n_bufs_reaped, quota, ret, i; 3618 struct hal_reo_dest_ring *reo_desc; 3619 struct dp_rxdma_ring *rx_ring; 3620 struct dp_srng *reo_except; 3621 struct ath12k_hw_link *hw_links = ag->hw_links; 3622 struct ath12k_base *partner_ab; 3623 u8 hw_link_id, device_id; 3624 u32 desc_bank, num_msdus; 3625 struct hal_srng *srng; 3626 struct ath12k *ar; 3627 dma_addr_t paddr; 3628 bool is_frag; 3629 bool drop; 3630 int pdev_id; 3631 3632 tot_n_bufs_reaped = 0; 3633 quota = budget; 3634 3635 for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) 3636 INIT_LIST_HEAD(&rx_desc_used_list[device_id]); 3637 3638 reo_except = &ab->dp.reo_except_ring; 3639 3640 srng = &ab->hal.srng_list[reo_except->ring_id]; 3641 3642 spin_lock_bh(&srng->lock); 3643 3644 ath12k_hal_srng_access_begin(ab, srng); 3645 3646 while (budget && 3647 (reo_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) { 3648 drop = false; 3649 ab->soc_stats.err_ring_pkts++; 3650 3651 ret = ath12k_hal_desc_reo_parse_err(ab, reo_desc, &paddr, 3652 &desc_bank); 3653 if (ret) { 3654 ath12k_warn(ab, "failed to parse error reo desc %d\n", 3655 ret); 3656 continue; 3657 } 3658 3659 hw_link_id = le32_get_bits(reo_desc->info0, 3660 HAL_REO_DEST_RING_INFO0_SRC_LINK_ID); 3661 device_id = hw_links[hw_link_id].device_id; 3662 partner_ab = ath12k_ag_to_ab(ag, device_id); 3663 3664 pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params, 3665 hw_links[hw_link_id].pdev_idx); 3666 ar = partner_ab->pdevs[pdev_id].ar; 3667 3668 link_desc_banks = partner_ab->dp.link_desc_banks; 3669 link_desc_va = link_desc_banks[desc_bank].vaddr + 3670 (paddr - link_desc_banks[desc_bank].paddr); 3671 ath12k_hal_rx_msdu_link_info_get(link_desc_va, 
&num_msdus, msdu_cookies, 3672 &rbm); 3673 if (rbm != partner_ab->dp.idle_link_rbm && 3674 rbm != HAL_RX_BUF_RBM_SW3_BM && 3675 rbm != partner_ab->hw_params->hal_params->rx_buf_rbm) { 3676 ab->soc_stats.invalid_rbm++; 3677 ath12k_warn(ab, "invalid return buffer manager %d\n", rbm); 3678 ath12k_dp_rx_link_desc_return(partner_ab, reo_desc, 3679 HAL_WBM_REL_BM_ACT_REL_MSDU); 3680 continue; 3681 } 3682 3683 is_frag = !!(le32_to_cpu(reo_desc->rx_mpdu_info.info0) & 3684 RX_MPDU_DESC_INFO0_FRAG_FLAG); 3685 3686 /* Process only rx fragments with one msdu per link desc below, and drop 3687 * msdu's indicated due to error reasons. 3688 * Dynamic fragmentation not supported in Multi-link client, so drop the 3689 * partner device buffers. 3690 */ 3691 if (!is_frag || num_msdus > 1 || 3692 partner_ab->device_id != ab->device_id) { 3693 drop = true; 3694 3695 /* Return the link desc back to wbm idle list */ 3696 ath12k_dp_rx_link_desc_return(partner_ab, reo_desc, 3697 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 3698 } 3699 3700 for (i = 0; i < num_msdus; i++) { 3701 if (!ath12k_dp_process_rx_err_buf(ar, reo_desc, 3702 &rx_desc_used_list[device_id], 3703 drop, 3704 msdu_cookies[i])) { 3705 num_buffs_reaped[device_id]++; 3706 tot_n_bufs_reaped++; 3707 } 3708 } 3709 3710 if (tot_n_bufs_reaped >= quota) { 3711 tot_n_bufs_reaped = quota; 3712 goto exit; 3713 } 3714 3715 budget = quota - tot_n_bufs_reaped; 3716 } 3717 3718 exit: 3719 ath12k_hal_srng_access_end(ab, srng); 3720 3721 spin_unlock_bh(&srng->lock); 3722 3723 for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) { 3724 if (!num_buffs_reaped[device_id]) 3725 continue; 3726 3727 partner_ab = ath12k_ag_to_ab(ag, device_id); 3728 rx_ring = &partner_ab->dp.rx_refill_buf_ring; 3729 3730 ath12k_dp_rx_bufs_replenish(partner_ab, rx_ring, 3731 &rx_desc_used_list[device_id], 3732 num_buffs_reaped[device_id]); 3733 } 3734 3735 return tot_n_bufs_reaped; 3736 } 3737 3738 static void ath12k_dp_rx_null_q_desc_sg_drop(struct ath12k *ar, 3739 int msdu_len, 3740 struct sk_buff_head *msdu_list) 3741 { 3742 struct sk_buff *skb, *tmp; 3743 struct ath12k_skb_rxcb *rxcb; 3744 int n_buffs; 3745 3746 n_buffs = DIV_ROUND_UP(msdu_len, 3747 (DP_RX_BUFFER_SIZE - ar->ab->hal.hal_desc_sz)); 3748 3749 skb_queue_walk_safe(msdu_list, skb, tmp) { 3750 rxcb = ATH12K_SKB_RXCB(skb); 3751 if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO && 3752 rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) { 3753 if (!n_buffs) 3754 break; 3755 __skb_unlink(skb, msdu_list); 3756 dev_kfree_skb_any(skb); 3757 n_buffs--; 3758 } 3759 } 3760 } 3761 3762 static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu, 3763 struct ath12k_dp_rx_info *rx_info, 3764 struct sk_buff_head *msdu_list) 3765 { 3766 struct ath12k_base *ab = ar->ab; 3767 u16 msdu_len; 3768 struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; 3769 u8 l3pad_bytes; 3770 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); 3771 u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz; 3772 3773 msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc); 3774 3775 if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) { 3776 /* First buffer will be freed by the caller, so deduct it's length */ 3777 msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz); 3778 ath12k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list); 3779 return -EINVAL; 3780 } 3781 3782 /* Even after cleaning up the sg buffers in the msdu list with above check 3783 * any msdu received with continuation flag needs to be dropped as invalid. 
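 * No attempt is made to coalesce the remaining buffers on this error
 * path, so such a frame cannot be rebuilt here.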
	if (!ath12k_dp_rx_h_msdu_done(ab, desc)) {
		ath12k_warn(ar->ab,
			    "msdu_done bit not set in null_q_des processing\n");
		__skb_queue_purge(msdu_list);
		return -EIO;
	}

	/* Handle NULL queue descriptor violations arising out of a missing
	 * REO queue for a given peer or TID. This typically happens when a
	 * packet is received on a QoS-enabled TID before the ADDBA
	 * negotiation for that TID has set up the TID queue. It may also
	 * happen for MC/BC frames if they are not routed to the non-QoS TID
	 * queue in the absence of any other default TID queue. This error can
	 * show up in either a REO destination ring or the WBM release ring.
	 */

	if (rxcb->is_frag) {
		skb_pull(msdu, hal_rx_desc_sz);
	} else {
		l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc);

		if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
			return -EINVAL;

		skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
		skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
	}
	if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, desc, msdu)))
		return -EINVAL;

	ath12k_dp_rx_h_fetch_info(ab, desc, rx_info);
	ath12k_dp_rx_h_ppdu(ar, rx_info);
	ath12k_dp_rx_h_mpdu(ar, msdu, desc, rx_info);

	rxcb->tid = rx_info->tid;

	/* Note that the caller still has access to the msdu and completes rx
	 * with mac80211, so there is no need to clean up the msdu list here.
	 */

	return 0;
}

static bool ath12k_dp_rx_h_reo_err(struct ath12k *ar, struct sk_buff *msdu,
				   struct ath12k_dp_rx_info *rx_info,
				   struct sk_buff_head *msdu_list)
{
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	bool drop = false;

	ar->ab->soc_stats.reo_error[rxcb->err_code]++;

	switch (rxcb->err_code) {
	case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
		if (ath12k_dp_rx_h_null_q_desc(ar, msdu, rx_info, msdu_list))
			drop = true;
		break;
	case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
		/* TODO: Do not drop PN failed packets in the driver;
		 * instead, it is better to drop such packets in mac80211
		 * after incrementing the replay counters.
		 */
		fallthrough;
	default:
		/* TODO: Review other errors and report them to mac80211
		 * as appropriate.
		 */
		drop = true;
		break;
	}

	return drop;
}

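/* Handle an MSDU flagged with a TKIP MIC failure: trim the frame, mark it
 * with RX_FLAG_MMIC_ERROR so mac80211 can account for the MIC failure, and
 * undecap it. Returns true if the frame has to be dropped instead.
 */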
static bool ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
					struct ath12k_dp_rx_info *rx_info)
{
	struct ath12k_base *ab = ar->ab;
	u16 msdu_len;
	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
	u8 l3pad_bytes;
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;

	rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, desc);
	rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, desc);

	l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc);
	msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);

	if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE) {
		ath12k_dbg(ab, ATH12K_DBG_DATA,
			   "invalid msdu len in tkip mic err %u\n", msdu_len);
		ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", desc,
				sizeof(*desc));
		return true;
	}

	skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
	skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);

	if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, desc, msdu)))
		return true;

	ath12k_dp_rx_h_ppdu(ar, rx_info);

	rx_info->rx_status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
				     RX_FLAG_DECRYPTED);

	ath12k_dp_rx_h_undecap(ar, msdu, desc,
			       HAL_ENCRYPT_TYPE_TKIP_MIC, rx_info->rx_status, false);
	return false;
}

static bool ath12k_dp_rx_h_rxdma_err(struct ath12k *ar, struct sk_buff *msdu,
				     struct ath12k_dp_rx_info *rx_info)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
	bool drop = false;
	u32 err_bitmap;

	ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;

	switch (rxcb->err_code) {
	case HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR:
	case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
		err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
		if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) {
			ath12k_dp_rx_h_fetch_info(ab, rx_desc, rx_info);
			drop = ath12k_dp_rx_h_tkip_mic_err(ar, msdu, rx_info);
			break;
		}
		fallthrough;
	default:
		/* TODO: Review other rxdma error codes to check whether
		 * anything is worth reporting to mac80211.
		 */
		drop = true;
		break;
	}

	return drop;
}

static void ath12k_dp_rx_wbm_err(struct ath12k *ar,
				 struct napi_struct *napi,
				 struct sk_buff *msdu,
				 struct sk_buff_head *msdu_list)
{
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	struct ieee80211_rx_status rxs = {0};
	struct ath12k_dp_rx_info rx_info;
	bool drop = true;

	rx_info.addr2_present = false;
	rx_info.rx_status = &rxs;

	switch (rxcb->err_rel_src) {
	case HAL_WBM_REL_SRC_MODULE_REO:
		drop = ath12k_dp_rx_h_reo_err(ar, msdu, &rx_info, msdu_list);
		break;
	case HAL_WBM_REL_SRC_MODULE_RXDMA:
		drop = ath12k_dp_rx_h_rxdma_err(ar, msdu, &rx_info);
		break;
	default:
		/* msdu will get freed */
		break;
	}

	if (drop) {
		dev_kfree_skb_any(msdu);
		return;
	}

	ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_info);
}

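/* Reap error frames from the WBM release ring, regroup any scattered
 * (continuation) MSDUs, replenish the used buffers and hand each complete
 * MSDU to ath12k_dp_rx_wbm_err() for per-error handling. Returns the number
 * of buffers reaped.
 */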
int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
				 struct napi_struct *napi, int budget)
{
	struct list_head rx_desc_used_list[ATH12K_MAX_SOCS];
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k *ar;
	struct ath12k_dp *dp = &ab->dp;
	struct dp_rxdma_ring *rx_ring;
	struct hal_rx_wbm_rel_info err_info;
	struct hal_srng *srng;
	struct sk_buff *msdu;
	struct sk_buff_head msdu_list, scatter_msdu_list;
	struct ath12k_skb_rxcb *rxcb;
	void *rx_desc;
	int num_buffs_reaped[ATH12K_MAX_SOCS] = {};
	int total_num_buffs_reaped = 0;
	struct ath12k_rx_desc_info *desc_info;
	struct ath12k_hw_link *hw_links = ag->hw_links;
	struct ath12k_base *partner_ab;
	u8 hw_link_id, device_id;
	int ret, pdev_id;
	struct hal_rx_desc *msdu_data;

	__skb_queue_head_init(&msdu_list);
	__skb_queue_head_init(&scatter_msdu_list);

	for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++)
		INIT_LIST_HEAD(&rx_desc_used_list[device_id]);

	srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
	spin_lock_bh(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	while (budget) {
		rx_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng);
		if (!rx_desc)
			break;

		ret = ath12k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
		if (ret) {
			ath12k_warn(ab,
				    "failed to parse rx error in wbm_rel ring desc %d\n",
				    ret);
			continue;
		}

		desc_info = err_info.rx_desc;

		/* retry manual desc retrieval if HW CC is not done */
		if (!desc_info) {
			desc_info = ath12k_dp_get_rx_desc(ab, err_info.cookie);
			if (!desc_info) {
				ath12k_warn(ab, "Invalid cookie in DP WBM rx error descriptor retrieval: 0x%x\n",
					    err_info.cookie);
				continue;
			}
		}

		if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
			ath12k_warn(ab, "WBM RX err, check HW CC implementation");

		msdu = desc_info->skb;
		desc_info->skb = NULL;

		device_id = desc_info->device_id;
		partner_ab = ath12k_ag_to_ab(ag, device_id);
		if (unlikely(!partner_ab)) {
			dev_kfree_skb_any(msdu);

			/* In case the continuation bit was set in the
			 * previous record, clean up scatter_msdu_list
			 */
			ath12k_dp_clean_up_skb_list(&scatter_msdu_list);
			continue;
		}

		list_add_tail(&desc_info->list, &rx_desc_used_list[device_id]);

		rxcb = ATH12K_SKB_RXCB(msdu);
		dma_unmap_single(partner_ab->dev, rxcb->paddr,
				 msdu->len + skb_tailroom(msdu),
				 DMA_FROM_DEVICE);

		num_buffs_reaped[device_id]++;
		total_num_buffs_reaped++;

		if (!err_info.continuation)
			budget--;

		if (err_info.push_reason !=
		    HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
			dev_kfree_skb_any(msdu);
			continue;
		}

		msdu_data = (struct hal_rx_desc *)msdu->data;
		rxcb->err_rel_src = err_info.err_rel_src;
		rxcb->err_code = err_info.err_code;
		rxcb->is_first_msdu = err_info.first_msdu;
		rxcb->is_last_msdu = err_info.last_msdu;
		rxcb->is_continuation = err_info.continuation;
		rxcb->rx_desc = msdu_data;

		if (err_info.continuation) {
			__skb_queue_tail(&scatter_msdu_list, msdu);
			continue;
		}

		hw_link_id = ath12k_dp_rx_get_msdu_src_link(partner_ab,
							    msdu_data);
		if (hw_link_id >= ATH12K_GROUP_MAX_RADIO) {
			dev_kfree_skb_any(msdu);

			/* In case the continuation bit was set in the
			 * previous record, clean up scatter_msdu_list
			 */
			ath12k_dp_clean_up_skb_list(&scatter_msdu_list);
			continue;
		}

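		/* MSDUs that arrived earlier with the continuation bit set
		 * belong to this final MSDU; tag them with the same link id
		 * and move them to the delivery list.
		 */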
		if (!skb_queue_empty(&scatter_msdu_list)) {
			struct sk_buff *msdu;

			skb_queue_walk(&scatter_msdu_list, msdu) {
				rxcb = ATH12K_SKB_RXCB(msdu);
				rxcb->hw_link_id = hw_link_id;
			}

			skb_queue_splice_tail_init(&scatter_msdu_list,
						   &msdu_list);
		}

		rxcb = ATH12K_SKB_RXCB(msdu);
		rxcb->hw_link_id = hw_link_id;
		__skb_queue_tail(&msdu_list, msdu);
	}

	/* In case the continuation bit was set in the
	 * last record, clean up scatter_msdu_list
	 */
	ath12k_dp_clean_up_skb_list(&scatter_msdu_list);

	ath12k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	if (!total_num_buffs_reaped)
		goto done;

	for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) {
		if (!num_buffs_reaped[device_id])
			continue;

		partner_ab = ath12k_ag_to_ab(ag, device_id);
		rx_ring = &partner_ab->dp.rx_refill_buf_ring;

		ath12k_dp_rx_bufs_replenish(ab, rx_ring,
					    &rx_desc_used_list[device_id],
					    num_buffs_reaped[device_id]);
	}

	rcu_read_lock();
	while ((msdu = __skb_dequeue(&msdu_list))) {
		rxcb = ATH12K_SKB_RXCB(msdu);
		hw_link_id = rxcb->hw_link_id;

		device_id = hw_links[hw_link_id].device_id;
		partner_ab = ath12k_ag_to_ab(ag, device_id);
		if (unlikely(!partner_ab)) {
			ath12k_dbg(ab, ATH12K_DBG_DATA,
				   "Unable to process WBM error msdu due to invalid hw link id %d device id %d\n",
				   hw_link_id, device_id);
			dev_kfree_skb_any(msdu);
			continue;
		}

		pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params,
						      hw_links[hw_link_id].pdev_idx);
		ar = partner_ab->pdevs[pdev_id].ar;

		if (!ar || !rcu_dereference(ar->ab->pdevs_active[pdev_id])) {
			dev_kfree_skb_any(msdu);
			continue;
		}

		if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) {
			dev_kfree_skb_any(msdu);
			continue;
		}
		ath12k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list);
	}
	rcu_read_unlock();
done:
	return total_num_buffs_reaped;
}

void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct hal_tlv_64_hdr *hdr;
	struct hal_srng *srng;
	struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
	bool found = false;
	u16 tag;
	struct hal_reo_status reo_status;

	srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];

	memset(&reo_status, 0, sizeof(reo_status));

	spin_lock_bh(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	while ((hdr = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
		tag = le64_get_bits(hdr->tl, HAL_SRNG_TLV_HDR_TAG);

		switch (tag) {
		case HAL_REO_GET_QUEUE_STATS_STATUS:
			ath12k_hal_reo_status_queue_stats(ab, hdr,
							  &reo_status);
			break;
		case HAL_REO_FLUSH_QUEUE_STATUS:
			ath12k_hal_reo_flush_queue_status(ab, hdr,
							  &reo_status);
			break;
		case HAL_REO_FLUSH_CACHE_STATUS:
			ath12k_hal_reo_flush_cache_status(ab, hdr,
							  &reo_status);
			break;
		case HAL_REO_UNBLOCK_CACHE_STATUS:
			ath12k_hal_reo_unblk_cache_status(ab, hdr,
							  &reo_status);
			break;
		case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
			ath12k_hal_reo_flush_timeout_list_status(ab, hdr,
								 &reo_status);
			break;
		case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
			ath12k_hal_reo_desc_thresh_reached_status(ab, hdr,
								  &reo_status);
			break;
		case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
			ath12k_hal_reo_update_rx_reo_queue_status(ab, hdr,
								  &reo_status);
			break;
		default:
			ath12k_warn(ab, "Unknown reo status type %d\n", tag);
			continue;
		}

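		/* Match the status against a pending REO command and run its
		 * completion handler outside of reo_cmd_lock.
		 */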
		spin_lock_bh(&dp->reo_cmd_lock);
		list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
			if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
				found = true;
				list_del(&cmd->list);
				break;
			}
		}
		spin_unlock_bh(&dp->reo_cmd_lock);

		if (found) {
			cmd->handler(dp, (void *)&cmd->data,
				     reo_status.uniform_hdr.cmd_status);
			kfree(cmd);
		}

		found = false;
	}

	ath12k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);
}

void ath12k_dp_rx_free(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	int i;

	ath12k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);

	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
		if (ab->hw_params->rx_mac_buf_ring)
			ath12k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
	}

	for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++)
		ath12k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);

	ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);

	ath12k_dp_rxdma_buf_free(ab);
}

void ath12k_dp_rx_pdev_free(struct ath12k_base *ab, int mac_id)
{
	struct ath12k *ar = ab->pdevs[mac_id].ar;

	ath12k_dp_rx_pdev_srng_free(ar);
}

int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct htt_rx_ring_tlv_filter tlv_filter = {0};
	u32 ring_id;
	int ret;
	u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;

	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;

	tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING;
	tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR;
	tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST |
				       HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST |
				       HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA;
	tlv_filter.offset_valid = true;
	tlv_filter.rx_packet_offset = hal_rx_desc_sz;

	tlv_filter.rx_mpdu_start_offset =
		ab->hal_rx_ops->rx_desc_get_mpdu_start_offset();
	tlv_filter.rx_msdu_end_offset =
		ab->hal_rx_ops->rx_desc_get_msdu_end_offset();

	if (ath12k_dp_wmask_compaction_rx_tlv_supported(ab)) {
		tlv_filter.rx_mpdu_start_wmask =
			ab->hw_params->hal_ops->rxdma_ring_wmask_rx_mpdu_start();
		tlv_filter.rx_msdu_end_wmask =
			ab->hw_params->hal_ops->rxdma_ring_wmask_rx_msdu_end();
		ath12k_dbg(ab, ATH12K_DBG_DATA,
			   "Configuring compact tlv masks rx_mpdu_start_wmask 0x%x rx_msdu_end_wmask 0x%x\n",
			   tlv_filter.rx_mpdu_start_wmask, tlv_filter.rx_msdu_end_wmask);
	}

	ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, 0,
					       HAL_RXDMA_BUF,
					       DP_RXDMA_REFILL_RING_SIZE,
					       &tlv_filter);

	return ret;
}

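/* WCN7850 variant: the packet header TLV is picked up from within the rx
 * descriptor, and the filter is programmed on each per-MAC buffer ring
 * rather than on the common refill ring.
 */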
int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct htt_rx_ring_tlv_filter tlv_filter = {0};
	u32 ring_id;
	int ret = 0;
	u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
	int i;

	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;

	tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING;
	tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR;
	tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST |
				       HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST |
				       HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA;
	tlv_filter.offset_valid = true;
	tlv_filter.rx_packet_offset = hal_rx_desc_sz;

	tlv_filter.rx_header_offset = offsetof(struct hal_rx_desc_wcn7850, pkt_hdr_tlv);

	tlv_filter.rx_mpdu_start_offset =
		ab->hal_rx_ops->rx_desc_get_mpdu_start_offset();
	tlv_filter.rx_msdu_end_offset =
		ab->hal_rx_ops->rx_desc_get_msdu_end_offset();

	/* TODO: Selectively subscribe to the required qwords within msdu_end
	 * and mpdu_start, set up the mask in the message below and modify the
	 * rx_desc struct accordingly.
	 */

	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
		ring_id = dp->rx_mac_buf_ring[i].ring_id;
		ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, i,
						       HAL_RXDMA_BUF,
						       DP_RXDMA_REFILL_RING_SIZE,
						       &tlv_filter);
	}

	return ret;
}

int ath12k_dp_rx_htt_setup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	u32 ring_id;
	int i, ret;

	/* TODO: Need to verify the HTT setup for QCN9224 */
	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
	ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, 0, HAL_RXDMA_BUF);
	if (ret) {
		ath12k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
			    ret);
		return ret;
	}

	if (ab->hw_params->rx_mac_buf_ring) {
		for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
			ring_id = dp->rx_mac_buf_ring[i].ring_id;
			ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
							  i, HAL_RXDMA_BUF);
			if (ret) {
				ath12k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n",
					    i, ret);
				return ret;
			}
		}
	}

	for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) {
		ring_id = dp->rxdma_err_dst_ring[i].ring_id;
		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
						  i, HAL_RXDMA_DST);
		if (ret) {
			ath12k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n",
				    i, ret);
			return ret;
		}
	}

	if (ab->hw_params->rxdma1_enable) {
		ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
						  0, HAL_RXDMA_MONITOR_BUF);
		if (ret) {
			ath12k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
				    ret);
			return ret;
		}
	}

	ret = ab->hw_params->hw_ops->rxdma_ring_sel_config(ab);
	if (ret) {
		ath12k_warn(ab, "failed to setup rxdma ring selection config\n");
		return ret;
	}

	return 0;
}

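/* Allocate the SoC-level RX SRNGs: the refill ring, optional per-MAC buffer
 * rings, the RXDMA error destination rings and, when rxdma1 is enabled, the
 * monitor buffer ring.
 */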
int ath12k_dp_rx_alloc(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	int i, ret;

	idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
	spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);

	ret = ath12k_dp_srng_setup(ab,
				   &dp->rx_refill_buf_ring.refill_buf_ring,
				   HAL_RXDMA_BUF, 0, 0,
				   DP_RXDMA_BUF_RING_SIZE);
	if (ret) {
		ath12k_warn(ab, "failed to setup rx_refill_buf_ring\n");
		return ret;
	}

	if (ab->hw_params->rx_mac_buf_ring) {
		for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
			ret = ath12k_dp_srng_setup(ab,
						   &dp->rx_mac_buf_ring[i],
						   HAL_RXDMA_BUF, 1,
						   i, DP_RX_MAC_BUF_RING_SIZE);
			if (ret) {
				ath12k_warn(ab, "failed to setup rx_mac_buf_ring %d\n",
					    i);
				return ret;
			}
		}
	}

	for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) {
		ret = ath12k_dp_srng_setup(ab, &dp->rxdma_err_dst_ring[i],
					   HAL_RXDMA_DST, 0, i,
					   DP_RXDMA_ERR_DST_RING_SIZE);
		if (ret) {
			ath12k_warn(ab, "failed to setup rxdma_err_dst_ring %d\n", i);
			return ret;
		}
	}

	if (ab->hw_params->rxdma1_enable) {
		ret = ath12k_dp_srng_setup(ab,
					   &dp->rxdma_mon_buf_ring.refill_buf_ring,
					   HAL_RXDMA_MONITOR_BUF, 0, 0,
					   DP_RXDMA_MONITOR_BUF_RING_SIZE);
		if (ret) {
			ath12k_warn(ab, "failed to setup HAL_RXDMA_MONITOR_BUF\n");
			return ret;
		}
	}

	ret = ath12k_dp_rxdma_buf_setup(ab);
	if (ret) {
		ath12k_warn(ab, "failed to setup rxdma ring\n");
		return ret;
	}

	return 0;
}

int ath12k_dp_rx_pdev_alloc(struct ath12k_base *ab, int mac_id)
{
	struct ath12k *ar = ab->pdevs[mac_id].ar;
	struct ath12k_pdev_dp *dp = &ar->dp;
	u32 ring_id;
	int i;
	int ret;

	if (!ab->hw_params->rxdma1_enable)
		goto out;

	ret = ath12k_dp_rx_pdev_srng_alloc(ar);
	if (ret) {
		ath12k_warn(ab, "failed to setup rx srngs\n");
		return ret;
	}

	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
		ring_id = dp->rxdma_mon_dst_ring[i].ring_id;
		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
						  mac_id + i,
						  HAL_RXDMA_MONITOR_DST);
		if (ret) {
			ath12k_warn(ab,
				    "failed to configure rxdma_mon_dst_ring %d %d\n",
				    i, ret);
			return ret;
		}
	}
out:
	return 0;
}

static int ath12k_dp_rx_pdev_mon_status_attach(struct ath12k *ar)
{
	struct ath12k_pdev_dp *dp = &ar->dp;
	struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&dp->mon_data;

	skb_queue_head_init(&pmon->rx_status_q);

	pmon->mon_ppdu_status = DP_PPDU_STATUS_START;

	memset(&pmon->rx_mon_stats, 0,
	       sizeof(pmon->rx_mon_stats));
	return 0;
}

int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar)
{
	struct ath12k_pdev_dp *dp = &ar->dp;
	struct ath12k_mon_data *pmon = &dp->mon_data;
	int ret = 0;

	ret = ath12k_dp_rx_pdev_mon_status_attach(ar);
	if (ret) {
		ath12k_warn(ar->ab, "pdev_mon_status_attach() failed");
		return ret;
	}

	/* If rxdma1_enable is false, there is no need to set up
	 * rxdma_mon_desc_ring.
	 */
	if (!ar->ab->hw_params->rxdma1_enable)
		return 0;

	pmon->mon_last_linkdesc_paddr = 0;
	pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
	INIT_LIST_HEAD(&pmon->dp_rx_mon_mpdu_list);
	pmon->mon_mpdu = NULL;
	spin_lock_init(&pmon->mon_lock);

	return 0;
}