// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/ieee80211.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <crypto/hash.h>
#include "core.h"
#include "debug.h"
#include "debugfs_htt_stats.h"
#include "debugfs_sta.h"
#include "hal_desc.h"
#include "hw.h"
#include "dp_rx.h"
#include "hal_rx.h"
#include "dp_tx.h"
#include "peer.h"

/* NOTE(review): value is in jiffies (2 * HZ) despite the _MS suffix —
 * confirm at the usage sites.
 */
#define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)

/* The helpers below are thin wrappers around the per-chip hw_ops rx
 * descriptor accessors; they isolate the rest of the rx path from the
 * hardware-specific struct hal_rx_desc layout.
 */

static inline
u8 *ath11k_dp_rx_h_80211_hdr(struct ath11k_base *ab, struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_hdr_status(desc);
}

/* Encryption type of the mpdu; HAL_ENCRYPT_TYPE_OPEN when the descriptor
 * carries no valid encryption info.
 */
static inline
enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_base *ab,
							struct hal_rx_desc *desc)
{
	if (!ab->hw_params.hw_ops->rx_desc_encrypt_valid(desc))
		return HAL_ENCRYPT_TYPE_OPEN;

	return ab->hw_params.hw_ops->rx_desc_get_encrypt_type(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_decap_type(struct ath11k_base *ab,
						      struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_decap_type(desc);
}

static inline
bool ath11k_dp_rx_h_msdu_start_ldpc_support(struct ath11k_base *ab,
					    struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_ldpc_support(desc);
}

static inline
u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base *ab,
					      struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mesh_ctl(desc);
}

static inline
bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct ath11k_base *ab,
					      struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
}

static inline bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct ath11k_base *ab,
						      struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_fc_valid(desc);
}

/* More-fragments bit of the 802.11 header that sits hal_desc_sz bytes into
 * the rx buffer, right after the hw descriptor.
 */
static inline bool ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base *ab,
							struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
	return ieee80211_has_morefrags(hdr->frame_control);
}

/* Fragment number from the sequence-control field of the inline 802.11 hdr. */
static inline u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base *ab,
						    struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
	return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
}

static inline u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct ath11k_base *ab,
						   struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_start_seq_no(desc);
}

static inline void *ath11k_dp_rx_get_attention(struct ath11k_base *ab,
					       struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_attention(desc);
}

static inline bool ath11k_dp_rx_h_attn_msdu_done(struct rx_attention *attn)
{
	return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE,
			   __le32_to_cpu(attn->info2));
}

static inline bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct rx_attention *attn)
{
	return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL,
			   __le32_to_cpu(attn->info1));
}

static inline bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct rx_attention *attn)
{
	return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL,
			   __le32_to_cpu(attn->info1));
}

static inline bool ath11k_dp_rx_h_attn_is_decrypted(struct rx_attention *attn)
{
	return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
			  __le32_to_cpu(attn->info2)) ==
		RX_DESC_DECRYPT_STATUS_CODE_OK);
}

/* Translate the attention-word error bits into a DP_RX_MPDU_ERR_* bitmap. */
static u32 ath11k_dp_rx_h_attn_mpdu_err(struct rx_attention *attn)
{
	u32 info = __le32_to_cpu(attn->info1);
	u32 errmap = 0;

	if (info & RX_ATTENTION_INFO1_FCS_ERR)
		errmap |= DP_RX_MPDU_ERR_FCS;

	if (info & RX_ATTENTION_INFO1_DECRYPT_ERR)
		errmap |= DP_RX_MPDU_ERR_DECRYPT;

	if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR)
		errmap |= DP_RX_MPDU_ERR_TKIP_MIC;

	if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR)
		errmap |= DP_RX_MPDU_ERR_AMSDU_ERR;

	if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR)
		errmap |= DP_RX_MPDU_ERR_OVERFLOW;

	if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR)
		errmap |= DP_RX_MPDU_ERR_MSDU_LEN;

	if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR)
		errmap |= DP_RX_MPDU_ERR_MPDU_LEN;

	return errmap;
}

static bool ath11k_dp_rx_h_attn_msdu_len_err(struct ath11k_base *ab,
					     struct hal_rx_desc *desc)
{
	struct rx_attention *rx_attention;
	u32 errmap;

	rx_attention = ath11k_dp_rx_get_attention(ab, desc);
	errmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);

	return errmap & DP_RX_MPDU_ERR_MSDU_LEN;
}

static inline u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct ath11k_base *ab,
						     struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_len(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_sgi(struct ath11k_base *ab,
					       struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_sgi(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct ath11k_base *ab,
						    struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_rate_mcs(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct ath11k_base *ab,
						 struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_rx_bw(desc);
}

static inline u32 ath11k_dp_rx_h_msdu_start_freq(struct ath11k_base *ab,
						 struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_freq(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct ath11k_base *ab,
						    struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_pkt_type(desc);
}

/* Spatial-stream count; the hw value appears to be a stream bitmask
 * (hence the popcount) — confirm against the hw_ops implementation.
 */
static inline u8 ath11k_dp_rx_h_msdu_start_nss(struct ath11k_base *ab,
					       struct hal_rx_desc *desc)
{
	return hweight8(ab->hw_params.hw_ops->rx_desc_get_msdu_nss(desc));
}

static inline u8 ath11k_dp_rx_h_mpdu_start_tid(struct ath11k_base *ab,
					       struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_tid(desc);
}

static inline u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct ath11k_base *ab,
						    struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_peer_id(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_end_l3pad(struct ath11k_base *ab,
					       struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_l3_pad_bytes(desc);
}

static inline bool ath11k_dp_rx_h_msdu_end_first_msdu(struct ath11k_base *ab,
						      struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_first_msdu(desc);
}

static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct ath11k_base *ab,
					      struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_last_msdu(desc);
}

/* Copy the attention/msdu-end TLVs between descriptors
 * (fdesc = first msdu's desc, ldesc = last msdu's desc).
 */
static void ath11k_dp_rx_desc_end_tlv_copy(struct ath11k_base *ab,
					   struct hal_rx_desc *fdesc,
					   struct hal_rx_desc *ldesc)
{
	ab->hw_params.hw_ops->rx_desc_copy_attn_end_tlv(fdesc, ldesc);
}

static inline u32 ath11k_dp_rxdesc_get_mpdulen_err(struct rx_attention *attn)
{
	return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR,
			 __le32_to_cpu(attn->info1));
}

static inline u8 *ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base *ab,
						struct hal_rx_desc *rx_desc)
{
	u8 *rx_pkt_hdr;

	rx_pkt_hdr = ab->hw_params.hw_ops->rx_desc_get_msdu_payload(rx_desc);

	return rx_pkt_hdr;
}

/* True when the descriptor's mpdu_start TLV tag is present, i.e. the
 * descriptor describes a valid mpdu.
 */
static inline bool ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base *ab,
					       struct hal_rx_desc *rx_desc)
{
	u32 tlv_tag;

	tlv_tag = ab->hw_params.hw_ops->rx_desc_get_mpdu_start_tag(rx_desc);

	return tlv_tag == HAL_RX_MPDU_START;
}

static inline u32 ath11k_dp_rxdesc_get_ppduid(struct ath11k_base *ab,
					      struct hal_rx_desc *rx_desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_ppdu_id(rx_desc);
}

static inline void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab,
						 struct hal_rx_desc *desc,
						 u16 len)
{
	ab->hw_params.hw_ops->rx_desc_set_msdu_len(desc, len);
}

/* Multicast/broadcast check; only asserted on the first msdu of an mpdu. */
static bool ath11k_dp_rx_h_attn_is_mcbc(struct ath11k_base *ab,
					struct hal_rx_desc *desc)
{
	struct rx_attention *attn = ath11k_dp_rx_get_attention(ab, desc);

	return ath11k_dp_rx_h_msdu_end_first_msdu(ab, desc) &&
		(!!FIELD_GET(RX_ATTENTION_INFO1_MCAST_BCAST,
		 __le32_to_cpu(attn->info1)));
}

static bool ath11k_dp_rxdesc_mac_addr2_valid(struct ath11k_base *ab,
					     struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_mac_addr2_valid(desc);
}

static u8 *ath11k_dp_rxdesc_mpdu_start_addr2(struct ath11k_base *ab,
					     struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_mpdu_start_addr2(desc);
}

/* Periodic monitor-ring reaper; processes every rxdma pdev ring with a
 * fixed budget and re-arms itself every ATH11K_MON_TIMER_INTERVAL ms.
 */
static void ath11k_dp_service_mon_ring(struct timer_list *t)
{
	struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer);
	int i;

	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++)
		ath11k_dp_rx_process_mon_rings(ab, i, NULL,
					       DP_MON_SERVICE_BUDGET);

	mod_timer(&ab->mon_reap_timer, jiffies +
		  msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
}

/* Drain the monitor rings until a pass reaps less than the budget, or the
 * purge timeout expires.  Returns 0 on full drain, -ETIMEDOUT otherwise.
 */
static int ath11k_dp_purge_mon_ring(struct ath11k_base *ab)
{
	int i, reaped = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS);

	do {
		for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++)
			reaped += ath11k_dp_rx_process_mon_rings(ab, i,
								 NULL,
								 DP_MON_SERVICE_BUDGET);

		/* nothing more to reap */
		if (reaped < DP_MON_SERVICE_BUDGET)
			return 0;

	} while (time_before(jiffies, timeout));

	ath11k_warn(ab, "dp mon ring purge timeout");

	return -ETIMEDOUT;
}

/* Returns number of Rx buffers replenished */
int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
			       struct dp_rxdma_ring *rx_ring,
			       int req_entries,
			       enum hal_rx_buf_return_buf_manager mgr)
{
	struct hal_srng *srng;
	u32 *desc;
	struct sk_buff *skb;
	int num_free;
	int num_remain;
	int buf_id;
	u32 cookie;
	dma_addr_t paddr;

	req_entries = min(req_entries, rx_ring->bufs_max);

	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
	/* req_entries == 0 means "top up"; only bother when the ring has
	 * drained below 1/4 occupancy.
	 */
	if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
		req_entries = num_free;

	req_entries = min(num_free, req_entries);
	num_remain = req_entries;

	while (num_remain > 0) {
		/* Over-allocate so the data pointer can be pulled up to the
		 * required DMA alignment below.
		 */
		skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
				    DP_RX_BUFFER_ALIGN_SIZE);
		if (!skb)
			break;

		if (!IS_ALIGNED((unsigned long)skb->data,
				DP_RX_BUFFER_ALIGN_SIZE)) {
			skb_pull(skb,
				 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
				 skb->data);
		}

		paddr = dma_map_single(ab->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(ab->dev, paddr))
			goto fail_free_skb;

		spin_lock_bh(&rx_ring->idr_lock);
		buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 1,
				   (rx_ring->bufs_max * 3) + 1, GFP_ATOMIC);
		spin_unlock_bh(&rx_ring->idr_lock);
		if (buf_id <= 0)
			goto fail_dma_unmap;

		desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
		if (!desc)
			goto fail_idr_remove;

		ATH11K_SKB_RXCB(skb)->paddr = paddr;

		/* The cookie encodes both the pdev and the idr buffer id so
		 * the completion path can find the skb again.
		 */
		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

		num_remain--;

		ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;

fail_idr_remove:
	spin_lock_bh(&rx_ring->idr_lock);
	idr_remove(&rx_ring->bufs_idr, buf_id);
	spin_unlock_bh(&rx_ring->idr_lock);
fail_dma_unmap:
	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
			 DMA_FROM_DEVICE);
fail_free_skb:
	dev_kfree_skb_any(skb);

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;
}

/* Release all buffers posted to @rx_ring: remove them from the idr,
 * unmap and free the skbs, then destroy the idr itself.
 */
static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar,
					 struct dp_rxdma_ring *rx_ring)
{
	struct sk_buff *skb;
	int buf_id;

	spin_lock_bh(&rx_ring->idr_lock);
	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
		idr_remove(&rx_ring->bufs_idr, buf_id);
		/* TODO: Understand where internal driver does this dma_unmap
		 * of rxdma_buffer.
		 */
		dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	idr_destroy(&rx_ring->bufs_idr);
	spin_unlock_bh(&rx_ring->idr_lock);

	return 0;
}

/* Free the rx buffers of every rxdma ring owned by this pdev. */
static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
	int i;

	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);

	rx_ring = &dp->rxdma_mon_buf_ring;
	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);

	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
		rx_ring = &dp->rx_mon_status_refill_ring[i];
		ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
	}

	return 0;
}

/* Post as many rx buffers to @rx_ring as it has entries. */
static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar,
					  struct dp_rxdma_ring *rx_ring,
					  u32 ringtype)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	int num_entries;

	num_entries = rx_ring->refill_buf_ring.size /
		      ath11k_hal_srng_get_entrysize(ar->ab, ringtype);

	rx_ring->bufs_max = num_entries;
	ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries,
				   ar->ab->hw_params.hal_params->rx_buf_rbm);
	return 0;
}

/* Fill all per-pdev rxdma rings; the monitor buf ring only exists when
 * rxdma1 is enabled for this target.
 */
static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
	int i;

	ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF);

	if (ar->ab->hw_params.rxdma1_enable) {
		rx_ring = &dp->rxdma_mon_buf_ring;
		ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF);
	}

	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
		rx_ring = &dp->rx_mon_status_refill_ring[i];
		ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS);
	}

	return 0;
}

/* Tear down every srng allocated by ath11k_dp_rx_pdev_srng_alloc(). */
static void
ath11k_dp_rx_pdev_srng_free(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	int i;

	ath11k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);

	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
		if (ab->hw_params.rx_mac_buf_ring)
			ath11k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);

		ath11k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);
		ath11k_dp_srng_cleanup(ab,
				       &dp->rx_mon_status_refill_ring[i].refill_buf_ring);
	}

	ath11k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
}

void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;

	for (i = 0; i < DP_REO_DST_RING_MAX; i++)
		ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
}

/* Allocate all REO destination rings; on failure everything allocated so
 * far is torn down again.
 */
int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int ret;
	int i;

	for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
		ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
					   HAL_REO_DST, i, 0,
					   DP_REO_DST_RING_SIZE);
		if (ret) {
			ath11k_warn(ab, "failed to setup reo_dst_ring\n");
			goto err_reo_cleanup;
		}
	}

	return 0;

err_reo_cleanup:
	ath11k_dp_pdev_reo_cleanup(ab);

	return ret;
}

/* Allocate all per-pdev rx srngs (refill, per-mac buf, err dst, monitor
 * status and - when rxdma1 is available - the monitor buf/dst/desc rings).
 * NOTE: earlier allocations are not rolled back on failure here; the
 * caller is expected to invoke the srng free path — confirm.
 */
static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	struct dp_srng *srng = NULL;
	int i;
	int ret;

	ret = ath11k_dp_srng_setup(ar->ab,
				   &dp->rx_refill_buf_ring.refill_buf_ring,
				   HAL_RXDMA_BUF, 0,
				   dp->mac_id, DP_RXDMA_BUF_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n");
		return ret;
	}

	if (ar->ab->hw_params.rx_mac_buf_ring) {
		for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
			ret = ath11k_dp_srng_setup(ar->ab,
						   &dp->rx_mac_buf_ring[i],
						   HAL_RXDMA_BUF, 1,
						   dp->mac_id + i, 1024);
			if (ret) {
				ath11k_warn(ar->ab, "failed to setup rx_mac_buf_ring %d\n",
					    i);
				return ret;
			}
		}
	}

	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
		ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring[i],
					   HAL_RXDMA_DST, 0, dp->mac_id + i,
					   DP_RXDMA_ERR_DST_RING_SIZE);
		if (ret) {
			ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring %d\n", i);
			return ret;
		}
	}

	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
		srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
		ret = ath11k_dp_srng_setup(ar->ab,
					   srng,
					   HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id + i,
					   DP_RXDMA_MON_STATUS_RING_SIZE);
		if (ret) {
			ath11k_warn(ar->ab,
				    "failed to setup rx_mon_status_refill_ring %d\n", i);
			return ret;
		}
	}

	/* if rxdma1_enable is false, then it doesn't need
	 * to setup rxdam_mon_buf_ring, rxdma_mon_dst_ring
	 * and rxdma_mon_desc_ring.
	 * init reap timer for QCA6390.
	 */
	if (!ar->ab->hw_params.rxdma1_enable) {
		/* init mon status buffer reap timer */
		timer_setup(&ar->ab->mon_reap_timer,
			    ath11k_dp_service_mon_ring, 0);
		return 0;
	}

	ret = ath11k_dp_srng_setup(ar->ab,
				   &dp->rxdma_mon_buf_ring.refill_buf_ring,
				   HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_BUF_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_BUF\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring,
				   HAL_RXDMA_MONITOR_DST, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_DST_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_DST\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring,
				   HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_DESC_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_DESC\n");
		return ret;
	}

	return 0;
}

/* Drop every queued REO command and every pending cache-flush element,
 * unmapping and freeing the associated tid queue descriptors.
 */
void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct dp_reo_cmd *cmd, *tmp;
	struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache;
	struct dp_rx_tid *rx_tid;

	spin_lock_bh(&dp->reo_cmd_lock);
	list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
		list_del(&cmd->list);
		rx_tid = &cmd->data;
		if (rx_tid->vaddr) {
			dma_unmap_single(ab->dev, rx_tid->paddr,
					 rx_tid->size, DMA_BIDIRECTIONAL);
			kfree(rx_tid->vaddr);
			rx_tid->vaddr = NULL;
		}
		kfree(cmd);
	}

	list_for_each_entry_safe(cmd_cache, tmp_cache,
				 &dp->reo_cmd_cache_flush_list, list) {
		list_del(&cmd_cache->list);
		dp->reo_cmd_cache_flush_count--;
		rx_tid = &cmd_cache->data;
		if (rx_tid->vaddr) {
			dma_unmap_single(ab->dev, rx_tid->paddr,
					 rx_tid->size, DMA_BIDIRECTIONAL);
			kfree(rx_tid->vaddr);
			rx_tid->vaddr = NULL;
		}
		kfree(cmd_cache);
	}
	spin_unlock_bh(&dp->reo_cmd_lock);
}

/* Completion handler for HAL_REO_CMD_FLUSH_CACHE: release the tid queue
 * descriptor once the hw has flushed it.
 */
static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx,
				   enum hal_reo_cmd_status status)
{
	struct dp_rx_tid *rx_tid = ctx;

	if (status != HAL_REO_CMD_SUCCESS)
		ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
			    rx_tid->tid, status);
	if (rx_tid->vaddr) {
		dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->vaddr);
		rx_tid->vaddr = NULL;
	}
}

/* Flush the cached REO queue descriptor of @rx_tid out of hw, one
 * qdesc-sized chunk at a time from the tail; the final command carries
 * NEED_STATUS and frees the descriptor via ath11k_dp_reo_cmd_free().
 */
static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
				      struct dp_rx_tid *rx_tid)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	unsigned long tot_desc_sz, desc_sz;
	int ret;

	tot_desc_sz = rx_tid->size;
	desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);

	while (tot_desc_sz > desc_sz) {
		tot_desc_sz -= desc_sz;
		cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
		ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
						HAL_REO_CMD_FLUSH_CACHE, &cmd,
						NULL);
		if (ret)
			ath11k_warn(ab,
				    "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
				    rx_tid->tid, ret);
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
	ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
					HAL_REO_CMD_FLUSH_CACHE,
					&cmd, ath11k_dp_reo_cmd_free);
	if (ret) {
		ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
			   rx_tid->tid, ret);
		dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->vaddr);
		rx_tid->vaddr = NULL;
	}
}

/* Completion handler for the UPDATE_RX_QUEUE (vld=0) command issued by
 * ath11k_peer_rx_tid_delete(): queue the tid for a deferred cache flush,
 * and opportunistically flush entries that aged past the threshold.
 */
static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
				      enum hal_reo_cmd_status status)
{
	struct ath11k_base *ab = dp->ab;
	struct dp_rx_tid *rx_tid = ctx;
	struct dp_reo_cache_flush_elem *elem, *tmp;

	if (status == HAL_REO_CMD_DRAIN) {
		goto free_desc;
	} else if (status != HAL_REO_CMD_SUCCESS) {
		/* Shouldn't happen! Cleanup in case of other failure? */
		ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
			    rx_tid->tid, status);
		return;
	}

	elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
	if (!elem)
		goto free_desc;

	elem->ts = jiffies;
	memcpy(&elem->data, rx_tid, sizeof(*rx_tid));

	spin_lock_bh(&dp->reo_cmd_lock);
	list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
	dp->reo_cmd_cache_flush_count++;

	/* Flush and invalidate aged REO desc from HW cache */
	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
				 list) {
		if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD ||
		    time_after(jiffies, elem->ts +
			       msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) {
			list_del(&elem->list);
			dp->reo_cmd_cache_flush_count--;
			/* reo_cmd_lock is dropped around the flush since
			 * ath11k_dp_reo_cache_flush() issues REO commands.
			 */
			spin_unlock_bh(&dp->reo_cmd_lock);

			ath11k_dp_reo_cache_flush(ab, &elem->data);
			kfree(elem);
			spin_lock_bh(&dp->reo_cmd_lock);
		}
	}
	spin_unlock_bh(&dp->reo_cmd_lock);

	return;
free_desc:
	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);
	rx_tid->vaddr = NULL;
}

/* Invalidate a peer's rx tid hw queue; the qdesc memory is released from
 * the REO command completion (ath11k_dp_rx_tid_del_func).
 */
void ath11k_peer_rx_tid_delete(struct ath11k *ar,
			       struct ath11k_peer *peer, u8 tid)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	int ret;

	if (!rx_tid->active)
		return;

	rx_tid->active = false;

	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
					ath11k_dp_rx_tid_del_func);
	if (ret) {
		if (ret != -ESHUTDOWN)
			ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
				   tid, ret);
		dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->vaddr);
		rx_tid->vaddr = NULL;
	}

	rx_tid->paddr = 0;
	rx_tid->size = 0;
}

/* Return a used msdu link descriptor to the WBM release ring. */
static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
					 u32 *link_desc,
					 enum hal_wbm_rel_bm_act action)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng;
	u32 *desc;
	int ret = 0;

	srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOBUFS;
		goto exit;
	}

	ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc,
					 action);

exit:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return ret;
}

/* Drop all partially reassembled fragments of @rx_tid and reset the
 * reassembly state.  Caller holds base_lock.
 */
static void ath11k_dp_rx_frags_cleanup(struct dp_rx_tid *rx_tid, bool rel_link_desc)
{
	struct ath11k_base *ab = rx_tid->ab;

	lockdep_assert_held(&ab->base_lock);

	if (rx_tid->dst_ring_desc) {
		if (rel_link_desc)
			ath11k_dp_rx_link_desc_return(ab, (u32 *)rx_tid->dst_ring_desc,
						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
		kfree(rx_tid->dst_ring_desc);
		rx_tid->dst_ring_desc = NULL;
	}

	rx_tid->cur_sn = 0;
	rx_tid->last_frag_no = 0;
	rx_tid->rx_frag_bitmap = 0;
	__skb_queue_purge(&rx_tid->rx_frags);
}

/* Cancel all fragment timers and drop queued fragments of every tid. */
void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer)
{
	struct dp_rx_tid *rx_tid;
	int i;

	lockdep_assert_held(&ar->ab->base_lock);

	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
		rx_tid = &peer->rx_tid[i];

		/* base_lock is dropped around del_timer_sync() — presumably
		 * because the frag timer handler takes base_lock itself;
		 * confirm against the timer callback.
		 */
		spin_unlock_bh(&ar->ab->base_lock);
		del_timer_sync(&rx_tid->frag_timer);
		spin_lock_bh(&ar->ab->base_lock);

		ath11k_dp_rx_frags_cleanup(rx_tid, true);
	}
}

/* Full teardown of every rx tid of @peer: hw queue delete, fragment
 * cleanup and frag timer cancellation.
 */
void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
{
	struct dp_rx_tid *rx_tid;
	int i;

	lockdep_assert_held(&ar->ab->base_lock);

	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
		rx_tid = &peer->rx_tid[i];

		ath11k_peer_rx_tid_delete(ar, peer, i);
		ath11k_dp_rx_frags_cleanup(rx_tid, true);

		spin_unlock_bh(&ar->ab->base_lock);
		del_timer_sync(&rx_tid->frag_timer);
		spin_lock_bh(&ar->ab->base_lock);
	}
}

/* Update BA window size (and optionally the SSN) of an existing rx tid
 * hw queue via HAL_REO_CMD_UPDATE_RX_QUEUE.
 */
static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar,
					 struct ath11k_peer *peer,
					 struct dp_rx_tid *rx_tid,
					 u32 ba_win_sz, u16 ssn,
					 bool update_ssn)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	int ret;

	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
	cmd.ba_window_size = ba_win_sz;

	if (update_ssn) {
		cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
		cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn);
	}

	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
					NULL);
	if (ret) {
		ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
			    rx_tid->tid, ret);
		return ret;
	}

	rx_tid->ba_win_sz = ba_win_sz;

	return 0;
}

/* Undo the qdesc allocation of ath11k_peer_rx_tid_setup() when the
 * follow-up WMI reorder-queue setup failed.
 */
static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
				      const u8 *peer_mac, int vdev_id, u8 tid)
{
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n");
		goto unlock_exit;
	}

	rx_tid = &peer->rx_tid[tid];
	if (!rx_tid->active)
		goto unlock_exit;

	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);
	rx_tid->vaddr = NULL;

	rx_tid->active = false;

unlock_exit:
	spin_unlock_bh(&ab->base_lock);
}

/* Allocate and program the REO queue descriptor for one rx tid of a peer,
 * then announce it to firmware via WMI.  If the tid is already active the
 * existing hw queue is only updated (BA window + SSN).
 */
int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
			     u8 tid, u32 ba_win_sz, u16 ssn,
			     enum hal_pn_type pn_type)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;
	u32 hw_desc_sz;
	u32 *addr_aligned;
	void *vaddr;
	dma_addr_t paddr;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer %pM to set up rx tid\n",
			    peer_mac);
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	rx_tid = &peer->rx_tid[tid];
	/* Update the tid queue if it is already setup */
	if (rx_tid->active) {
		paddr = rx_tid->paddr;
		ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid,
						    ba_win_sz, ssn, true);
		spin_unlock_bh(&ab->base_lock);
		if (ret) {
			ath11k_warn(ab, "failed to update reo for peer %pM rx tid %d\n: %d",
				    peer_mac, tid, ret);
			return ret;
		}

		ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
							     peer_mac, paddr,
							     tid, 1, ba_win_sz);
		if (ret)
			ath11k_warn(ab, "failed to send wmi rx reorder queue for peer %pM tid %d: %d\n",
				    peer_mac, tid, ret);
		return ret;
	}

	rx_tid->tid = tid;

	rx_tid->ba_win_sz = ba_win_sz;

	/* TODO: Optimize the memory allocation for qos tid based on
	 * the actual BA window size in REO tid update path.
1047 */ 1048 if (tid == HAL_DESC_REO_NON_QOS_TID) 1049 hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid); 1050 else 1051 hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid); 1052 1053 vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC); 1054 if (!vaddr) { 1055 spin_unlock_bh(&ab->base_lock); 1056 return -ENOMEM; 1057 } 1058 1059 addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN); 1060 1061 ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz, 1062 ssn, pn_type); 1063 1064 paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz, 1065 DMA_BIDIRECTIONAL); 1066 1067 ret = dma_mapping_error(ab->dev, paddr); 1068 if (ret) { 1069 spin_unlock_bh(&ab->base_lock); 1070 ath11k_warn(ab, "failed to setup dma map for peer %pM rx tid %d: %d\n", 1071 peer_mac, tid, ret); 1072 goto err_mem_free; 1073 } 1074 1075 rx_tid->vaddr = vaddr; 1076 rx_tid->paddr = paddr; 1077 rx_tid->size = hw_desc_sz; 1078 rx_tid->active = true; 1079 1080 spin_unlock_bh(&ab->base_lock); 1081 1082 ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac, 1083 paddr, tid, 1, ba_win_sz); 1084 if (ret) { 1085 ath11k_warn(ar->ab, "failed to setup rx reorder queue for peer %pM tid %d: %d\n", 1086 peer_mac, tid, ret); 1087 ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid); 1088 } 1089 1090 return ret; 1091 1092 err_mem_free: 1093 kfree(rx_tid->vaddr); 1094 rx_tid->vaddr = NULL; 1095 1096 return ret; 1097 } 1098 1099 int ath11k_dp_rx_ampdu_start(struct ath11k *ar, 1100 struct ieee80211_ampdu_params *params) 1101 { 1102 struct ath11k_base *ab = ar->ab; 1103 struct ath11k_sta *arsta = ath11k_sta_to_arsta(params->sta); 1104 int vdev_id = arsta->arvif->vdev_id; 1105 int ret; 1106 1107 ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id, 1108 params->tid, params->buf_size, 1109 params->ssn, arsta->pn_type); 1110 if (ret) 1111 ath11k_warn(ab, "failed to setup rx tid %d\n", ret); 1112 1113 return ret; 1114 } 1115 1116 int ath11k_dp_rx_ampdu_stop(struct ath11k 
*ar, 1117 struct ieee80211_ampdu_params *params) 1118 { 1119 struct ath11k_base *ab = ar->ab; 1120 struct ath11k_peer *peer; 1121 struct ath11k_sta *arsta = ath11k_sta_to_arsta(params->sta); 1122 int vdev_id = arsta->arvif->vdev_id; 1123 dma_addr_t paddr; 1124 bool active; 1125 int ret; 1126 1127 spin_lock_bh(&ab->base_lock); 1128 1129 peer = ath11k_peer_find(ab, vdev_id, params->sta->addr); 1130 if (!peer) { 1131 ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n"); 1132 spin_unlock_bh(&ab->base_lock); 1133 return -ENOENT; 1134 } 1135 1136 paddr = peer->rx_tid[params->tid].paddr; 1137 active = peer->rx_tid[params->tid].active; 1138 1139 if (!active) { 1140 spin_unlock_bh(&ab->base_lock); 1141 return 0; 1142 } 1143 1144 ret = ath11k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false); 1145 spin_unlock_bh(&ab->base_lock); 1146 if (ret) { 1147 ath11k_warn(ab, "failed to update reo for rx tid %d: %d\n", 1148 params->tid, ret); 1149 return ret; 1150 } 1151 1152 ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, 1153 params->sta->addr, paddr, 1154 params->tid, 1, 1); 1155 if (ret) 1156 ath11k_warn(ab, "failed to send wmi to delete rx tid %d\n", 1157 ret); 1158 1159 return ret; 1160 } 1161 1162 int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif, 1163 const u8 *peer_addr, 1164 enum set_key_cmd key_cmd, 1165 struct ieee80211_key_conf *key) 1166 { 1167 struct ath11k *ar = arvif->ar; 1168 struct ath11k_base *ab = ar->ab; 1169 struct ath11k_hal_reo_cmd cmd = {0}; 1170 struct ath11k_peer *peer; 1171 struct dp_rx_tid *rx_tid; 1172 u8 tid; 1173 int ret = 0; 1174 1175 /* NOTE: Enable PN/TSC replay check offload only for unicast frames. 1176 * We use mac80211 PN/TSC replay check functionality for bcast/mcast 1177 * for now. 
	 */
	if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		return 0;

	/* Request PN validity/size/check updates in one REO command */
	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.upd0 |= HAL_REO_CMD_UPD0_PN |
		    HAL_REO_CMD_UPD0_PN_SIZE |
		    HAL_REO_CMD_UPD0_PN_VALID |
		    HAL_REO_CMD_UPD0_PN_CHECK |
		    HAL_REO_CMD_UPD0_SVLD;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_CCMP_256:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		/* PN check only on key install; DISABLE_KEY leaves the
		 * check bit clear so hardware stops enforcing it.
		 */
		if (key_cmd == SET_KEY) {
			cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
			cmd.pn_size = 48;
		}
		break;
	default:
		break;
	}

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to configure pn replay detection\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	/* Apply the update to every TID that has an active REO queue,
	 * including the non-QoS TID slot (hence <=).
	 */
	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
		rx_tid = &peer->rx_tid[tid];
		if (!rx_tid->active)
			continue;
		cmd.addr_lo = lower_32_bits(rx_tid->paddr);
		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
		ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
						HAL_REO_CMD_UPDATE_RX_QUEUE,
						&cmd, NULL);
		if (ret) {
			ath11k_warn(ab, "failed to configure rx tid %d queue for pn replay detection %d\n",
				    tid, ret);
			break;
		}
	}

	spin_unlock_bh(&ab->base_lock);

	return ret;
}

/* Return the user_stats[] slot for @peer_id: the existing slot when the
 * peer is already recorded, otherwise the first unused slot.  Returns
 * -EINVAL when all slots are taken.
 */
static inline int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
					     u16 peer_id)
{
	int i;

	for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
		if (ppdu_stats->user_stats[i].is_valid_peer_id) {
			if (peer_id == ppdu_stats->user_stats[i].peer_id)
				return i;
		} else {
			return i;
		}
	}

	return -EINVAL;
}

/* TLV iterator callback: copy the PPDU stats TLVs of interest into the
 * struct htt_ppdu_stats_info passed via @data.  Unknown tags are
 * silently skipped; a TLV shorter than its expected struct yields
 * -EINVAL.
 */
static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab,
					   u16 tag, u16 len, const void *ptr,
					   void *data)
{
	struct htt_ppdu_stats_info *ppdu_info;
	struct htt_ppdu_user_stats *user_stats;
	int cur_user;
	u16 peer_id;

	ppdu_info = data;

	switch (tag) {
	case HTT_PPDU_STATS_TAG_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_common)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}
		memcpy((void *)&ppdu_info->ppdu_stats.common, ptr,
		       sizeof(struct htt_ppdu_stats_common));
		break;
	case HTT_PPDU_STATS_TAG_USR_RATE:
		if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->rate, ptr,
		       sizeof(struct htt_ppdu_stats_user_rate));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->cmpltn_cmn, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
		if (len <
		    sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id =
		((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->ack_ba, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
		user_stats->tlv_flags |= BIT(tag);
		break;
	}
	return 0;
}

/* Walk an HTT TLV stream of @len bytes at @ptr, invoking @iter for each
 * TLV payload.  Bails out with -EINVAL on a truncated header or a TLV
 * length that overruns the buffer; propagates -ENOMEM from @iter, other
 * iterator errors are ignored and the walk continues.
 */
int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
			   int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
				       const void *ptr, void *data),
			   void *data)
{
	const struct htt_tlv *tlv;
	const void *begin = ptr;
	u16 tlv_tag, tlv_len;
	int ret = -EINVAL;

	while (len > 0) {
		if (len < sizeof(*tlv)) {
			ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}
		tlv = (struct htt_tlv *)ptr;
		tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header);
		tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		if (tlv_len > len) {
			ath11k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}
		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
		if (ret == -ENOMEM)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}
	return 0;
}

/* Translate one user's PPDU stats into mac80211/debugfs tx-rate and
 * per-peer statistics for the corresponding station.
 */
static void
ath11k_update_per_peer_tx_stats(struct ath11k *ar,
				struct htt_ppdu_stats *ppdu_stats, u8 user)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct ieee80211_sta *sta;
	struct ath11k_sta *arsta;
	struct htt_ppdu_stats_user_rate *user_rate;
	struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
	struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
	struct htt_ppdu_stats_common *common = &ppdu_stats->common;
	int ret;
	u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
	u32 succ_bytes = 0;
	u16 rate = 0, succ_pkts = 0;
	u32 tx_duration = 0;
	u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
	bool is_ampdu = false;

	/* Without the rate TLV there is nothing meaningful to report */
	if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
		return;

	if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
		is_ampdu =
			HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);

	if (usr_stats->tlv_flags &
	    BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
		succ_bytes = usr_stats->ack_ba.success_bytes;
		succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M,
				      usr_stats->ack_ba.info);
		tid = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM,
				usr_stats->ack_ba.info);
	}

	if (common->fes_duration_us)
		tx_duration = common->fes_duration_us;

	user_rate = &usr_stats->rate;
	flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
	/* NOTE(review): firmware BW encoding apparently starts at 2 for
	 * 20 MHz, hence the -2 offset - confirm against the HTT spec.
	 */
	bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
	nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
	mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
	sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
	dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);

	/* Note: If host configured fixed rates and in some other special
	 * cases, the broadcast/management frames are sent in different rates.
	 * Firmware rate's control to be skipped for this?
	 */

	if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH11K_HE_MCS_MAX) {
		ath11k_warn(ab, "Invalid HE mcs %d peer stats",  mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH11K_VHT_MCS_MAX) {
		ath11k_warn(ab, "Invalid VHT mcs %d peer stats",  mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH11K_HT_MCS_MAX || nss < 1)) {
		ath11k_warn(ab, "Invalid HT mcs %d nss %d peer stats",
			    mcs, nss);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
		ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
							    flags,
							    &rate_idx,
							    &rate);
		if (ret < 0)
			return;
	}

	rcu_read_lock();
	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id);

	if (!peer || !peer->sta) {
		spin_unlock_bh(&ab->base_lock);
		rcu_read_unlock();
		return;
	}

	sta = peer->sta;
	arsta = ath11k_sta_to_arsta(sta);

	memset(&arsta->txrate, 0, sizeof(arsta->txrate));

	switch (flags) {
	case WMI_RATE_PREAMBLE_OFDM:
		arsta->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_CCK:
		arsta->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_HT:
		/* HT rate index folds NSS into MCS (8 MCS per stream) */
		arsta->txrate.mcs = mcs + 8 * (nss - 1);
		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_VHT:
		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_HE:
		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
		arsta->txrate.he_dcm = dcm;
		arsta->txrate.he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
		arsta->txrate.he_ru_alloc = ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc
						((user_rate->ru_end -
						 user_rate->ru_start) + 1);
		break;
	}

	arsta->txrate.nss = nss;

	arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
	arsta->tx_duration += tx_duration;
	memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));

	/* PPDU stats reported for mgmt packet doesn't have valid tx bytes.
	 * So skip peer stats update for mgmt packets.
	 */
	if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
		memset(peer_stats, 0, sizeof(*peer_stats));
		peer_stats->succ_pkts = succ_pkts;
		peer_stats->succ_bytes = succ_bytes;
		peer_stats->is_ampdu = is_ampdu;
		peer_stats->duration = tx_duration;
		peer_stats->ba_fails =
			HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
			HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);

		if (ath11k_debugfs_is_extd_tx_stats_enabled(ar))
			ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx);
	}

	spin_unlock_bh(&ab->base_lock);
	rcu_read_unlock();
}

/* Flush every user slot of a completed PPDU into per-peer tx stats */
static void ath11k_htt_update_ppdu_stats(struct ath11k *ar,
					 struct htt_ppdu_stats *ppdu_stats)
{
	u8 user;

	for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
		ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user);
}

/* Look up (or allocate) the pending-stats descriptor for @ppdu_id.
 * When the pending list grows past HTT_PPDU_DESC_MAX_DEPTH the oldest
 * entry is flushed into peer stats and freed.  Caller must hold
 * ar->data_lock.  Returns NULL on allocation failure.
 */
static
struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
							u32 ppdu_id)
{
	struct htt_ppdu_stats_info *ppdu_info;

	lockdep_assert_held(&ar->data_lock);

	if (!list_empty(&ar->ppdu_stats_info)) {
		list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
			if (ppdu_info->ppdu_id == ppdu_id)
				return ppdu_info;
		}

		if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
			ppdu_info = list_first_entry(&ar->ppdu_stats_info,
						     typeof(*ppdu_info), list);
			list_del(&ppdu_info->list);
			ar->ppdu_stat_list_depth--;
			ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
			kfree(ppdu_info);
		}
	}

	ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
	if (!ppdu_info)
		return NULL;

	list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
	ar->ppdu_stat_list_depth++;

	return ppdu_info;
}

/* Handle an HTT_T2H PPDU stats indication: locate the radio by pdev id,
 * obtain/allocate the descriptor for this ppdu_id and parse the TLV
 * payload into it.  Returns 0 on success or a negative errno.
 */
static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab,
				      struct sk_buff *skb)
{
	struct ath11k_htt_ppdu_stats_msg *msg;
	struct htt_ppdu_stats_info *ppdu_info;
	struct ath11k *ar;
	int ret;
	u8 pdev_id;
	u32 ppdu_id, len;

	msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data;
	/* NOTE(review): len comes straight from firmware and is not
	 * checked against skb->len before the trace/TLV walk below -
	 * verify upstream bounds-check behavior.
	 */
	len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info);
	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info);
	ppdu_id = msg->ppdu_id;

	rcu_read_lock();
	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
	if (!ar) {
		ret = -EINVAL;
		goto out;
	}

	if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar))
		trace_ath11k_htt_ppdu_stats(ar, skb->data, len);

	spin_lock_bh(&ar->data_lock);
	ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id);
	if (!ppdu_info) {
		ret = -EINVAL;
		goto out_unlock_data;
	}

	ppdu_info->ppdu_id = ppdu_id;
	ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len,
				     ath11k_htt_tlv_ppdu_stats_parse,
				     (void *)ppdu_info);
	if (ret) {
		ath11k_warn(ab, "Failed to parse tlv %d\n", ret);
		goto out_unlock_data;
	}

out_unlock_data:
	spin_unlock_bh(&ar->data_lock);

out:
	rcu_read_unlock();

	return ret;
}

/* Forward an HTT pktlog message to the tracing infrastructure for the
 * radio identified in the message header.
 */
static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data;
	struct ath_pktlog_hdr *hdr = (struct ath_pktlog_hdr *)data;
	struct ath11k *ar;
	u8 pdev_id;

	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr);

	rcu_read_lock();

	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
	if (!ar) {
		ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id);
		goto out;
	}

	trace_ath11k_htt_pktlog(ar, data->payload, hdr->size,
				ar->ab->pktlog_defs_checksum);

out:
	rcu_read_unlock();
}

/* Record an HTT ring backpressure event (head/tail pointers and time)
 * into the matching UMAC or LMAC slot of ab->soc_stats for debugfs.
 */
static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
						  struct sk_buff *skb)
{
	u32 *data = (u32 *)skb->data;
	u8 pdev_id, ring_type, ring_id, pdev_idx;
	u16 hp, tp;
	u32 backpressure_time;
	struct ath11k_bp_stats *bp_stats;

	pdev_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_PDEV_ID_M, *data);
	ring_type = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_TYPE_M, *data);
	ring_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_ID_M, *data);
	++data;

	hp = FIELD_GET(HTT_BACKPRESSURE_EVENT_HP_M, *data);
	tp = FIELD_GET(HTT_BACKPRESSURE_EVENT_TP_M, *data);
	++data;

	backpressure_time = *data;

	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "backpressure event, pdev %d, ring type %d,ring id %d, hp %d tp %d, backpressure time %d\n",
		   pdev_id, ring_type, ring_id, hp, tp, backpressure_time);

	if (ring_type == HTT_BACKPRESSURE_UMAC_RING_TYPE) {
		if (ring_id >= HTT_SW_UMAC_RING_IDX_MAX)
			return;

		bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[ring_id];
	} else if (ring_type == HTT_BACKPRESSURE_LMAC_RING_TYPE) {
		pdev_idx = DP_HW2SW_MACID(pdev_id);

		if (ring_id >= HTT_SW_LMAC_RING_IDX_MAX || pdev_idx >= MAX_RADIOS)
			return;

		bp_stats = &ab->soc_stats.bp_stats.lmac_ring_bp_stats[ring_id][pdev_idx];
	} else {
		ath11k_warn(ab, "unknown ring type received in htt bp event %d\n",
			    ring_type);
		return;
	}

	spin_lock_bh(&ab->base_lock);
	bp_stats->hp = hp;
	bp_stats->tp = tp;
	bp_stats->count++;
	bp_stats->jiffies = jiffies;
	spin_unlock_bh(&ab->base_lock);
}

/* Top-level dispatcher for HTT target-to-host messages arriving on the
 * HTC control endpoint.  Consumes @skb in all cases.
 */
void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
				       struct sk_buff
				       *skb)
{
	struct ath11k_dp *dp = &ab->dp;
	struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
	enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp);
	u16 peer_id;
	u8 vdev_id;
	u8 mac_addr[ETH_ALEN];
	u16 peer_mac_h16;
	u16 ast_hash;
	u16 hw_peer_id;

	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type);

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF:
		dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR,
						  resp->version_msg.version);
		dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR,
						  resp->version_msg.version);
		/* Wakes up the HTT setup path waiting for the target version */
		complete(&dp->htt_tgt_version_received);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP:
		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
				    resp->peer_map_ev.info);
		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
				    resp->peer_map_ev.info);
		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
					 resp->peer_map_ev.info1);
		ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
				       peer_mac_h16, mac_addr);
		/* v1 map carries no AST hash / hw peer id - pass zeros */
		ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP2:
		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
				    resp->peer_map_ev.info);
		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
				    resp->peer_map_ev.info);
		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
					 resp->peer_map_ev.info1);
		ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
				       peer_mac_h16, mac_addr);
		ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL,
				     resp->peer_map_ev.info2);
		hw_peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID,
				       resp->peer_map_ev.info1);
		ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
				      hw_peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
	case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
		peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID,
				    resp->peer_unmap_ev.info);
		ath11k_peer_unmap_event(ab, peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
		ath11k_htt_pull_ppdu_stats(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
		ath11k_debugfs_htt_ext_stats_handler(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG:
		ath11k_htt_pktlog(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
		ath11k_htt_backpressure_event_handler(ab, skb);
		break;
	default:
		ath11k_warn(ab, "htt event %d not handled\n", type);
		break;
	}

	dev_kfree_skb_any(skb);
}

/* Reassemble an MSDU that spans multiple rx buffers into @first.
 * @last points at the buffer carrying the final part (and the valid
 * end-of-MPDU TLVs); @l3pad_bytes/@msdu_len describe the payload layout.
 * Consumes the continuation buffers from @msdu_list.  Returns 0 on
 * success or a negative errno (all buffers freed on failure).
 */
static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar,
				      struct sk_buff_head *msdu_list,
				      struct sk_buff *first, struct sk_buff *last,
				      u8 l3pad_bytes, int msdu_len)
{
	struct ath11k_base *ab = ar->ab;
	struct sk_buff *skb;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
	int buf_first_hdr_len, buf_first_len;
	struct hal_rx_desc *ldesc;
	int space_extra, rem_len, buf_len;
	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;

	/* As the msdu is spread across multiple rx buffers,
	 * find the offset to the start of msdu for computing
	 * the length of the msdu in the first buffer.
	 */
	buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes;
	buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;

	if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
		skb_put(first, buf_first_hdr_len + msdu_len);
		skb_pull(first, buf_first_hdr_len);
		return 0;
	}

	ldesc = (struct hal_rx_desc *)last->data;
	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ab, ldesc);
	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ab, ldesc);

	/* MSDU spans over multiple buffers because the length of the MSDU
	 * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
So assume the data
	 * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
	 */
	skb_put(first, DP_RX_BUFFER_SIZE);
	skb_pull(first, buf_first_hdr_len);

	/* When an MSDU spread over multiple buffers attention, MSDU_END and
	 * MPDU_END tlvs are valid only in the last buffer. Copy those tlvs.
	 */
	ath11k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc);

	space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
	if (space_extra > 0 &&
	    (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
		/* Free up all buffers of the MSDU */
		while ((skb = __skb_dequeue(msdu_list)) != NULL) {
			rxcb = ATH11K_SKB_RXCB(skb);
			if (!rxcb->is_continuation) {
				dev_kfree_skb_any(skb);
				break;
			}
			dev_kfree_skb_any(skb);
		}
		return -ENOMEM;
	}

	/* Append each continuation buffer's payload (past its rx
	 * descriptor) onto @first until the full MSDU length is gathered.
	 */
	rem_len = msdu_len - buf_first_len;
	while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
		rxcb = ATH11K_SKB_RXCB(skb);
		if (rxcb->is_continuation)
			buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
		else
			buf_len = rem_len;

		if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) {
			WARN_ON_ONCE(1);
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		skb_put(skb, buf_len + hal_rx_desc_sz);
		skb_pull(skb, hal_rx_desc_sz);
		skb_copy_from_linear_data(skb, skb_put(first, buf_len),
					  buf_len);
		dev_kfree_skb_any(skb);

		rem_len -= buf_len;
		if (!rxcb->is_continuation)
			break;
	}

	return 0;
}

/* Return the buffer carrying the last part of the MSDU that starts at
 * @first: @first itself when it is not continued, otherwise the first
 * non-continuation entry in @msdu_list; NULL if none is queued yet.
 */
static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
						      struct sk_buff *first)
{
	struct sk_buff *skb;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);

	if (!rxcb->is_continuation)
		return first;

	skb_queue_walk(msdu_list, skb) {
		rxcb = ATH11K_SKB_RXCB(skb);
		if (!rxcb->is_continuation)
			return skb;
	}

	return NULL;
}

/* Translate the hardware IP/L4 checksum verdict from the attention TLV
 * into skb->ip_summed for the network stack.
 */
static void ath11k_dp_rx_h_csum_offload(struct ath11k *ar, struct sk_buff *msdu)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	struct rx_attention *rx_attention;
	bool ip_csum_fail, l4_csum_fail;

	rx_attention = ath11k_dp_rx_get_attention(ar->ab, rxcb->rx_desc);
	ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rx_attention);
	l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rx_attention);

	msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
			  CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
}

/* MIC trailer length (bytes) for @enctype; TKIP returns 0 here because
 * its Michael MIC is accounted for separately.
 */
static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar,
				       enum hal_encrypt_type enctype)
{
	switch (enctype) {
	case HAL_ENCRYPT_TYPE_OPEN:
	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
	case HAL_ENCRYPT_TYPE_TKIP_MIC:
		return 0;
	case HAL_ENCRYPT_TYPE_CCMP_128:
		return IEEE80211_CCMP_MIC_LEN;
	case HAL_ENCRYPT_TYPE_CCMP_256:
		return IEEE80211_CCMP_256_MIC_LEN;
	case HAL_ENCRYPT_TYPE_GCMP_128:
	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
		return IEEE80211_GCMP_MIC_LEN;
	case HAL_ENCRYPT_TYPE_WEP_40:
	case HAL_ENCRYPT_TYPE_WEP_104:
	case HAL_ENCRYPT_TYPE_WEP_128:
	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
	case HAL_ENCRYPT_TYPE_WAPI:
		break;
	}

	ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
	return 0;
}

/* IV/PN header length (bytes) that precedes the payload for @enctype */
static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar,
					 enum hal_encrypt_type enctype)
{
	switch (enctype) {
	case HAL_ENCRYPT_TYPE_OPEN:
		return 0;
	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
	case HAL_ENCRYPT_TYPE_TKIP_MIC:
		return IEEE80211_TKIP_IV_LEN;
	case HAL_ENCRYPT_TYPE_CCMP_128:
		return IEEE80211_CCMP_HDR_LEN;
	case HAL_ENCRYPT_TYPE_CCMP_256:
		return IEEE80211_CCMP_256_HDR_LEN;
	case HAL_ENCRYPT_TYPE_GCMP_128:
	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
		return IEEE80211_GCMP_HDR_LEN;
	case HAL_ENCRYPT_TYPE_WEP_40:
	case HAL_ENCRYPT_TYPE_WEP_104:
	case HAL_ENCRYPT_TYPE_WEP_128:
	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
	case HAL_ENCRYPT_TYPE_WAPI:
		break;
	}

	ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
	return 0;
}

/* ICV trailer length (bytes) for @enctype; only TKIP carries one */
static int ath11k_dp_rx_crypto_icv_len(struct ath11k *ar,
				       enum hal_encrypt_type enctype)
{
	switch (enctype) {
	case HAL_ENCRYPT_TYPE_OPEN:
	case HAL_ENCRYPT_TYPE_CCMP_128:
	case HAL_ENCRYPT_TYPE_CCMP_256:
	case HAL_ENCRYPT_TYPE_GCMP_128:
	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
		return 0;
	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
	case HAL_ENCRYPT_TYPE_TKIP_MIC:
		return IEEE80211_TKIP_ICV_LEN;
	case HAL_ENCRYPT_TYPE_WEP_40:
	case HAL_ENCRYPT_TYPE_WEP_104:
	case HAL_ENCRYPT_TYPE_WEP_128:
	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
	case HAL_ENCRYPT_TYPE_WAPI:
		break;
	}

	ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
	return 0;
}

/* Convert a Native WiFi decapped MSDU back into a full 802.11 frame:
 * strip the decap header, re-add crypto params when the IV was kept,
 * rebuild the QoS header for non-first A-MSDU subframes and restore the
 * original DA/SA from the hardware-provided 802.11 header.
 */
static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar,
					 struct sk_buff *msdu,
					 u8 *first_hdr,
					 enum hal_encrypt_type enctype,
					 struct ieee80211_rx_status *status)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	u16 qos_ctl = 0;
	u8 *qos;

	/* copy SA & DA and pull decapped header */
	hdr = (struct ieee80211_hdr *)msdu->data;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
	skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control));

	if (rxcb->is_first_msdu) {
		/* original 802.11 header is valid for the first msdu
		 * hence we can reuse the same header
		 */
		hdr = (struct ieee80211_hdr *)first_hdr;
		hdr_len = ieee80211_hdrlen(hdr->frame_control);

		/* Each A-MSDU subframe will be reported as a separate MSDU,
		 * so strip the A-MSDU bit from QoS Ctl.
		 */
		if (ieee80211_is_data_qos(hdr->frame_control)) {
			qos = ieee80211_get_qos_ctl(hdr);
			qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
		}
	} else {
		/* Rebuild qos header if this is a middle/last msdu */
		hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA);

		/* Reset the order bit as the HT_Control header is stripped */
		hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER));

		qos_ctl = rxcb->tid;

		if (ath11k_dp_rx_h_msdu_start_mesh_ctl_present(ar->ab, rxcb->rx_desc))
			qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;

		/* TODO Add other QoS ctl fields when required */

		/* copy decap header before overwriting for reuse below */
		memcpy(decap_hdr, (uint8_t *)hdr, hdr_len);
	}

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath11k_dp_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + hdr_len,
		       ath11k_dp_rx_crypto_param_len(ar, enctype));
	}

	if (!rxcb->is_first_msdu) {
		memcpy(skb_push(msdu,
				IEEE80211_QOS_CTL_LEN), &qos_ctl,
		       IEEE80211_QOS_CTL_LEN);
		memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len);
		return;
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

/* Trim FCS and (when decrypted) the crypto MIC/ICV trailers and IV
 * header from a raw-decap MSDU, according to which parts the hardware
 * already stripped (per @status flags).  Only valid for unfragmented
 * single-buffer MSDUs (first && last).
 */
static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu,
				       enum hal_encrypt_type enctype,
				       struct ieee80211_rx_status *status,
				       bool decrypted)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	size_t crypto_len;

	/* Equivalent to !(is_first_msdu && is_last_msdu): raw undecap only
	 * supports a complete, single-buffer MSDU.
	 */
	if (!rxcb->is_first_msdu ||
	    !(rxcb->is_first_msdu && rxcb->is_last_msdu)) {
		WARN_ON_ONCE(1);
		return;
	}

	skb_trim(msdu, msdu->len - FCS_LEN);

	if (!decrypted)
		return;

	hdr = (void *)msdu->data;

	/* Tail */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		skb_trim(msdu, msdu->len -
			 ath11k_dp_rx_crypto_mic_len(ar, enctype));

		skb_trim(msdu, msdu->len -
			 ath11k_dp_rx_crypto_icv_len(ar, enctype));
	} else {
		/* MIC */
		if (status->flag & RX_FLAG_MIC_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath11k_dp_rx_crypto_mic_len(ar, enctype));

		/* ICV */
		if (status->flag & RX_FLAG_ICV_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath11k_dp_rx_crypto_icv_len(ar, enctype));
	}

	/* MMIC */
	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
		skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);

	/* Head */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);

		memmove((void *)msdu->data + crypto_len,
			(void *)msdu->data, hdr_len);
		skb_pull(msdu, crypto_len);
	}
}

/* Locate the RFC1042/LLC-SNAP header inside the hardware-provided
 * 802.11 header status area: past the 802.11 header and crypto params
 * for a first MSDU, plus the A-MSDU subframe header when applicable.
 */
static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar,
					 struct sk_buff *msdu,
					 enum hal_encrypt_type enctype)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	struct ieee80211_hdr *hdr;
	size_t hdr_len, crypto_len;
	void *rfc1042;
	bool is_amsdu;

	is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu);
	hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(ar->ab, rxcb->rx_desc);
	rfc1042 = hdr;

	if (rxcb->is_first_msdu) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);

		rfc1042 += hdr_len + crypto_len;
	}

	if (is_amsdu)
		rfc1042 += sizeof(struct ath11k_dp_amsdu_subframe_hdr);

	return rfc1042;
}

/* Convert an Ethernet-decapped MSDU back into a full 802.11 frame:
 * replace the Ethernet header with LLC/SNAP + the original 802.11
 * header (and crypto params when the IV was kept), restoring DA/SA.
 */
static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar,
				       struct sk_buff *msdu,
				       u8 *first_hdr,
				       enum hal_encrypt_type enctype,
				       struct ieee80211_rx_status *status)
{
	struct ieee80211_hdr *hdr;
	struct ethhdr *eth;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	void *rfc1042;

	rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype);
	if (WARN_ON_ONCE(!rfc1042))
		return;

	/* pull decapped header and copy SA & DA */
	eth = (struct ethhdr *)msdu->data;
	ether_addr_copy(da, eth->h_dest);
	ether_addr_copy(sa, eth->h_source);
	skb_pull(msdu, sizeof(struct ethhdr));

	/* push rfc1042/llc/snap */
	memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042,
	       sizeof(struct ath11k_dp_rfc1042_hdr));

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath11k_dp_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + hdr_len,
		       ath11k_dp_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

/* Dispatch to the decap handler matching the hardware-reported decap
 * format of @msdu, rebuilding an 802.11 frame where mac80211 needs one.
 */
static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
				   struct hal_rx_desc *rx_desc,
				   enum hal_encrypt_type enctype,
				   struct ieee80211_rx_status *status,
				   bool decrypted)
{
	u8 *first_hdr;
	u8 decap;
	struct ethhdr *ehdr;

	first_hdr = ath11k_dp_rx_h_80211_hdr(ar->ab,
rx_desc); 2184 decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc); 2185 2186 switch (decap) { 2187 case DP_RX_DECAP_TYPE_NATIVE_WIFI: 2188 ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr, 2189 enctype, status); 2190 break; 2191 case DP_RX_DECAP_TYPE_RAW: 2192 ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status, 2193 decrypted); 2194 break; 2195 case DP_RX_DECAP_TYPE_ETHERNET2_DIX: 2196 ehdr = (struct ethhdr *)msdu->data; 2197 2198 /* mac80211 allows fast path only for authorized STA */ 2199 if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) { 2200 ATH11K_SKB_RXCB(msdu)->is_eapol = true; 2201 ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr, 2202 enctype, status); 2203 break; 2204 } 2205 2206 /* PN for mcast packets will be validated in mac80211; 2207 * remove eth header and add 802.11 header. 2208 */ 2209 if (ATH11K_SKB_RXCB(msdu)->is_mcbc && decrypted) 2210 ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr, 2211 enctype, status); 2212 break; 2213 case DP_RX_DECAP_TYPE_8023: 2214 /* TODO: Handle undecap for these formats */ 2215 break; 2216 } 2217 } 2218 2219 static struct ath11k_peer * 2220 ath11k_dp_rx_h_find_peer(struct ath11k_base *ab, struct sk_buff *msdu) 2221 { 2222 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 2223 struct hal_rx_desc *rx_desc = rxcb->rx_desc; 2224 struct ath11k_peer *peer = NULL; 2225 2226 lockdep_assert_held(&ab->base_lock); 2227 2228 if (rxcb->peer_id) 2229 peer = ath11k_peer_find_by_id(ab, rxcb->peer_id); 2230 2231 if (peer) 2232 return peer; 2233 2234 if (!rx_desc || !(ath11k_dp_rxdesc_mac_addr2_valid(ab, rx_desc))) 2235 return NULL; 2236 2237 peer = ath11k_peer_find_by_addr(ab, 2238 ath11k_dp_rxdesc_mpdu_start_addr2(ab, rx_desc)); 2239 return peer; 2240 } 2241 2242 static void ath11k_dp_rx_h_mpdu(struct ath11k *ar, 2243 struct sk_buff *msdu, 2244 struct hal_rx_desc *rx_desc, 2245 struct ieee80211_rx_status *rx_status) 2246 { 2247 bool fill_crypto_hdr; 2248 enum hal_encrypt_type enctype; 2249 bool is_decrypted = false; 2250 
struct ath11k_skb_rxcb *rxcb; 2251 struct ieee80211_hdr *hdr; 2252 struct ath11k_peer *peer; 2253 struct rx_attention *rx_attention; 2254 u32 err_bitmap; 2255 2256 /* PN for multicast packets will be checked in mac80211 */ 2257 rxcb = ATH11K_SKB_RXCB(msdu); 2258 fill_crypto_hdr = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc); 2259 rxcb->is_mcbc = fill_crypto_hdr; 2260 2261 if (rxcb->is_mcbc) { 2262 rxcb->peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc); 2263 rxcb->seq_no = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc); 2264 } 2265 2266 spin_lock_bh(&ar->ab->base_lock); 2267 peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu); 2268 if (peer) { 2269 if (rxcb->is_mcbc) 2270 enctype = peer->sec_type_grp; 2271 else 2272 enctype = peer->sec_type; 2273 } else { 2274 enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc); 2275 } 2276 spin_unlock_bh(&ar->ab->base_lock); 2277 2278 rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc); 2279 err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention); 2280 if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap) 2281 is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention); 2282 2283 /* Clear per-MPDU flags while leaving per-PPDU flags intact */ 2284 rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC | 2285 RX_FLAG_MMIC_ERROR | 2286 RX_FLAG_DECRYPTED | 2287 RX_FLAG_IV_STRIPPED | 2288 RX_FLAG_MMIC_STRIPPED); 2289 2290 if (err_bitmap & DP_RX_MPDU_ERR_FCS) 2291 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; 2292 if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC) 2293 rx_status->flag |= RX_FLAG_MMIC_ERROR; 2294 2295 if (is_decrypted) { 2296 rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED; 2297 2298 if (fill_crypto_hdr) 2299 rx_status->flag |= RX_FLAG_MIC_STRIPPED | 2300 RX_FLAG_ICV_STRIPPED; 2301 else 2302 rx_status->flag |= RX_FLAG_IV_STRIPPED | 2303 RX_FLAG_PN_VALIDATED; 2304 } 2305 2306 ath11k_dp_rx_h_csum_offload(ar, msdu); 2307 ath11k_dp_rx_h_undecap(ar, msdu, rx_desc, 2308 enctype, rx_status, is_decrypted); 2309 
	if (!is_decrypted || fill_crypto_hdr)
		return;

	/* for unicast decrypted frames that were undecapped back to 802.11,
	 * clear the protected bit since crypto material was stripped
	 */
	if (ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc) !=
	    DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
		hdr = (void *)msdu->data;
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	}
}

/* Translate the hardware PHY receive parameters (preamble type, MCS, NSS,
 * bandwidth, GI, LDPC) from the rx descriptor into mac80211 rx_status
 * encoding/rate fields.
 */
static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
				struct ieee80211_rx_status *rx_status)
{
	struct ieee80211_supported_band *sband;
	enum rx_msdu_start_pkt_type pkt_type;
	u8 bw;
	u8 rate_mcs, nss;
	u8 sgi;
	bool is_cck, is_ldpc;

	pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(ar->ab, rx_desc);
	bw = ath11k_dp_rx_h_msdu_start_rx_bw(ar->ab, rx_desc);
	rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(ar->ab, rx_desc);
	nss = ath11k_dp_rx_h_msdu_start_nss(ar->ab, rx_desc);
	sgi = ath11k_dp_rx_h_msdu_start_sgi(ar->ab, rx_desc);

	switch (pkt_type) {
	case RX_MSDU_START_PKT_TYPE_11A:
	case RX_MSDU_START_PKT_TYPE_11B:
		is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
		sband = &ar->mac.sbands[rx_status->band];
		rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs,
								is_cck);
		break;
	case RX_MSDU_START_PKT_TYPE_11N:
		rx_status->encoding = RX_ENC_HT;
		if (rate_mcs > ATH11K_HT_MCS_MAX) {
			ath11k_warn(ar->ab,
				    "Received with invalid mcs in HT mode %d\n",
				    rate_mcs);
			break;
		}
		/* HT rate index folds NSS into the MCS (8 MCS per stream) */
		rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
		if (sgi)
			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
		break;
	case RX_MSDU_START_PKT_TYPE_11AC:
		rx_status->encoding = RX_ENC_VHT;
		/* NOTE(review): rate_idx is assigned before the MCS range
		 * check here (and in the HE case), unlike the HT case above
		 */
		rx_status->rate_idx = rate_mcs;
		if (rate_mcs > ATH11K_VHT_MCS_MAX) {
			ath11k_warn(ar->ab,
				    "Received with invalid mcs in VHT mode %d\n",
				    rate_mcs);
			break;
		}
		rx_status->nss = nss;
		if (sgi)
			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
		is_ldpc = ath11k_dp_rx_h_msdu_start_ldpc_support(ar->ab, rx_desc);
		if (is_ldpc)
			rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
		break;
	case RX_MSDU_START_PKT_TYPE_11AX:
		rx_status->rate_idx = rate_mcs;
		if (rate_mcs > ATH11K_HE_MCS_MAX) {
			ath11k_warn(ar->ab,
				    "Received with invalid mcs in HE mode %d\n",
				    rate_mcs);
			break;
		}
		rx_status->encoding = RX_ENC_HE;
		rx_status->nss = nss;
		rx_status->he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
		rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
		break;
	}
}

/* Fill per-PPDU rx_status fields: band/frequency derived from the channel
 * metadata in the rx descriptor (falling back to the current rx channel
 * when the channel number is unrecognized), then the rate fields.
 */
static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
				struct ieee80211_rx_status *rx_status)
{
	u8 channel_num;
	u32 center_freq, meta_data;
	struct ieee80211_channel *channel;

	rx_status->freq = 0;
	rx_status->rate_idx = 0;
	rx_status->nss = 0;
	rx_status->encoding = RX_ENC_LEGACY;
	rx_status->bw = RATE_INFO_BW_20;

	rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;

	/* low 16 bits: channel number, high 16 bits: center frequency */
	meta_data = ath11k_dp_rx_h_msdu_start_freq(ar->ab, rx_desc);
	channel_num = meta_data;
	center_freq = meta_data >> 16;

	if (center_freq >= ATH11K_MIN_6G_FREQ &&
	    center_freq <= ATH11K_MAX_6G_FREQ) {
		rx_status->band = NL80211_BAND_6GHZ;
		rx_status->freq = center_freq;
	} else if (channel_num >= 1 && channel_num <= 14) {
		rx_status->band = NL80211_BAND_2GHZ;
	} else if (channel_num >= 36 && channel_num <= 177) {
		rx_status->band = NL80211_BAND_5GHZ;
	} else {
		/* unknown channel: fall back to the radio's current rx
		 * channel and dump the descriptor for debugging
		 */
		spin_lock_bh(&ar->data_lock);
		channel = ar->rx_channel;
		if (channel) {
			rx_status->band = channel->band;
			channel_num =
				ieee80211_frequency_to_channel(channel->center_freq);
		}
		spin_unlock_bh(&ar->data_lock);
		ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ",
				rx_desc, sizeof(struct hal_rx_desc));
	}

	if (rx_status->band != NL80211_BAND_6GHZ)
		rx_status->freq = ieee80211_channel_to_frequency(channel_num,
								 rx_status->band);

	ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);
}

/* Hand a fully-processed MSDU to mac80211: prepend a radiotap HE header
 * for HE frames, resolve the station, decide fast (802.3) vs slow path
 * and deliver via ieee80211_rx_napi().
 */
static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi,
				      struct sk_buff *msdu,
				      struct ieee80211_rx_status *status)
{
	static const struct ieee80211_radiotap_he known = {
		.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
				     IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
		.data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
	};
	struct ieee80211_rx_status *rx_status;
	struct ieee80211_radiotap_he *he = NULL;
	struct ieee80211_sta *pubsta = NULL;
	struct ath11k_peer *peer;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	u8 decap = DP_RX_DECAP_TYPE_RAW;
	bool is_mcbc = rxcb->is_mcbc;
	bool is_eapol = rxcb->is_eapol;

	if (status->encoding == RX_ENC_HE &&
	    !(status->flag & RX_FLAG_RADIOTAP_HE) &&
	    !(status->flag & RX_FLAG_SKIP_MONITOR)) {
		he = skb_push(msdu, sizeof(known));
		memcpy(he, &known, sizeof(known));
		status->flag |= RX_FLAG_RADIOTAP_HE;
	}

	if (!(status->flag & RX_FLAG_ONLY_MONITOR))
		decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rxcb->rx_desc);

	spin_lock_bh(&ar->ab->base_lock);
	peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu);
	if (peer && peer->sta)
		pubsta = peer->sta;
	spin_unlock_bh(&ar->ab->base_lock);

	ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
		   "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
		   msdu,
		   msdu->len,
		   peer ? peer->addr : NULL,
		   rxcb->tid,
		   is_mcbc ? "mcast" : "ucast",
		   rxcb->seq_no,
		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
		   (status->encoding == RX_ENC_HT) ?
		   "ht" : "",
		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
		   (status->encoding == RX_ENC_HE) ? "he" : "",
		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));

	ath11k_dbg_dump(ar->ab, ATH11K_DBG_DP_RX, NULL, "dp rx msdu: ",
			msdu->data, msdu->len);

	rx_status = IEEE80211_SKB_RXCB(msdu);
	*rx_status = *status;

	/* TODO: trace rx packet */

	/* PN for multicast packets are not validate in HW,
	 * so skip 802.3 rx path
	 * Also, fast_rx expects the STA to be authorized, hence
	 * eapol packets are sent in slow path.
	 */
	if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol &&
	    !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
		rx_status->flag |= RX_FLAG_8023;

	ieee80211_rx_napi(ar->hw, pubsta, msdu, napi);
}

/* Validate one reaped MSDU and prepare it for delivery: check the
 * descriptor-reported length, confirm the msdu_done bit in the last
 * buffer's attention TLV, trim/pull the skb around the hal descriptor and
 * L3 padding (or coalesce a multi-buffer MSDU), then run the per-PPDU and
 * per-MPDU handlers. Returns 0 on success, -EIO/-EINVAL on a bad buffer
 * (caller frees the skb on error).
 */
static int ath11k_dp_rx_process_msdu(struct ath11k *ar,
				     struct sk_buff *msdu,
				     struct sk_buff_head *msdu_list,
				     struct ieee80211_rx_status *rx_status)
{
	struct ath11k_base *ab = ar->ab;
	struct hal_rx_desc *rx_desc, *lrx_desc;
	struct rx_attention *rx_attention;
	struct ath11k_skb_rxcb *rxcb;
	struct sk_buff *last_buf;
	u8 l3_pad_bytes;
	u8 *hdr_status;
	u16 msdu_len;
	int ret;
	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;

	last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
	if (!last_buf) {
		ath11k_warn(ab,
			    "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n");
		ret = -EIO;
		goto free_out;
	}

	rx_desc = (struct hal_rx_desc *)msdu->data;
	if (ath11k_dp_rx_h_attn_msdu_len_err(ab, rx_desc)) {
		ath11k_warn(ar->ab, "msdu len not valid\n");
		ret = -EIO;
		goto free_out;
	}

	/* the end-of-MPDU TLVs live in the last buffer of the MSDU */
	lrx_desc = (struct hal_rx_desc *)last_buf->data;
	rx_attention = ath11k_dp_rx_get_attention(ab, lrx_desc);
	if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) {
		ath11k_warn(ab, "msdu_done bit in attention is not set\n");
		ret = -EIO;
		goto free_out;
	}

	rxcb = ATH11K_SKB_RXCB(msdu);
	rxcb->rx_desc = rx_desc;
	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ab, rx_desc);
	l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ab, lrx_desc);

	if (rxcb->is_frag) {
		skb_pull(msdu, hal_rx_desc_sz);
	} else if (!rxcb->is_continuation) {
		/* single-buffer MSDU: reject lengths that exceed the buffer */
		if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
			hdr_status = ath11k_dp_rx_h_80211_hdr(ab, rx_desc);
			ret = -EINVAL;
			ath11k_warn(ab, "invalid msdu len %u\n", msdu_len);
			ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
					sizeof(struct ieee80211_hdr));
			ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
					sizeof(struct hal_rx_desc));
			goto free_out;
		}
		skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len);
		skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes);
	} else {
		/* MSDU spilled across buffers: merge them into one skb */
		ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list,
						 msdu, last_buf,
						 l3_pad_bytes, msdu_len);
		if (ret) {
			ath11k_warn(ab,
				    "failed to coalesce msdu rx buffer%d\n", ret);
			goto free_out;
		}
	}

	ath11k_dp_rx_h_ppdu(ar, rx_desc, rx_status);
	ath11k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status);

	rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;

	return 0;

free_out:
	return ret;
}

/* Drain a per-radio MSDU list: drop everything if the pdev is inactive or
 * CAC is running, otherwise process and deliver each MSDU.
 */
static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab,
						  struct napi_struct *napi,
						  struct sk_buff_head *msdu_list,
						  int mac_id)
{
	struct sk_buff *msdu;
	struct ath11k *ar;
	struct ieee80211_rx_status rx_status = {0};
	int ret;

	if (skb_queue_empty(msdu_list))
		return;

	/* pdev gone (e.g. teardown in progress): drop everything */
	if (unlikely(!rcu_access_pointer(ab->pdevs_active[mac_id]))) {
		__skb_queue_purge(msdu_list);
		return;
	}

	ar = ab->pdevs[mac_id].ar;
	/* no rx delivery while CAC (radar detection) is running */
	if (unlikely(test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags))) {
		__skb_queue_purge(msdu_list);
		return;
	}

	while ((msdu = __skb_dequeue(msdu_list))) {
		ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status);
		if (unlikely(ret)) {
			ath11k_dbg(ab, ATH11K_DBG_DATA,
				   "Unable to process msdu %d", ret);
			dev_kfree_skb_any(msdu);
			continue;
		}

		ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status);
	}
}

/* NAPI poll handler for a REO destination ring: reap up to @budget
 * completed MSDUs, unmap their DMA buffers, stash per-MSDU metadata in the
 * skb control block, then process each radio's list and replenish the
 * rx buffers that were consumed. Returns the number of MSDUs reaped.
 */
int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
			 struct napi_struct *napi, int budget)
{
	struct ath11k_dp *dp = &ab->dp;
	struct dp_rxdma_ring *rx_ring;
	int num_buffs_reaped[MAX_RADIOS] = {0};
	struct sk_buff_head msdu_list[MAX_RADIOS];
	struct ath11k_skb_rxcb *rxcb;
	int total_msdu_reaped = 0;
	struct hal_srng *srng;
	struct sk_buff *msdu;
	bool done = false;
	int buf_id, mac_id;
	struct ath11k *ar;
	struct hal_reo_dest_ring *desc;
	enum hal_reo_dest_ring_push_reason push_reason;
	u32 cookie;
	int i;

	for (i = 0; i < MAX_RADIOS; i++)
		__skb_queue_head_init(&msdu_list[i]);

	srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];

	spin_lock_bh(&srng->lock);

try_again:
	ath11k_hal_srng_access_begin(ab, srng);

	while (likely(desc =
	      (struct hal_reo_dest_ring *)ath11k_hal_srng_dst_get_next_entry(ab,
									     srng))) {
		/* cookie encodes both the pdev id and the buffer idr id */
		cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
				   desc->buf_addr_info.info1);
		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
				   cookie);
		mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);

		if (unlikely(buf_id == 0))
			continue;

		ar = ab->pdevs[mac_id].ar;
		rx_ring = &ar->dp.rx_refill_buf_ring;
		spin_lock_bh(&rx_ring->idr_lock);
		msdu = idr_find(&rx_ring->bufs_idr, buf_id);
		if (unlikely(!msdu)) {
			ath11k_warn(ab, "frame rx with invalid buf_id %d\n",
				    buf_id);
			spin_unlock_bh(&rx_ring->idr_lock);
			continue;
		}

		idr_remove(&rx_ring->bufs_idr, buf_id);
		spin_unlock_bh(&rx_ring->idr_lock);

		rxcb = ATH11K_SKB_RXCB(msdu);
		dma_unmap_single(ab->dev, rxcb->paddr,
				 msdu->len + skb_tailroom(msdu),
				 DMA_FROM_DEVICE);

		num_buffs_reaped[mac_id]++;

		push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
					desc->info0);
		if (unlikely(push_reason !=
			     HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION)) {
			dev_kfree_skb_any(msdu);
			ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++;
			continue;
		}

		rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 &
					 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
		rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 &
					RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
		rxcb->is_continuation = !!(desc->rx_msdu_info.info0 &
					   RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
		rxcb->peer_id = FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID,
					  desc->rx_mpdu_info.meta_data);
		rxcb->seq_no = FIELD_GET(RX_MPDU_DESC_INFO0_SEQ_NUM,
					 desc->rx_mpdu_info.info0);
		rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM,
				      desc->info0);

		rxcb->mac_id = mac_id;
		__skb_queue_tail(&msdu_list[mac_id], msdu);

		/* only count complete MSDUs against the budget; a
		 * continuation buffer belongs to the same MSDU
		 */
		if (rxcb->is_continuation) {
			done = false;
		} else {
			total_msdu_reaped++;
			done = true;
		}

		if (total_msdu_reaped >= budget)
			break;
	}

	/* Hw might have updated the head pointer after we cached it.
	 * In this case, even though there are entries in the ring we'll
	 * get rx_desc NULL. Give the read another try with updated cached
	 * head pointer so that we can reap complete MPDU in the current
	 * rx processing.
	 */
	if (unlikely(!done && ath11k_hal_srng_dst_num_free(ab, srng, true))) {
		ath11k_hal_srng_access_end(ab, srng);
		goto try_again;
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	if (unlikely(!total_msdu_reaped))
		goto exit;

	for (i = 0; i < ab->num_radios; i++) {
		if (!num_buffs_reaped[i])
			continue;

		ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list[i], i);

		ar = ab->pdevs[i].ar;
		rx_ring = &ar->dp.rx_refill_buf_ring;

		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
					   ab->hw_params.hal_params->rx_buf_rbm);
	}
exit:
	return total_msdu_reaped;
}

/* Fold the per-PPDU monitor statistics into the station's cumulative rx
 * statistics (MSDU counts, rate/NSS/GI/BW histograms, RSSI).
 */
static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
					   struct hal_rx_mon_ppdu_info *ppdu_info)
{
	struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats;
	u32 num_msdu;
	int i;

	if (!rx_stats)
		return;

	arsta->rssi_comb = ppdu_info->rssi_comb;
	ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb);

	num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
		   ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;

	rx_stats->num_msdu += num_msdu;
	rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count +
				    ppdu_info->tcp_ack_msdu_count;
	rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count;
	rx_stats->other_msdu_count += ppdu_info->other_msdu_count;

	/* legacy preambles carry no NSS/MCS/TID info: use sentinel values */
	if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A ||
	    ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) {
		ppdu_info->nss = 1;
		ppdu_info->mcs = HAL_RX_MAX_MCS;
		ppdu_info->tid = IEEE80211_NUM_TIDS;
	}

	if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS)
		rx_stats->nss_count[ppdu_info->nss - 1] +=
			num_msdu;

	if (ppdu_info->mcs <= HAL_RX_MAX_MCS)
		rx_stats->mcs_count[ppdu_info->mcs] += num_msdu;

	if (ppdu_info->gi < HAL_RX_GI_MAX)
		rx_stats->gi_count[ppdu_info->gi] += num_msdu;

	if (ppdu_info->bw < HAL_RX_BW_MAX)
		rx_stats->bw_count[ppdu_info->bw] += num_msdu;

	if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX)
		rx_stats->coding_count[ppdu_info->ldpc] += num_msdu;

	if (ppdu_info->tid <= IEEE80211_NUM_TIDS)
		rx_stats->tid_count[ppdu_info->tid] += num_msdu;

	if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX)
		rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu;

	if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX)
		rx_stats->reception_type[ppdu_info->reception_type] += num_msdu;

	if (ppdu_info->is_stbc)
		rx_stats->stbc_count += num_msdu;

	if (ppdu_info->beamformed)
		rx_stats->beamformed_count += num_msdu;

	if (ppdu_info->num_mpdu_fcs_ok > 1)
		rx_stats->ampdu_msdu_count += num_msdu;
	else
		rx_stats->non_ampdu_msdu_count += num_msdu;

	rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok;
	rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err;
	rx_stats->dcm_count += ppdu_info->dcm;
	rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu;

	arsta->rssi_comb = ppdu_info->rssi_comb;

	BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) >
		     ARRAY_SIZE(ppdu_info->rssi_chain_pri20));

	for (i = 0; i < ARRAY_SIZE(arsta->chain_signal); i++)
		arsta->chain_signal[i] = ppdu_info->rssi_chain_pri20[i];

	rx_stats->rx_duration += ppdu_info->rx_duration;
	arsta->rx_duration = rx_stats->rx_duration;
}

/* Allocate and DMA-map one aligned monitor-status rx buffer and register
 * it in the ring's idr. On success the mapped address is stored in the
 * skb control block and the idr id is returned via @buf_id; returns NULL
 * on any failure (all partial work is undone).
 */
static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab,
							 struct dp_rxdma_ring *rx_ring,
							 int *buf_id)
{
	struct sk_buff *skb;
	dma_addr_t paddr;

	/* over-allocate so the data pointer can be aligned below */
	skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
			    DP_RX_BUFFER_ALIGN_SIZE);

	if (!skb)
		goto fail_alloc_skb;

	if (!IS_ALIGNED((unsigned long)skb->data,
			DP_RX_BUFFER_ALIGN_SIZE)) {
		skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
			 skb->data);
	}

	paddr = dma_map_single(ab->dev, skb->data,
			       skb->len + skb_tailroom(skb),
			       DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(ab->dev, paddr)))
		goto fail_free_skb;

	spin_lock_bh(&rx_ring->idr_lock);
	*buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
			    rx_ring->bufs_max, GFP_ATOMIC);
	spin_unlock_bh(&rx_ring->idr_lock);
	if (*buf_id < 0)
		goto fail_dma_unmap;

	ATH11K_SKB_RXCB(skb)->paddr = paddr;
	return skb;

fail_dma_unmap:
	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
			 DMA_FROM_DEVICE);
fail_free_skb:
	dev_kfree_skb_any(skb);
fail_alloc_skb:
	return NULL;
}

/* Post up to @req_entries fresh status buffers to the monitor status
 * refill ring, bounded by the free ring entries. Returns the number of
 * buffers actually posted.
 */
int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
					   struct dp_rxdma_ring *rx_ring,
					   int req_entries,
					   enum hal_rx_buf_return_buf_manager mgr)
{
	struct hal_srng *srng;
	u32 *desc;
	struct sk_buff *skb;
	int num_free;
	int num_remain;
	int buf_id;
	u32 cookie;
	dma_addr_t paddr;

	req_entries = min(req_entries, rx_ring->bufs_max);

	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	num_free = ath11k_hal_srng_src_num_free(ab, srng, true);

	req_entries = min(num_free, req_entries);
	num_remain = req_entries;

	while (num_remain > 0) {
		skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
							&buf_id);
		if (!skb)
			break;
		paddr = ATH11K_SKB_RXCB(skb)->paddr;

		desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
		if (!desc)
			goto fail_desc_get;

		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

		num_remain--;

		ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;

fail_desc_get:
	/* undo the allocation that could not be posted to the ring */
	spin_lock_bh(&rx_ring->idr_lock);
	idr_remove(&rx_ring->bufs_idr, buf_id);
	spin_unlock_bh(&rx_ring->idr_lock);
	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
			 DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
	ath11k_hal_srng_access_end(ab, srng);
	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;
}

#define ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP 32535

/* Full-monitor mode: compare the PPDU id in the status buffer against the
 * id reaped from the destination ring and record whether the status ring
 * leads or lags, accounting for 16-bit-ish PPDU id wraparound.
 */
static void
ath11k_dp_rx_mon_update_status_buf_state(struct ath11k_mon_data *pmon,
					 struct hal_tlv_hdr *tlv)
{
	struct hal_rx_ppdu_start *ppdu_start;
	u16 ppdu_id_diff, ppdu_id, tlv_len;
	u8 *ptr;

	/* PPDU id is part of second tlv, move ptr to second tlv */
	tlv_len = FIELD_GET(HAL_TLV_HDR_LEN, tlv->tl);
	ptr = (u8 *)tlv;
	ptr += sizeof(*tlv) + tlv_len;
	tlv = (struct hal_tlv_hdr *)ptr;

	if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != HAL_RX_PPDU_START)
		return;

	ptr += sizeof(*tlv);
	ppdu_start = (struct hal_rx_ppdu_start *)ptr;
	ppdu_id = FIELD_GET(HAL_RX_PPDU_START_INFO0_PPDU_ID,
			    __le32_to_cpu(ppdu_start->info0));

	if (pmon->sw_mon_entries.ppdu_id < ppdu_id) {
		pmon->buf_state = DP_MON_STATUS_LEAD;
		/* a huge diff means the counter wrapped: invert verdict */
		ppdu_id_diff = ppdu_id - pmon->sw_mon_entries.ppdu_id;
		if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP)
			pmon->buf_state = DP_MON_STATUS_LAG;
	} else if (pmon->sw_mon_entries.ppdu_id > ppdu_id) {
		pmon->buf_state = DP_MON_STATUS_LAG;
		ppdu_id_diff = pmon->sw_mon_entries.ppdu_id - ppdu_id;
		if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP)
			pmon->buf_state = DP_MON_STATUS_LEAD;
	}
}

/* Peek at the NEXT (HP + 2) status ring entry and report whether its
 * buffer has completed DMA (status-buffer-done TLV present). Used to
 * decide whether a not-yet-done HP + 1 entry can be skipped.
 */
static enum dp_mon_status_buf_state
ath11k_dp_rx_mon_buf_done(struct ath11k_base *ab, struct hal_srng *srng,
			  struct dp_rxdma_ring *rx_ring)
{
	struct ath11k_skb_rxcb *rxcb;
	struct hal_tlv_hdr *tlv;
	struct sk_buff *skb;
	void *status_desc;
	dma_addr_t paddr;
	u32 cookie;
	int buf_id;
	u8 rbm;

	status_desc = ath11k_hal_srng_src_next_peek(ab, srng);
	if (!status_desc)
		return DP_MON_STATUS_NO_DMA;

	ath11k_hal_rx_buf_addr_info_get(status_desc, &paddr, &cookie, &rbm);

	buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);

	spin_lock_bh(&rx_ring->idr_lock);
	skb = idr_find(&rx_ring->bufs_idr, buf_id);
	spin_unlock_bh(&rx_ring->idr_lock);

	if (!skb)
		return DP_MON_STATUS_NO_DMA;

	rxcb = ATH11K_SKB_RXCB(skb);
	dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
				skb->len + skb_tailroom(skb),
				DMA_FROM_DEVICE);

	tlv = (struct hal_tlv_hdr *)skb->data;
	if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != HAL_RX_STATUS_BUFFER_DONE)
		return DP_MON_STATUS_NO_DMA;

	return DP_MON_STATUS_REPLINISH;
}

/* Reap completed monitor status buffers from the status refill ring into
 * @skb_list (up to *budget entries, decrementing it), immediately
 * replacing each reaped entry with a freshly allocated buffer. Returns
 * the number of ring entries consumed.
 */
static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
					     int *budget, struct sk_buff_head *skb_list)
{
	struct ath11k *ar;
	const struct ath11k_hw_hal_params *hal_params;
	enum dp_mon_status_buf_state reap_status;
	struct ath11k_pdev_dp *dp;
	struct dp_rxdma_ring *rx_ring;
	struct ath11k_mon_data *pmon;
	struct hal_srng *srng;
	void *rx_mon_status_desc;
	struct sk_buff *skb;
	struct ath11k_skb_rxcb *rxcb;
	struct hal_tlv_hdr *tlv;
	u32 cookie;
	int buf_id, srng_id;
	dma_addr_t paddr;
	u8 rbm;
	int num_buffs_reaped = 0;

	ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
	dp = &ar->dp;
	pmon = &dp->mon_data;
	srng_id = ath11k_hw_mac_id_to_srng_id(&ab->hw_params, mac_id);
	rx_ring = &dp->rx_mon_status_refill_ring[srng_id];

	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);
	while (*budget) {
		*budget -= 1;
		rx_mon_status_desc =
			ath11k_hal_srng_src_peek(ab, srng);
		if (!rx_mon_status_desc) {
			pmon->buf_state = DP_MON_STATUS_REPLINISH;
			break;
		}

		ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,
						&cookie, &rbm);
		if (paddr) {
			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);

			spin_lock_bh(&rx_ring->idr_lock);
			skb = idr_find(&rx_ring->bufs_idr, buf_id);
			spin_unlock_bh(&rx_ring->idr_lock);

			if (!skb) {
				ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n",
					    buf_id);
				pmon->buf_state = DP_MON_STATUS_REPLINISH;
				goto move_next;
			}

			rxcb = ATH11K_SKB_RXCB(skb);

			dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
						skb->len + skb_tailroom(skb),
						DMA_FROM_DEVICE);

			tlv = (struct hal_tlv_hdr *)skb->data;
			if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) !=
					HAL_RX_STATUS_BUFFER_DONE) {
				ath11k_warn(ab, "mon status DONE not set %lx, buf_id %d\n",
					    FIELD_GET(HAL_TLV_HDR_TAG,
						      tlv->tl), buf_id);
				/* RxDMA status done bit might not be set even
				 * though tp is moved by HW.
				 */

				/* If done status is missing:
				 * 1. As per MAC team's suggestion,
				 *    when HP + 1 entry is peeked and if DMA
				 *    is not done and if HP + 2 entry's DMA done
				 *    is set. skip HP + 1 entry and
				 *    start processing in next interrupt.
				 * 2. If HP + 2 entry's DMA done is not set,
				 *    poll onto HP + 1 entry DMA done to be set.
				 *    Check status for same buffer for next time
				 *    dp_rx_mon_status_srng_process
				 */

				reap_status = ath11k_dp_rx_mon_buf_done(ab, srng,
									rx_ring);
				if (reap_status == DP_MON_STATUS_NO_DMA)
					continue;

				spin_lock_bh(&rx_ring->idr_lock);
				idr_remove(&rx_ring->bufs_idr, buf_id);
				spin_unlock_bh(&rx_ring->idr_lock);

				dma_unmap_single(ab->dev, rxcb->paddr,
						 skb->len + skb_tailroom(skb),
						 DMA_FROM_DEVICE);

				dev_kfree_skb_any(skb);
				pmon->buf_state = DP_MON_STATUS_REPLINISH;
				goto move_next;
			}

			spin_lock_bh(&rx_ring->idr_lock);
			idr_remove(&rx_ring->bufs_idr, buf_id);
			spin_unlock_bh(&rx_ring->idr_lock);
			if (ab->hw_params.full_monitor_mode) {
				ath11k_dp_rx_mon_update_status_buf_state(pmon, tlv);
				if (paddr == pmon->mon_status_paddr)
					pmon->buf_state = DP_MON_STATUS_MATCH;
			}

			dma_unmap_single(ab->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);

			__skb_queue_tail(skb_list, skb);
		} else {
			pmon->buf_state = DP_MON_STATUS_REPLINISH;
		}
move_next:
		skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
							&buf_id);

		if (!skb) {
			/* no replacement buffer: park an empty entry */
			hal_params = ab->hw_params.hal_params;
			ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
							hal_params->rx_buf_rbm);
			num_buffs_reaped++;
			break;
		}
		rxcb = ATH11K_SKB_RXCB(skb);

		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

		ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr,
						cookie,
						ab->hw_params.hal_params->rx_buf_rbm);
		ath11k_hal_srng_src_get_next_entry(ab, srng);
		num_buffs_reaped++;
	}
	ath11k_hal_srng_access_end(ab, srng);
	spin_unlock_bh(&srng->lock);

	return num_buffs_reaped;
}

/* Defragmentation timeout: discard an incomplete fragment sequence unless
 * all fragments have already arrived.
 */
static void ath11k_dp_rx_frag_timer(struct timer_list *timer)
{
	struct
dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer); 3183 3184 spin_lock_bh(&rx_tid->ab->base_lock); 3185 if (rx_tid->last_frag_no && 3186 rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) { 3187 spin_unlock_bh(&rx_tid->ab->base_lock); 3188 return; 3189 } 3190 ath11k_dp_rx_frags_cleanup(rx_tid, true); 3191 spin_unlock_bh(&rx_tid->ab->base_lock); 3192 } 3193 3194 int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id) 3195 { 3196 struct ath11k_base *ab = ar->ab; 3197 struct crypto_shash *tfm; 3198 struct ath11k_peer *peer; 3199 struct dp_rx_tid *rx_tid; 3200 int i; 3201 3202 tfm = crypto_alloc_shash("michael_mic", 0, 0); 3203 if (IS_ERR(tfm)) { 3204 ath11k_warn(ab, "failed to allocate michael_mic shash: %ld\n", 3205 PTR_ERR(tfm)); 3206 return PTR_ERR(tfm); 3207 } 3208 3209 spin_lock_bh(&ab->base_lock); 3210 3211 peer = ath11k_peer_find(ab, vdev_id, peer_mac); 3212 if (!peer) { 3213 ath11k_warn(ab, "failed to find the peer to set up fragment info\n"); 3214 spin_unlock_bh(&ab->base_lock); 3215 crypto_free_shash(tfm); 3216 return -ENOENT; 3217 } 3218 3219 for (i = 0; i <= IEEE80211_NUM_TIDS; i++) { 3220 rx_tid = &peer->rx_tid[i]; 3221 rx_tid->ab = ab; 3222 timer_setup(&rx_tid->frag_timer, ath11k_dp_rx_frag_timer, 0); 3223 skb_queue_head_init(&rx_tid->rx_frags); 3224 } 3225 3226 peer->tfm_mmic = tfm; 3227 peer->dp_setup_done = true; 3228 spin_unlock_bh(&ab->base_lock); 3229 3230 return 0; 3231 } 3232 3233 static int ath11k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key, 3234 struct ieee80211_hdr *hdr, u8 *data, 3235 size_t data_len, u8 *mic) 3236 { 3237 SHASH_DESC_ON_STACK(desc, tfm); 3238 u8 mic_hdr[16] = {0}; 3239 u8 tid = 0; 3240 int ret; 3241 3242 if (!tfm) 3243 return -EINVAL; 3244 3245 desc->tfm = tfm; 3246 3247 ret = crypto_shash_setkey(tfm, key, 8); 3248 if (ret) 3249 goto out; 3250 3251 ret = crypto_shash_init(desc); 3252 if (ret) 3253 goto out; 3254 3255 /* TKIP MIC header */ 3256 memcpy(mic_hdr, 
			    ieee80211_get_DA(hdr), ETH_ALEN);
	memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN);
	/* Byte 12 of the Michael pseudo-header carries the TID (0 for non-QoS) */
	if (ieee80211_is_data_qos(hdr->frame_control))
		tid = ieee80211_get_tid(hdr);
	mic_hdr[12] = tid;

	ret = crypto_shash_update(desc, mic_hdr, 16);
	if (ret)
		goto out;
	ret = crypto_shash_update(desc, data, data_len);
	if (ret)
		goto out;
	ret = crypto_shash_final(desc, mic);
out:
	/* Scrub the shash descriptor; it held key-derived state */
	shash_desc_zero(desc);
	return ret;
}

/* Recompute the Michael MIC over a reassembled TKIP MPDU and compare it
 * against the MIC carried at the tail of the payload.  On mismatch the
 * frame is delivered to mac80211 flagged with RX_FLAG_MMIC_ERROR so TKIP
 * countermeasures can be triggered, and the skb is consumed.
 *
 * Returns 0 on success (or for non-TKIP frames), -EINVAL on MIC failure.
 */
static int ath11k_dp_rx_h_verify_tkip_mic(struct ath11k *ar, struct ath11k_peer *peer,
					  struct sk_buff *msdu)
{
	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
	struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
	struct ieee80211_key_conf *key_conf;
	struct ieee80211_hdr *hdr;
	u8 mic[IEEE80211_CCMP_MIC_LEN];
	int head_len, tail_len, ret;
	size_t data_len;
	u32 hdr_len, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
	u8 *key, *data;
	u8 key_idx;

	/* Only TKIP frames carry a Michael MIC; all others pass through */
	if (ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc) !=
	    HAL_ENCRYPT_TYPE_TKIP_MIC)
		return 0;

	hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN;
	tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN;

	if (!is_multicast_ether_addr(hdr->addr1))
		key_idx = peer->ucast_keyidx;
	else
		key_idx = peer->mcast_keyidx;

	/* NOTE(review): key_conf is dereferenced below without a NULL check;
	 * presumably a key is guaranteed installed when enctype is TKIP —
	 * TODO confirm against key install/removal paths.
	 */
	key_conf = peer->keys[key_idx];

	data = msdu->data + head_len;
	data_len = msdu->len - head_len - tail_len;
	/* RX MIC key sits at a fixed offset inside the TKIP key material */
	key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];

	ret = ath11k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic);
	if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN))
		goto mic_fail;

	return 0;

mic_fail:
	(ATH11K_SKB_RXCB(msdu))->is_first_msdu = true;
	(ATH11K_SKB_RXCB(msdu))->is_last_msdu = true;

	/* Deliver the frame to mac80211 anyway, marked as a Michael MIC
	 * failure, so it can account the event and run countermeasures.
	 */
	rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
		     RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
	skb_pull(msdu, hal_rx_desc_sz);

	ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
	ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
			       HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
	ieee80211_rx(ar->hw, msdu);
	return -EINVAL;
}

/* Strip the crypto header/trailers selected by @flags
 * (RX_FLAG_MIC_STRIPPED / RX_FLAG_ICV_STRIPPED / RX_FLAG_IV_STRIPPED) from
 * one RX fragment in place.  When the IV is removed, the 802.11 header is
 * slid up over it so the fragment stays contiguous.
 */
static void ath11k_dp_rx_h_undecap_frag(struct ath11k *ar, struct sk_buff *msdu,
					enum hal_encrypt_type enctype, u32 flags)
{
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	size_t crypto_len;
	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;

	if (!flags)
		return;

	hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);

	if (flags & RX_FLAG_MIC_STRIPPED)
		skb_trim(msdu, msdu->len -
			 ath11k_dp_rx_crypto_mic_len(ar, enctype));

	if (flags & RX_FLAG_ICV_STRIPPED)
		skb_trim(msdu, msdu->len -
			 ath11k_dp_rx_crypto_icv_len(ar, enctype));

	if (flags & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);

		/* Move the 802.11 header over the IV, then drop the gap */
		memmove((void *)msdu->data + hal_rx_desc_sz + crypto_len,
			(void *)msdu->data + hal_rx_desc_sz, hdr_len);
		skb_pull(msdu, crypto_len);
	}
}

/* Reassemble the fragments queued on @rx_tid into one MSDU held in the
 * first fragment's skb.  On success *defrag_skb points at the rebuilt
 * frame, or NULL when the TKIP MIC check failed and the frame was already
 * consumed.  Returns 0 on success or a negative errno.
 */
static int ath11k_dp_rx_h_defrag(struct ath11k *ar,
				 struct ath11k_peer *peer,
				 struct dp_rx_tid *rx_tid,
				 struct sk_buff **defrag_skb)
{
	struct hal_rx_desc *rx_desc;
	struct sk_buff *skb, *first_frag, *last_frag;
	struct ieee80211_hdr *hdr;
	struct rx_attention *rx_attention;
	enum hal_encrypt_type enctype;
	bool is_decrypted = false;
	int msdu_len = 0;
	int extra_space;
	u32 flags, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;

	first_frag = skb_peek(&rx_tid->rx_frags);
	last_frag = skb_peek_tail(&rx_tid->rx_frags);

	skb_queue_walk(&rx_tid->rx_frags, skb) {
		flags = 0;
		rx_desc = (struct hal_rx_desc *)skb->data;
		hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);

		enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc);
		if (enctype != HAL_ENCRYPT_TYPE_OPEN) {
			rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc);
			is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention);
		}

		if (is_decrypted) {
			/* Keep the IV on the first fragment and the MIC/ICV
			 * on the last; strip them everywhere else.
			 */
			if (skb != first_frag)
				flags |= RX_FLAG_IV_STRIPPED;
			if (skb != last_frag)
				flags |= RX_FLAG_ICV_STRIPPED |
					 RX_FLAG_MIC_STRIPPED;
		}

		/* RX fragments are always raw packets */
		if (skb != last_frag)
			skb_trim(skb, skb->len - FCS_LEN);
		ath11k_dp_rx_h_undecap_frag(ar, skb, enctype, flags);

		/* Only the first fragment keeps its rx desc and 802.11 header;
		 * later fragments contribute payload only.
		 */
		if (skb != first_frag)
			skb_pull(skb, hal_rx_desc_sz +
				 ieee80211_hdrlen(hdr->frame_control));
		msdu_len += skb->len;
	}

	/* Grow the first skb's tailroom if the joined payload won't fit */
	extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag));
	if (extra_space > 0 &&
	    (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0))
		return -ENOMEM;

	__skb_unlink(first_frag, &rx_tid->rx_frags);
	while ((skb = __skb_dequeue(&rx_tid->rx_frags))) {
		skb_put_data(first_frag, skb->data, skb->len);
		dev_kfree_skb_any(skb);
	}

	/* The rebuilt MPDU is no longer fragmented on air */
	hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz);
	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
	ATH11K_SKB_RXCB(first_frag)->is_frag = 1;

	if (ath11k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag))
		first_frag = NULL;

	*defrag_skb = first_frag;
	return 0;
}

/* Hand a defragmented MSDU back to the REO engine through the reinject
 * ring so the normal RX path (PN check, reordering) processes it like any
 * other MPDU.  Reuses the link descriptor saved from the first fragment.
 *
 * Returns 0 on success or a negative errno; on failure the caller still
 * owns @defrag_skb.
 */
static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_tid *rx_tid,
					      struct sk_buff *defrag_skb)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct dp_rxdma_ring *rx_refill_ring = &dp->rx_refill_buf_ring;
	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data;
	struct hal_reo_entrance_ring *reo_ent_ring;
	struct hal_reo_dest_ring *reo_dest_ring;
	struct dp_link_desc_bank *link_desc_banks;
	struct hal_rx_msdu_link *msdu_link;
	struct hal_rx_msdu_details *msdu0;
	struct hal_srng *srng;
	dma_addr_t paddr;
	u32 desc_bank, msdu_info, mpdu_info;
	u32 dst_idx, cookie, hal_rx_desc_sz;
	int ret, buf_id;

	hal_rx_desc_sz = ab->hw_params.hal_desc_sz;
	link_desc_banks = ab->dp.link_desc_banks;
	reo_dest_ring = rx_tid->dst_ring_desc;

	/* Locate the saved link descriptor and rewrite its first (and only)
	 * MSDU entry to describe the reassembled frame.
	 */
	ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
	msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr +
						(paddr - link_desc_banks[desc_bank].paddr));
	msdu0 = &msdu_link->msdu_link[0];
	dst_idx = FIELD_GET(RX_MSDU_DESC_INFO0_REO_DEST_IND, msdu0->rx_msdu_info.info0);
	memset(msdu0, 0, sizeof(*msdu0));

	msdu_info = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1) |
		    FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1) |
		    FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_CONTINUATION, 0) |
		    FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_LENGTH,
			       defrag_skb->len - hal_rx_desc_sz) |
		    FIELD_PREP(RX_MSDU_DESC_INFO0_REO_DEST_IND, dst_idx) |
		    FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_SA, 1) |
		    FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_DA, 1);
	msdu0->rx_msdu_info.info0 = msdu_info;

	/* change msdu len in hal rx desc */
	ath11k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz);

	paddr = dma_map_single(ab->dev, defrag_skb->data,
			       defrag_skb->len + skb_tailroom(defrag_skb),
			       DMA_TO_DEVICE);
	if (dma_mapping_error(ab->dev, paddr))
		return -ENOMEM;

	/* Track the skb in the refill ring's idr so the regular RX
	 * completion path can find it by cookie.
	 */
	spin_lock_bh(&rx_refill_ring->idr_lock);
	buf_id = idr_alloc(&rx_refill_ring->bufs_idr, defrag_skb, 0,
			   rx_refill_ring->bufs_max * 3, GFP_ATOMIC);
	spin_unlock_bh(&rx_refill_ring->idr_lock);
	if (buf_id < 0) {
		ret = -ENOMEM;
		goto err_unmap_dma;
	}

	ATH11K_SKB_RXCB(defrag_skb)->paddr = paddr;
	cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, dp->mac_id) |
		 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

	ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie,
					ab->hw_params.hal_params->rx_buf_rbm);

	/* Fill mpdu details into reo entrance ring */
	srng = &ab->hal.srng_list[ab->dp.reo_reinject_ring.ring_id];

	spin_lock_bh(&srng->lock);
	ath11k_hal_srng_access_begin(ab, srng);

	reo_ent_ring = (struct hal_reo_entrance_ring *)
		       ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!reo_ent_ring) {
		ath11k_hal_srng_access_end(ab, srng);
		spin_unlock_bh(&srng->lock);
		ret = -ENOSPC;
		goto err_free_idr;
	}
	memset(reo_ent_ring, 0, sizeof(*reo_ent_ring));

	ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
	ath11k_hal_rx_buf_addr_info_set(reo_ent_ring, paddr, desc_bank,
					HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST);

	mpdu_info = FIELD_PREP(RX_MPDU_DESC_INFO0_MSDU_COUNT, 1) |
		    FIELD_PREP(RX_MPDU_DESC_INFO0_SEQ_NUM, rx_tid->cur_sn) |
		    FIELD_PREP(RX_MPDU_DESC_INFO0_FRAG_FLAG, 0) |
		    FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_SA, 1) |
		    FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_DA, 1) |
		    FIELD_PREP(RX_MPDU_DESC_INFO0_RAW_MPDU, 1) |
		    FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_PN, 1);

	reo_ent_ring->rx_mpdu_info.info0 = mpdu_info;
	reo_ent_ring->rx_mpdu_info.meta_data = reo_dest_ring->rx_mpdu_info.meta_data;
	reo_ent_ring->queue_addr_lo = reo_dest_ring->queue_addr_lo;
	reo_ent_ring->info0 = FIELD_PREP(HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI,
					 FIELD_GET(HAL_REO_DEST_RING_INFO0_QUEUE_ADDR_HI,
						   reo_dest_ring->info0)) |
			      FIELD_PREP(HAL_REO_ENTR_RING_INFO0_DEST_IND, dst_idx);
	ath11k_hal_srng_access_end(ab, srng);
	spin_unlock_bh(&srng->lock);

	return 0;

err_free_idr:
	spin_lock_bh(&rx_refill_ring->idr_lock);
	idr_remove(&rx_refill_ring->bufs_idr, buf_id);
	spin_unlock_bh(&rx_refill_ring->idr_lock);
err_unmap_dma:
	dma_unmap_single(ab->dev, paddr, defrag_skb->len + skb_tailroom(defrag_skb),
			 DMA_TO_DEVICE);
	return ret;
}

/* Comparator: order two fragments by fragment number (<0 / 0 / >0) */
static int ath11k_dp_rx_h_cmp_frags(struct ath11k *ar,
				    struct sk_buff *a, struct sk_buff *b)
{
	int frag1, frag2;

	frag1 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, a);
	frag2 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, b);

	return frag1 - frag2;
}

/* Insert @cur_frag into @frag_list so the list stays sorted by fragment
 * number (simple insertion before the first larger-or-equal entry).
 */
static void ath11k_dp_rx_h_sort_frags(struct ath11k *ar,
				      struct sk_buff_head *frag_list,
				      struct sk_buff *cur_frag)
{
	struct sk_buff *skb;
	int cmp;

	skb_queue_walk(frag_list, skb) {
		cmp = ath11k_dp_rx_h_cmp_frags(ar, skb, cur_frag);
		if (cmp < 0)
			continue;
		__skb_queue_before(frag_list, skb, cur_frag);
		return;
	}
	__skb_queue_tail(frag_list, cur_frag);
}

/* Extract the 48-bit packet number from the security IV that follows the
 * 802.11 header.  IV bytes 2 and 3 are skipped, matching the CCMP/GCMP
 * header layout where those octets hold rsvd/key-id rather than PN.
 */
static u64 ath11k_dp_rx_h_get_pn(struct ath11k *ar, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	u64 pn = 0;
	u8 *ehdr;
	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;

	hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
	ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control);

	pn = ehdr[0];
	pn |= (u64)ehdr[1] << 8;
	pn |= (u64)ehdr[4] << 16;
	pn |= (u64)ehdr[5] << 24;
	pn |= (u64)ehdr[6] << 32;
	pn |= (u64)ehdr[7] << 40;

	return pn;
}

/* For CCMP/GCMP ciphers, check that the PNs of the queued fragments are
 * strictly sequential; other ciphers pass unconditionally.  Returns true
 * when the PN sequence is valid.
 */
static bool
ath11k_dp_rx_h_defrag_validate_incr_pn(struct ath11k *ar, struct dp_rx_tid *rx_tid)
{
	enum hal_encrypt_type encrypt_type;
	struct sk_buff *first_frag, *skb;
	struct hal_rx_desc *desc;
	u64 last_pn;
	u64 cur_pn;

	first_frag = skb_peek(&rx_tid->rx_frags);
	desc = (struct hal_rx_desc *)first_frag->data;

	encrypt_type = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, desc);
	if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 &&
	    encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 &&
	    encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 &&
	    encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256)
		return true;

	last_pn = ath11k_dp_rx_h_get_pn(ar, first_frag);
	skb_queue_walk(&rx_tid->rx_frags, skb) {
		if (skb == first_frag)
			continue;

		cur_pn = ath11k_dp_rx_h_get_pn(ar, skb);
		if (cur_pn != last_pn + 1)
			return false;
		last_pn = cur_pn;
	}
	return true;
}

/* Handle one RX fragment from the REO exception path: queue it on the
 * peer/TID reassembly list and, once every fragment has arrived, validate
 * the PN sequence, reassemble and reinject the full MPDU into REO.
 * Returns 0 on success or a negative errno.
 */
static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
				    struct sk_buff *msdu,
				    u32 *ring_desc)
{
	struct ath11k_base *ab = ar->ab;
	struct hal_rx_desc *rx_desc;
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;
	struct sk_buff *defrag_skb = NULL;
	u32 peer_id;
	u16 seqno, frag_no;
	u8 tid;
	int ret = 0;
	bool more_frags;
	bool is_mcbc;

	rx_desc = (struct hal_rx_desc *)msdu->data;
	peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
	tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, rx_desc);
	seqno = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
	frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, msdu);
	more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(ar->ab, msdu);
	is_mcbc = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc);

	/* Multicast/Broadcast fragments are not expected */
	if (is_mcbc)
		return -EINVAL;

	if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(ar->ab, rx_desc) ||
	    !ath11k_dp_rx_h_mpdu_start_fc_valid(ar->ab, rx_desc) ||
	    tid > IEEE80211_NUM_TIDS)
		return -EINVAL;

	/* received unfragmented packet in reo
	 * exception ring, this shouldn't happen
	 * as these packets typically come from
	 * reo2sw srngs.
	 */
	if (WARN_ON_ONCE(!frag_no && !more_frags))
		return -EINVAL;

	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find_by_id(ab, peer_id);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n",
			    peer_id);
		ret = -ENOENT;
		goto out_unlock;
	}
	if (!peer->dp_setup_done) {
		ath11k_warn(ab, "The peer %pM [%d] has uninitialized datapath\n",
			    peer->addr, peer_id);
		ret = -ENOENT;
		goto out_unlock;
	}

	rx_tid = &peer->rx_tid[tid];

	if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
	    skb_queue_empty(&rx_tid->rx_frags)) {
		/* Flush stored fragments and start a new sequence */
		ath11k_dp_rx_frags_cleanup(rx_tid, true);
		rx_tid->cur_sn = seqno;
	}

	if (rx_tid->rx_frag_bitmap & BIT(frag_no)) {
		/* Fragment already present */
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Fast path: append when the new fragment sorts after all queued ones */
	if (!rx_tid->rx_frag_bitmap || (frag_no > __fls(rx_tid->rx_frag_bitmap)))
		__skb_queue_tail(&rx_tid->rx_frags, msdu);
	else
		ath11k_dp_rx_h_sort_frags(ar, &rx_tid->rx_frags, msdu);

	rx_tid->rx_frag_bitmap |= BIT(frag_no);
	if (!more_frags)
		rx_tid->last_frag_no = frag_no;

	if (frag_no == 0) {
		/* Keep the first fragment's REO descriptor for reinjection */
		rx_tid->dst_ring_desc = kmemdup(ring_desc,
						sizeof(*rx_tid->dst_ring_desc),
						GFP_ATOMIC);
		if (!rx_tid->dst_ring_desc) {
			ret = -ENOMEM;
			goto out_unlock;
		}
	} else {
		ath11k_dp_rx_link_desc_return(ab, ring_desc,
					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
	}

	/* Not all fragments seen yet: (re)arm the reassembly timeout */
	if (!rx_tid->last_frag_no ||
	    rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) {
		mod_timer(&rx_tid->frag_timer, jiffies +
					       ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS);
		goto out_unlock;
	}

	/* base_lock is dropped around del_timer_sync() — presumably the timer
	 * callback takes base_lock, so holding it here would deadlock (TODO
	 * confirm).  The peer may vanish meanwhile, hence the re-lookup.
	 */
	spin_unlock_bh(&ab->base_lock);
	del_timer_sync(&rx_tid->frag_timer);
	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find_by_id(ab, peer_id);
	if (!peer)
		goto err_frags_cleanup;

	if (!ath11k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid))
		goto err_frags_cleanup;

	if (ath11k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb))
		goto err_frags_cleanup;

	if (!defrag_skb)
		goto err_frags_cleanup;

	if (ath11k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb))
		goto err_frags_cleanup;

	ath11k_dp_rx_frags_cleanup(rx_tid, false);
	goto out_unlock;

err_frags_cleanup:
	dev_kfree_skb_any(defrag_skb);
	ath11k_dp_rx_frags_cleanup(rx_tid, true);
out_unlock:
	spin_unlock_bh(&ab->base_lock);
	return ret;
}

/* Reclaim the RX buffer referenced by @buf_id from the error path: unmap
 * it and either drop it (@drop) or run fragment reassembly on it.
 * Returns 0 when the buffer was consumed, -EINVAL for a stale buf_id.
 */
static int
ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool drop)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
	struct sk_buff *msdu;
	struct ath11k_skb_rxcb *rxcb;
	struct hal_rx_desc *rx_desc;
	u8 *hdr_status;
	u16 msdu_len;
	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;

	spin_lock_bh(&rx_ring->idr_lock);
	msdu = idr_find(&rx_ring->bufs_idr, buf_id);
	if (!msdu) {
		ath11k_warn(ar->ab, "rx err buf with invalid buf_id %d\n",
			    buf_id);
		spin_unlock_bh(&rx_ring->idr_lock);
		return -EINVAL;
	}

	idr_remove(&rx_ring->bufs_idr, buf_id);
	spin_unlock_bh(&rx_ring->idr_lock);

	rxcb = ATH11K_SKB_RXCB(msdu);
	dma_unmap_single(ar->ab->dev, rxcb->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);

	if (drop) {
		dev_kfree_skb_any(msdu);
		return 0;
	}

	/* Drop silently while the pdev is going down or CAC is running */
	rcu_read_lock();
	if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {
		dev_kfree_skb_any(msdu);
		goto exit;
	}

	if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
		dev_kfree_skb_any(msdu);
		goto exit;
	}

	rx_desc = (struct hal_rx_desc *)msdu->data;
	msdu_len =
		ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, rx_desc);
	/* Sanity-check the HW-reported length before skb_put() */
	if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
		hdr_status = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc);
		ath11k_warn(ar->ab, "invalid msdu leng %u", msdu_len);
		ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
				sizeof(struct ieee80211_hdr));
		ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
				sizeof(struct hal_rx_desc));
		dev_kfree_skb_any(msdu);
		goto exit;
	}

	skb_put(msdu, hal_rx_desc_sz + msdu_len);

	if (ath11k_dp_rx_frag_h_mpdu(ar, msdu, ring_desc)) {
		dev_kfree_skb_any(msdu);
		ath11k_dp_rx_link_desc_return(ar->ab, ring_desc,
					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
	}
exit:
	rcu_read_unlock();
	return 0;
}

/* NAPI handler for the REO exception ring.  Reaps error descriptors up to
 * @budget, routes single-MSDU fragments into defragmentation, drops the
 * rest, and replenishes the reaped buffers per radio.  Returns the number
 * of buffers reaped.
 */
int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
			     int budget)
{
	u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
	struct dp_link_desc_bank *link_desc_banks;
	enum hal_rx_buf_return_buf_manager rbm;
	int tot_n_bufs_reaped, quota, ret, i;
	int n_bufs_reaped[MAX_RADIOS] = {0};
	struct dp_rxdma_ring *rx_ring;
	struct dp_srng *reo_except;
	u32 desc_bank, num_msdus;
	struct hal_srng *srng;
	struct ath11k_dp *dp;
	void *link_desc_va;
	int buf_id, mac_id;
	struct ath11k *ar;
	dma_addr_t paddr;
	u32 *desc;
	bool is_frag;
	u8 drop = 0;

	tot_n_bufs_reaped = 0;
	quota = budget;

	dp = &ab->dp;
	reo_except = &dp->reo_except_ring;
	link_desc_banks = dp->link_desc_banks;

	srng = &ab->hal.srng_list[reo_except->ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	while (budget &&
	       (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
		struct hal_reo_dest_ring *reo_desc = (struct hal_reo_dest_ring *)desc;

		ab->soc_stats.err_ring_pkts++;
		ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr,
						    &desc_bank);
		if (ret) {
			ath11k_warn(ab, "failed to parse error reo desc %d\n",
				    ret);
			continue;
		}
		link_desc_va = link_desc_banks[desc_bank].vaddr +
			       (paddr - link_desc_banks[desc_bank].paddr);
		ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
						 &rbm);
		if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
		    rbm != HAL_RX_BUF_RBM_SW3_BM) {
			ab->soc_stats.invalid_rbm++;
			ath11k_warn(ab, "invalid return buffer manager %d\n", rbm);
			ath11k_dp_rx_link_desc_return(ab, desc,
						      HAL_WBM_REL_BM_ACT_REL_MSDU);
			continue;
		}

		is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG);

		/* Process only rx fragments with one msdu per link desc below, and drop
		 * msdu's indicated due to error reasons.
		 */
		if (!is_frag || num_msdus > 1) {
			drop = 1;
			/* Return the link desc back to wbm idle list */
			ath11k_dp_rx_link_desc_return(ab, desc,
						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
		}

		for (i = 0; i < num_msdus; i++) {
			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
					   msdu_cookies[i]);

			mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID,
					   msdu_cookies[i]);

			ar = ab->pdevs[mac_id].ar;

			if (!ath11k_dp_process_rx_err_buf(ar, desc, buf_id, drop)) {
				n_bufs_reaped[mac_id]++;
				tot_n_bufs_reaped++;
			}
		}

		if (tot_n_bufs_reaped >= quota) {
			tot_n_bufs_reaped = quota;
			goto exit;
		}

		budget = quota - tot_n_bufs_reaped;
	}

exit:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	/* Refill each radio's RX ring with as many buffers as were reaped */
	for (i = 0; i < ab->num_radios; i++) {
		if (!n_bufs_reaped[i])
			continue;

		ar = ab->pdevs[i].ar;
		rx_ring = &ar->dp.rx_refill_buf_ring;

		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i],
					   ab->hw_params.hal_params->rx_buf_rbm);
	}

	return tot_n_bufs_reaped;
}

/* Drop from @msdu_list the scatter continuation buffers belonging to an
 * over-sized MSDU: up to n_buffs skbs that carry the same REO
 * DESC_ADDR_ZERO error code are unlinked and freed.
 */
static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar,
					     int msdu_len,
					     struct sk_buff_head *msdu_list)
{
	struct sk_buff *skb, *tmp;
	struct ath11k_skb_rxcb *rxcb;
	int n_buffs;

	n_buffs = DIV_ROUND_UP(msdu_len,
			       (DP_RX_BUFFER_SIZE - ar->ab->hw_params.hal_desc_sz));

	skb_queue_walk_safe(msdu_list, skb, tmp) {
		rxcb = ATH11K_SKB_RXCB(skb);
		if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
		    rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
			if (!n_buffs)
				break;
			__skb_unlink(skb, msdu_list);
			dev_kfree_skb_any(skb);
			n_buffs--;
		}
	}
}

/* Recover a frame that hit the NULL-queue-descriptor REO error so it can
 * still be delivered.  Returns 0 when @msdu has been prepared for normal
 * RX processing, a negative errno when the caller must drop it.
 */
static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu,
				      struct ieee80211_rx_status *status,
				      struct sk_buff_head *msdu_list)
{
	u16 msdu_len;
	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
	struct rx_attention *rx_attention;
	u8 l3pad_bytes;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;

	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc);

	if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) {
		/* First buffer will be freed by the caller, so deduct it's length */
		msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz);
		ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
		return -EINVAL;
	}

	rx_attention = ath11k_dp_rx_get_attention(ar->ab, desc);
	if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) {
		ath11k_warn(ar->ab,
			    "msdu_done bit not set in null_q_des processing\n");
		__skb_queue_purge(msdu_list);
		return -EIO;
	}

	/* Handle NULL queue descriptor violations arising out a missing
	 * REO queue for a given peer or a given TID. This typically
	 * may happen if a packet is received on a QOS enabled TID before the
	 * ADDBA negotiation for that TID, when the TID queue is setup. Or
	 * it may also happen for MC/BC frames if they are not routed to the
	 * non-QOS TID queue, in the absence of any other default TID queue.
	 * This error can show up both in a REO destination or WBM release ring.
	 */

	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc);
	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc);

	if (rxcb->is_frag) {
		skb_pull(msdu, hal_rx_desc_sz);
	} else {
		l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc);

		if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
			return -EINVAL;

		skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
		skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
	}
	ath11k_dp_rx_h_ppdu(ar, desc, status);

	ath11k_dp_rx_h_mpdu(ar, msdu, desc, status);

	rxcb->tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, desc);

	/* Please note that caller will having the access to msdu and completing
	 * rx with mac80211. Need not worry about cleaning up amsdu_list.
	 */

	return 0;
}

/* Dispatch one REO-released error MSDU by error code.  Returns true when
 * the caller should drop @msdu, false when it can be delivered.
 */
static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu,
				   struct ieee80211_rx_status *status,
				   struct sk_buff_head *msdu_list)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	bool drop = false;

	ar->ab->soc_stats.reo_error[rxcb->err_code]++;

	switch (rxcb->err_code) {
	case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
		if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
			drop = true;
		break;
	case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
		/* TODO: Do not drop PN failed packets in the driver;
		 * instead, it is good to drop such packets in mac80211
		 * after incrementing the replay counters.
		 */
		fallthrough;
	default:
		/* TODO: Review other errors and process them to mac80211
		 * as appropriate.
		 */
		drop = true;
		break;
	}

	return drop;
}

/* Prepare a TKIP-MIC-error frame for delivery to mac80211: trim it to the
 * payload, fill the rx status, flag the MIC failure and undecap.
 */
static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu,
					struct ieee80211_rx_status *status)
{
	u16 msdu_len;
	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
	u8 l3pad_bytes;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;

	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc);
	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc);

	l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc);
	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc);
	skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
	skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);

	ath11k_dp_rx_h_ppdu(ar, desc, status);

	status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
			 RX_FLAG_DECRYPTED);

	ath11k_dp_rx_h_undecap(ar, msdu, desc,
			       HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
}

/* Dispatch one RXDMA-released error MSDU by error code.  Returns true
 * when the caller should drop @msdu.
 */
static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar, struct sk_buff *msdu,
				     struct ieee80211_rx_status *status)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	bool drop = false;

	ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;

	switch (rxcb->err_code) {
	case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
		ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status);
		break;
	default:
		/* TODO: Review other rxdma error code to check if anything is
		 * worth reporting to mac80211
		 */
		drop = true;
		break;
	}

	return drop;
}

/* Route one WBM-released error MSDU to the REO or RXDMA error handler and
 * deliver it to mac80211 unless the handler asked for a drop.
 */
static void ath11k_dp_rx_wbm_err(struct ath11k *ar,
				 struct napi_struct *napi,
				 struct sk_buff *msdu,
				 struct sk_buff_head *msdu_list)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	struct ieee80211_rx_status rxs = {0};
	bool drop = true;

	switch (rxcb->err_rel_src) {
	case HAL_WBM_REL_SRC_MODULE_REO:
		drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
		break;
	case HAL_WBM_REL_SRC_MODULE_RXDMA:
		drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
		break;
	default:
		/* msdu will get freed */
		break;
	}

	if (drop) {
		dev_kfree_skb_any(msdu);
		return;
	}

	ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rxs);
}

/* NAPI handler for the WBM release (error) ring.  Reaps up to @budget
 * descriptors, unmaps the buffers, stashes the error source/code in each
 * skb's control block, replenishes the rings, and finally processes the
 * collected MSDUs per radio.  Returns the number of buffers reaped.
 */
int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
				 struct napi_struct *napi, int budget)
{
	struct ath11k *ar;
	struct ath11k_dp *dp = &ab->dp;
	struct dp_rxdma_ring *rx_ring;
	struct hal_rx_wbm_rel_info err_info;
	struct hal_srng *srng;
	struct sk_buff *msdu;
	struct sk_buff_head msdu_list[MAX_RADIOS];
	struct ath11k_skb_rxcb *rxcb;
	u32 *rx_desc;
	int buf_id, mac_id;
	int num_buffs_reaped[MAX_RADIOS] = {0};
	int total_num_buffs_reaped = 0;
	int ret, i;

	for (i = 0; i < ab->num_radios; i++)
		__skb_queue_head_init(&msdu_list[i]);

	srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	while (budget) {
		rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
		if (!rx_desc)
			break;

		ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
		if (ret) {
			ath11k_warn(ab,
				    "failed to parse rx error in wbm_rel ring desc %d\n",
				    ret);
			continue;
		}

		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie);
		mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie);

		ar = ab->pdevs[mac_id].ar;
		rx_ring = &ar->dp.rx_refill_buf_ring;

		spin_lock_bh(&rx_ring->idr_lock);
		msdu = idr_find(&rx_ring->bufs_idr, buf_id);
		if (!msdu) {
			ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d\n",
				    buf_id, mac_id);
			spin_unlock_bh(&rx_ring->idr_lock);
			continue;
		}

		idr_remove(&rx_ring->bufs_idr, buf_id);
		spin_unlock_bh(&rx_ring->idr_lock);

		rxcb = ATH11K_SKB_RXCB(msdu);
		dma_unmap_single(ab->dev, rxcb->paddr,
				 msdu->len + skb_tailroom(msdu),
				 DMA_FROM_DEVICE);

		num_buffs_reaped[mac_id]++;
		total_num_buffs_reaped++;
		budget--;

		if (err_info.push_reason !=
		    HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
			dev_kfree_skb_any(msdu);
			continue;
		}

		rxcb->err_rel_src = err_info.err_rel_src;
		rxcb->err_code = err_info.err_code;
		rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
		__skb_queue_tail(&msdu_list[mac_id], msdu);
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	if (!total_num_buffs_reaped)
		goto done;

	for (i = 0; i < ab->num_radios; i++) {
		if (!num_buffs_reaped[i])
			continue;

		ar = ab->pdevs[i].ar;
		rx_ring = &ar->dp.rx_refill_buf_ring;

		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
					   ab->hw_params.hal_params->rx_buf_rbm);
	}

	rcu_read_lock();
	for (i = 0; i < ab->num_radios; i++) {
		/* Discard everything queued for radios that are inactive or
		 * in CAC; otherwise process the list.
		 */
		if (!rcu_dereference(ab->pdevs_active[i])) {
			__skb_queue_purge(&msdu_list[i]);
			continue;
		}

		ar = ab->pdevs[i].ar;

		if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
			__skb_queue_purge(&msdu_list[i]);
			continue;
		}

		while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)
			ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);
	}
	rcu_read_unlock();
done:
	return total_num_buffs_reaped;
}

/* Drain the per-pdev RXDMA error destination ring: free every buffer
 * referenced by the faulting link descriptors, return the link descriptors
 * to the WBM idle list and replenish the RX ring.  Returns the number of
 * ring entries consumed.
 */
int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget)
{
	struct ath11k *ar;
	struct dp_srng *err_ring;
	struct
dp_rxdma_ring *rx_ring; 4258 struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks; 4259 struct hal_srng *srng; 4260 u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC]; 4261 enum hal_rx_buf_return_buf_manager rbm; 4262 enum hal_reo_entr_rxdma_ecode rxdma_err_code; 4263 struct ath11k_skb_rxcb *rxcb; 4264 struct sk_buff *skb; 4265 struct hal_reo_entrance_ring *entr_ring; 4266 void *desc; 4267 int num_buf_freed = 0; 4268 int quota = budget; 4269 dma_addr_t paddr; 4270 u32 desc_bank; 4271 void *link_desc_va; 4272 int num_msdus; 4273 int i; 4274 int buf_id; 4275 4276 ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar; 4277 err_ring = &ar->dp.rxdma_err_dst_ring[ath11k_hw_mac_id_to_srng_id(&ab->hw_params, 4278 mac_id)]; 4279 rx_ring = &ar->dp.rx_refill_buf_ring; 4280 4281 srng = &ab->hal.srng_list[err_ring->ring_id]; 4282 4283 spin_lock_bh(&srng->lock); 4284 4285 ath11k_hal_srng_access_begin(ab, srng); 4286 4287 while (quota-- && 4288 (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 4289 ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank); 4290 4291 entr_ring = (struct hal_reo_entrance_ring *)desc; 4292 rxdma_err_code = 4293 FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE, 4294 entr_ring->info1); 4295 ab->soc_stats.rxdma_error[rxdma_err_code]++; 4296 4297 link_desc_va = link_desc_banks[desc_bank].vaddr + 4298 (paddr - link_desc_banks[desc_bank].paddr); 4299 ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, 4300 msdu_cookies, &rbm); 4301 4302 for (i = 0; i < num_msdus; i++) { 4303 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 4304 msdu_cookies[i]); 4305 4306 spin_lock_bh(&rx_ring->idr_lock); 4307 skb = idr_find(&rx_ring->bufs_idr, buf_id); 4308 if (!skb) { 4309 ath11k_warn(ab, "rxdma error with invalid buf_id %d\n", 4310 buf_id); 4311 spin_unlock_bh(&rx_ring->idr_lock); 4312 continue; 4313 } 4314 4315 idr_remove(&rx_ring->bufs_idr, buf_id); 4316 spin_unlock_bh(&rx_ring->idr_lock); 4317 4318 rxcb = 
ATH11K_SKB_RXCB(skb);
			/* Tail of the RXDMA error-destination handler (the
			 * function begins earlier in the file): unmap and
			 * free every erroneous rx buffer of this link
			 * descriptor.  The buffer was DMA-mapped with the
			 * full skb length plus tailroom, so unmap the same.
			 */
			dma_unmap_single(ab->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);

			num_buf_freed++;
		}

		/* return the consumed MSDU link descriptor to the idle list */
		ath11k_dp_rx_link_desc_return(ab, desc,
					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	/* replace exactly as many rx buffers as were reclaimed above */
	if (num_buf_freed)
		ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed,
					   ab->hw_params.hal_params->rx_buf_rbm);

	return budget - quota;
}

/* Drain the REO status ring: decode each status TLV into reo_status,
 * look up the pending command with the matching cmd_num on
 * dp->reo_cmd_list, and invoke that command's completion handler.
 */
void ath11k_dp_process_reo_status(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng;
	struct dp_reo_cmd *cmd, *tmp;
	bool found = false;
	u32 *reo_desc;
	u16 tag;
	struct hal_reo_status reo_status;

	srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];

	memset(&reo_status, 0, sizeof(reo_status));

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	while ((reo_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
		/* first word of every status descriptor is a TLV header */
		tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc);

		switch (tag) {
		case HAL_REO_GET_QUEUE_STATS_STATUS:
			ath11k_hal_reo_status_queue_stats(ab, reo_desc,
							  &reo_status);
			break;
		case HAL_REO_FLUSH_QUEUE_STATUS:
			ath11k_hal_reo_flush_queue_status(ab, reo_desc,
							  &reo_status);
			break;
		case HAL_REO_FLUSH_CACHE_STATUS:
			ath11k_hal_reo_flush_cache_status(ab, reo_desc,
							  &reo_status);
			break;
		case HAL_REO_UNBLOCK_CACHE_STATUS:
			ath11k_hal_reo_unblk_cache_status(ab, reo_desc,
							  &reo_status);
			break;
		case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
			ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc,
								 &reo_status);
			break;
		case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
			ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc,
								  &reo_status);
			break;
		case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
			ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc,
								  &reo_status);
			break;
		default:
			/* unknown tag: nothing was decoded, skip matching */
			ath11k_warn(ab, "Unknown reo status type %d\n", tag);
			continue;
		}

		/* unlink the matching command under reo_cmd_lock; the
		 * handler itself runs after the lock is dropped
		 */
		spin_lock_bh(&dp->reo_cmd_lock);
		list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
			if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
				found = true;
				list_del(&cmd->list);
				break;
			}
		}
		spin_unlock_bh(&dp->reo_cmd_lock);

		if (found) {
			cmd->handler(dp, (void *)&cmd->data,
				     reo_status.uniform_hdr.cmd_status);
			kfree(cmd);
		}

		/* reset for the next status descriptor */
		found = false;
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);
}

/* Release per-pdev rx srngs and the rxdma buffers attached to them. */
void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;

	ath11k_dp_rx_pdev_srng_free(ar);
	ath11k_dp_rxdma_pdev_buf_free(ar);
}

/* Allocate the per-pdev rx srngs, populate them with buffers and register
 * every ring with the target via HTT srng setup messages.  Returns 0 or a
 * negative error; on failure already-allocated resources are left for
 * ath11k_dp_rx_pdev_free() to reclaim.
 */
int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;
	struct ath11k_pdev_dp *dp = &ar->dp;
	u32 ring_id;
	int i;
	int ret;

	ret = ath11k_dp_rx_pdev_srng_alloc(ar);
	if (ret) {
		ath11k_warn(ab, "failed to setup rx srngs\n");
		return ret;
	}

	ret = ath11k_dp_rxdma_pdev_buf_setup(ar);
	if (ret) {
		ath11k_warn(ab, "failed to setup rxdma ring\n");
		return ret;
	}

	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
	ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF);
	if (ret) {
		ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
			    ret);
		return ret;
	}

	if (ab->hw_params.rx_mac_buf_ring) {
		for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
			ring_id = dp->rx_mac_buf_ring[i].ring_id;
			ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
							  mac_id + i, HAL_RXDMA_BUF);
			if (ret) {
4463 ath11k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n", 4464 i, ret); 4465 return ret; 4466 } 4467 } 4468 } 4469 4470 for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) { 4471 ring_id = dp->rxdma_err_dst_ring[i].ring_id; 4472 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, 4473 mac_id + i, HAL_RXDMA_DST); 4474 if (ret) { 4475 ath11k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n", 4476 i, ret); 4477 return ret; 4478 } 4479 } 4480 4481 if (!ab->hw_params.rxdma1_enable) 4482 goto config_refill_ring; 4483 4484 ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id; 4485 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, 4486 mac_id, HAL_RXDMA_MONITOR_BUF); 4487 if (ret) { 4488 ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n", 4489 ret); 4490 return ret; 4491 } 4492 ret = ath11k_dp_tx_htt_srng_setup(ab, 4493 dp->rxdma_mon_dst_ring.ring_id, 4494 mac_id, HAL_RXDMA_MONITOR_DST); 4495 if (ret) { 4496 ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n", 4497 ret); 4498 return ret; 4499 } 4500 ret = ath11k_dp_tx_htt_srng_setup(ab, 4501 dp->rxdma_mon_desc_ring.ring_id, 4502 mac_id, HAL_RXDMA_MONITOR_DESC); 4503 if (ret) { 4504 ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n", 4505 ret); 4506 return ret; 4507 } 4508 4509 config_refill_ring: 4510 for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) { 4511 ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id; 4512 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i, 4513 HAL_RXDMA_MONITOR_STATUS); 4514 if (ret) { 4515 ath11k_warn(ab, 4516 "failed to configure mon_status_refill_ring%d %d\n", 4517 i, ret); 4518 return ret; 4519 } 4520 } 4521 4522 return 0; 4523 } 4524 4525 static void ath11k_dp_mon_set_frag_len(u32 *total_len, u32 *frag_len) 4526 { 4527 if (*total_len >= (DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc))) { 4528 *frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc); 4529 *total_len -= *frag_len; 4530 } else { 4531 *frag_len = 
*total_len; 4532 *total_len = 0; 4533 } 4534 } 4535 4536 static 4537 int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar, 4538 void *p_last_buf_addr_info, 4539 u8 mac_id) 4540 { 4541 struct ath11k_pdev_dp *dp = &ar->dp; 4542 struct dp_srng *dp_srng; 4543 void *hal_srng; 4544 void *src_srng_desc; 4545 int ret = 0; 4546 4547 if (ar->ab->hw_params.rxdma1_enable) { 4548 dp_srng = &dp->rxdma_mon_desc_ring; 4549 hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id]; 4550 } else { 4551 dp_srng = &ar->ab->dp.wbm_desc_rel_ring; 4552 hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id]; 4553 } 4554 4555 ath11k_hal_srng_access_begin(ar->ab, hal_srng); 4556 4557 src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng); 4558 4559 if (src_srng_desc) { 4560 struct ath11k_buffer_addr *src_desc = src_srng_desc; 4561 4562 *src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info); 4563 } else { 4564 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4565 "Monitor Link Desc Ring %d Full", mac_id); 4566 ret = -ENOMEM; 4567 } 4568 4569 ath11k_hal_srng_access_end(ar->ab, hal_srng); 4570 return ret; 4571 } 4572 4573 static 4574 void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc, 4575 dma_addr_t *paddr, u32 *sw_cookie, 4576 u8 *rbm, 4577 void **pp_buf_addr_info) 4578 { 4579 struct hal_rx_msdu_link *msdu_link = rx_msdu_link_desc; 4580 struct ath11k_buffer_addr *buf_addr_info; 4581 4582 buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info; 4583 4584 ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, rbm); 4585 4586 *pp_buf_addr_info = (void *)buf_addr_info; 4587 } 4588 4589 static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len) 4590 { 4591 if (skb->len > len) { 4592 skb_trim(skb, len); 4593 } else { 4594 if (skb_tailroom(skb) < len - skb->len) { 4595 if ((pskb_expand_head(skb, 0, 4596 len - skb->len - skb_tailroom(skb), 4597 GFP_ATOMIC))) { 4598 dev_kfree_skb_any(skb); 4599 return -ENOMEM; 4600 } 4601 } 4602 skb_put(skb, (len 
- skb->len)); 4603 } 4604 return 0; 4605 } 4606 4607 static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar, 4608 void *msdu_link_desc, 4609 struct hal_rx_msdu_list *msdu_list, 4610 u16 *num_msdus) 4611 { 4612 struct hal_rx_msdu_details *msdu_details = NULL; 4613 struct rx_msdu_desc *msdu_desc_info = NULL; 4614 struct hal_rx_msdu_link *msdu_link = NULL; 4615 int i; 4616 u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1); 4617 u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1); 4618 u8 tmp = 0; 4619 4620 msdu_link = msdu_link_desc; 4621 msdu_details = &msdu_link->msdu_link[0]; 4622 4623 for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) { 4624 if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR, 4625 msdu_details[i].buf_addr_info.info0) == 0) { 4626 msdu_desc_info = &msdu_details[i - 1].rx_msdu_info; 4627 msdu_desc_info->info0 |= last; 4628 ; 4629 break; 4630 } 4631 msdu_desc_info = &msdu_details[i].rx_msdu_info; 4632 4633 if (!i) 4634 msdu_desc_info->info0 |= first; 4635 else if (i == (HAL_RX_NUM_MSDU_DESC - 1)) 4636 msdu_desc_info->info0 |= last; 4637 msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0; 4638 msdu_list->msdu_info[i].msdu_len = 4639 HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0); 4640 msdu_list->sw_cookie[i] = 4641 FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, 4642 msdu_details[i].buf_addr_info.info1); 4643 tmp = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR, 4644 msdu_details[i].buf_addr_info.info1); 4645 msdu_list->rbm[i] = tmp; 4646 } 4647 *num_msdus = i; 4648 } 4649 4650 static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id, 4651 u32 *rx_bufs_used) 4652 { 4653 u32 ret = 0; 4654 4655 if ((*ppdu_id < msdu_ppdu_id) && 4656 ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) { 4657 *ppdu_id = msdu_ppdu_id; 4658 ret = msdu_ppdu_id; 4659 } else if ((*ppdu_id > msdu_ppdu_id) && 4660 ((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) { 4661 /* mon_dst is behind than mon_status 4662 * skip dst_ring and free it 4663 */ 
4664 *rx_bufs_used += 1; 4665 *ppdu_id = msdu_ppdu_id; 4666 ret = msdu_ppdu_id; 4667 } 4668 return ret; 4669 } 4670 4671 static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info, 4672 bool *is_frag, u32 *total_len, 4673 u32 *frag_len, u32 *msdu_cnt) 4674 { 4675 if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) { 4676 if (!*is_frag) { 4677 *total_len = info->msdu_len; 4678 *is_frag = true; 4679 } 4680 ath11k_dp_mon_set_frag_len(total_len, 4681 frag_len); 4682 } else { 4683 if (*is_frag) { 4684 ath11k_dp_mon_set_frag_len(total_len, 4685 frag_len); 4686 } else { 4687 *frag_len = info->msdu_len; 4688 } 4689 *is_frag = false; 4690 *msdu_cnt -= 1; 4691 } 4692 } 4693 4694 static u32 4695 ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id, 4696 void *ring_entry, struct sk_buff **head_msdu, 4697 struct sk_buff **tail_msdu, u32 *npackets, 4698 u32 *ppdu_id) 4699 { 4700 struct ath11k_pdev_dp *dp = &ar->dp; 4701 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 4702 struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring; 4703 struct sk_buff *msdu = NULL, *last = NULL; 4704 struct hal_rx_msdu_list msdu_list; 4705 void *p_buf_addr_info, *p_last_buf_addr_info; 4706 struct hal_rx_desc *rx_desc; 4707 void *rx_msdu_link_desc; 4708 dma_addr_t paddr; 4709 u16 num_msdus = 0; 4710 u32 rx_buf_size, rx_pkt_offset, sw_cookie; 4711 u32 rx_bufs_used = 0, i = 0; 4712 u32 msdu_ppdu_id = 0, msdu_cnt = 0; 4713 u32 total_len = 0, frag_len = 0; 4714 bool is_frag, is_first_msdu; 4715 bool drop_mpdu = false; 4716 struct ath11k_skb_rxcb *rxcb; 4717 struct hal_reo_entrance_ring *ent_desc = ring_entry; 4718 int buf_id; 4719 u32 rx_link_buf_info[2]; 4720 u8 rbm; 4721 4722 if (!ar->ab->hw_params.rxdma1_enable) 4723 rx_ring = &dp->rx_refill_buf_ring; 4724 4725 ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr, 4726 &sw_cookie, 4727 &p_last_buf_addr_info, &rbm, 4728 &msdu_cnt); 4729 4730 if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON, 
ent_desc->info1) ==
	    HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
		u8 rxdma_err =
			FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
				  ent_desc->info1);
		/* these rxdma error codes mean the MPDU payload is unusable:
		 * drop every buffer of this MPDU but still reclaim them
		 */
		if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
			drop_mpdu = true;
			pmon->rx_mon_stats.dest_mpdu_drop++;
		}
	}

	is_frag = false;
	is_first_msdu = true;

	do {
		/* same link descriptor seen twice: already processed */
		if (pmon->mon_last_linkdesc_paddr == paddr) {
			pmon->rx_mon_stats.dup_mon_linkdesc_cnt++;
			return rx_bufs_used;
		}

		if (ar->ab->hw_params.rxdma1_enable)
			rx_msdu_link_desc =
				(void *)pmon->link_desc_banks[sw_cookie].vaddr +
				(paddr - pmon->link_desc_banks[sw_cookie].paddr);
		else
			rx_msdu_link_desc =
				(void *)ar->ab->dp.link_desc_banks[sw_cookie].vaddr +
				(paddr - ar->ab->dp.link_desc_banks[sw_cookie].paddr);

		ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
					    &num_msdus);

		for (i = 0; i < num_msdus; i++) {
			u32 l2_hdr_offset;

			/* duplicate buffer cookie: drop the whole MPDU */
			if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) {
				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
					   "i %d last_cookie %d is same\n",
					   i, pmon->mon_last_buf_cookie);
				drop_mpdu = true;
				pmon->rx_mon_stats.dup_mon_buf_cnt++;
				continue;
			}
			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
					   msdu_list.sw_cookie[i]);

			spin_lock_bh(&rx_ring->idr_lock);
			msdu = idr_find(&rx_ring->bufs_idr, buf_id);
			spin_unlock_bh(&rx_ring->idr_lock);
			if (!msdu) {
				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
					   "msdu_pop: invalid buf_id %d\n", buf_id);
				break;
			}
			rxcb = ATH11K_SKB_RXCB(msdu);
			/* unmap once; the flag guards against double unmap */
			if (!rxcb->unmapped) {
				dma_unmap_single(ar->ab->dev, rxcb->paddr,
						 msdu->len +
						 skb_tailroom(msdu),
						 DMA_FROM_DEVICE);
				rxcb->unmapped = 1;
			}
			if (drop_mpdu) {
				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
					   "i %d drop msdu %p *ppdu_id %x\n",
					   i, msdu, *ppdu_id);
				dev_kfree_skb_any(msdu);
				msdu = NULL;
				goto next_msdu;
			}

			rx_desc = (struct hal_rx_desc *)msdu->data;

			rx_pkt_offset = sizeof(struct hal_rx_desc);
			l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc);

			if (is_first_msdu) {
				if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {
					drop_mpdu = true;
					dev_kfree_skb_any(msdu);
					msdu = NULL;
					pmon->mon_last_linkdesc_paddr = paddr;
					goto next_msdu;
				}

				msdu_ppdu_id =
					ath11k_dp_rxdesc_get_ppduid(ar->ab, rx_desc);

				/* non-zero: dest ring ppdu differs from the
				 * status ring ppdu being processed
				 */
				if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id,
								 ppdu_id,
								 &rx_bufs_used)) {
					if (rx_bufs_used) {
						/* dest ring is behind: drop */
						drop_mpdu = true;
						dev_kfree_skb_any(msdu);
						msdu = NULL;
						goto next_msdu;
					}
					return rx_bufs_used;
				}
				pmon->mon_last_linkdesc_paddr = paddr;
				is_first_msdu = false;
			}
			ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
						  &is_frag, &total_len,
						  &frag_len, &msdu_cnt);
			rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;

			/* NOTE(review): return value ignored; on -ENOMEM the
			 * skb was freed but is still linked below - confirm
			 */
			ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size);

			if (!(*head_msdu))
				*head_msdu = msdu;
			else if (last)
				last->next = msdu;

			last = msdu;
next_msdu:
			/* buffer consumed either way: remember cookie and
			 * release the idr slot for replenish
			 */
			pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i];
			rx_bufs_used++;
			spin_lock_bh(&rx_ring->idr_lock);
			idr_remove(&rx_ring->bufs_idr, buf_id);
			spin_unlock_bh(&rx_ring->idr_lock);
		}

		ath11k_hal_rx_buf_addr_info_set(rx_link_buf_info, paddr, sw_cookie, rbm);

		/* follow the chain to the next link descriptor, then give
		 * the consumed one back to hardware
		 */
		ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr,
						    &sw_cookie, &rbm,
						    &p_buf_addr_info);

		if (ar->ab->hw_params.rxdma1_enable) {
			if (ath11k_dp_rx_monitor_link_desc_return(ar,
								  p_last_buf_addr_info,
								  dp->mac_id))
				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
					   "dp_rx_monitor_link_desc_return failed");
		} else {
			ath11k_dp_rx_link_desc_return(ar->ab, rx_link_buf_info,
						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
		}

		p_last_buf_addr_info = p_buf_addr_info;

	} while (paddr && msdu_cnt);

	if (last)
		last->next = NULL;

	*tail_msdu = msdu;

	if (msdu_cnt == 0)
		*npackets = 1;

	return rx_bufs_used;
}

/* Strip the hal rx descriptor and L3 pad so msdu->data points at the
 * 802.11 payload.
 */
static void ath11k_dp_rx_msdus_set_payload(struct ath11k *ar, struct sk_buff *msdu)
{
	u32 rx_pkt_offset, l2_hdr_offset;

	rx_pkt_offset = ar->ab->hw_params.hal_desc_sz;
	l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab,
						      (struct hal_rx_desc *)msdu->data);
	skb_pull(msdu, rx_pkt_offset + l2_hdr_offset);
}

/* Turn the chained MSDU buffers of one MPDU into a monitor-mode frame:
 * extract rx status from the head descriptor, strip per-buffer descriptors
 * and fix up decap-specific framing.  Returns the head skb (chain intact)
 * or NULL on unsupported decap / length error.
 */
static struct sk_buff *
ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
			    u32 mac_id, struct sk_buff *head_msdu,
			    struct sk_buff *last_msdu,
			    struct ieee80211_rx_status *rxs, bool *fcs_err)
{
	struct ath11k_base *ab = ar->ab;
	struct sk_buff *msdu, *prev_buf;
	struct hal_rx_desc *rx_desc;
	char *hdr_desc;
	u8 *dest, decap_format;
	struct ieee80211_hdr_3addr *wh;
	struct rx_attention *rx_attention;
	u32 err_bitmap;

	if (!head_msdu)
		goto err_merge_fail;

	rx_desc = (struct hal_rx_desc *)head_msdu->data;
	rx_attention = ath11k_dp_rx_get_attention(ab, rx_desc);
	err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);

	/* FCS errors are still delivered to monitor, only flagged */
	if (err_bitmap & DP_RX_MPDU_ERR_FCS)
		*fcs_err = true;

	if (ath11k_dp_rxdesc_get_mpdulen_err(rx_attention))
		return NULL;

	decap_format = ath11k_dp_rx_h_msdu_start_decap_type(ab, rx_desc);

	ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);

	if (decap_format == DP_RX_DECAP_TYPE_RAW) {
		ath11k_dp_rx_msdus_set_payload(ar, head_msdu);

		prev_buf = head_msdu;
		msdu = head_msdu->next;

		while (msdu) {
			ath11k_dp_rx_msdus_set_payload(ar, msdu);

			prev_buf = msdu;
			msdu =
msdu->next;
		}

		prev_buf->next = NULL;

		/* raw decap carries the FCS in the last buffer: drop it */
		skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
	} else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) {
		u8 qos_pkt = 0;

		rx_desc = (struct hal_rx_desc *)head_msdu->data;
		hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc);

		/* Base size */
		wh = (struct ieee80211_hdr_3addr *)hdr_desc;

		if (ieee80211_is_data_qos(wh->frame_control))
			qos_pkt = 1;

		msdu = head_msdu;

		while (msdu) {
			ath11k_dp_rx_msdus_set_payload(ar, msdu);
			if (qos_pkt) {
				/* NOTE(review): pushes 2 bytes but copies the
				 * whole QoS header; relies on the remaining
				 * header bytes already being in place after
				 * set_payload - confirm against HW decap
				 * layout
				 */
				dest = skb_push(msdu, sizeof(__le16));
				if (!dest)
					goto err_merge_fail;
				memcpy(dest, hdr_desc, sizeof(struct ieee80211_qos_hdr));
			}
			prev_buf = msdu;
			msdu = msdu->next;
		}
		/* native-wifi decap stripped the FCS: re-append room for it */
		dest = skb_put(prev_buf, HAL_RX_FCS_LEN);
		if (!dest)
			goto err_merge_fail;

		ath11k_dbg(ab, ATH11K_DBG_DATA,
			   "mpdu_buf %p mpdu_buf->len %u",
			   prev_buf, prev_buf->len);
	} else {
		ath11k_dbg(ab, ATH11K_DBG_DATA,
			   "decap format %d is not supported!\n",
			   decap_format);
		goto err_merge_fail;
	}

	return head_msdu;

err_merge_fail:
	return NULL;
}

/* Serialize the parsed HE PPDU fields into the radiotap HE buffer: six
 * consecutive little-endian 16-bit words (data1..data6).  Layout is
 * presumably struct ieee80211_radiotap_he - keep in sync with mac80211.
 */
static void
ath11k_dp_rx_update_radiotap_he(struct hal_rx_mon_ppdu_info *rx_status,
				u8 *rtap_buf)
{
	u32 rtap_len = 0;

	put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
	rtap_len += 2;

	put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
	rtap_len += 2;

	put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
	rtap_len += 2;

	put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
	rtap_len += 2;

	put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
	rtap_len += 2;

	put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
}

/* Serialize HE-MU fields: two LE16 flag words followed by the four RU
 * allocation bytes (presumably struct ieee80211_radiotap_he_mu layout).
 */
static void
ath11k_dp_rx_update_radiotap_he_mu(struct hal_rx_mon_ppdu_info *rx_status,
				   u8 *rtap_buf)
{
	u32 rtap_len = 0;

	put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
	rtap_len += 2;

	put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
	rtap_len += 2;

	rtap_buf[rtap_len] = rx_status->he_RU[0];
	rtap_len += 1;

	rtap_buf[rtap_len] = rx_status->he_RU[1];
	rtap_len += 1;

	rtap_buf[rtap_len] = rx_status->he_RU[2];
	rtap_len += 1;

	rtap_buf[rtap_len] = rx_status->he_RU[3];
}

/* Fill ieee80211_rx_status from the parsed PPDU info and, for HE frames,
 * push the radiotap HE/HE-MU blob onto the front of the monitor skb.
 */
static void ath11k_update_radiotap(struct ath11k *ar,
				   struct hal_rx_mon_ppdu_info *ppduinfo,
				   struct sk_buff *mon_skb,
				   struct ieee80211_rx_status *rxs)
{
	struct ieee80211_supported_band *sband;
	u8 *ptr = NULL;

	rxs->flag |= RX_FLAG_MACTIME_START;
	rxs->signal = ppduinfo->rssi_comb + ATH11K_DEFAULT_NOISE_FLOOR;

	if (ppduinfo->nss)
		rxs->nss = ppduinfo->nss;

	if (ppduinfo->he_mu_flags) {
		rxs->flag |= RX_FLAG_RADIOTAP_HE_MU;
		rxs->encoding = RX_ENC_HE;
		ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he_mu));
		ath11k_dp_rx_update_radiotap_he_mu(ppduinfo, ptr);
	} else if (ppduinfo->he_flags) {
		rxs->flag |= RX_FLAG_RADIOTAP_HE;
		rxs->encoding = RX_ENC_HE;
		ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he));
		ath11k_dp_rx_update_radiotap_he(ppduinfo, ptr);
		rxs->rate_idx = ppduinfo->rate;
	} else if (ppduinfo->vht_flags) {
		rxs->encoding = RX_ENC_VHT;
		rxs->rate_idx = ppduinfo->rate;
	} else if (ppduinfo->ht_flags) {
		rxs->encoding = RX_ENC_HT;
		rxs->rate_idx = ppduinfo->rate;
	} else {
		/* legacy rates need the band table to map HW rate to index */
		rxs->encoding = RX_ENC_LEGACY;
		sband = &ar->mac.sbands[rxs->band];
		rxs->rate_idx = ath11k_mac_hw_rate_to_idx(sband, ppduinfo->rate,
							  ppduinfo->cck_flag);
	}

	rxs->mactime = ppduinfo->tsft;
}

/* Merge one MPDU's buffers and deliver each resulting skb to mac80211 as
 * a monitor frame; frees the whole chain on merge failure.
 */
static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
				    struct
sk_buff *head_msdu,
				    struct hal_rx_mon_ppdu_info *ppduinfo,
				    struct sk_buff *tail_msdu,
				    struct napi_struct *napi)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct sk_buff *mon_skb, *skb_next, *header;
	struct ieee80211_rx_status *rxs = &dp->rx_status;
	bool fcs_err = false;

	mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu,
					      tail_msdu, rxs, &fcs_err);

	if (!mon_skb)
		goto mon_deliver_fail;

	header = mon_skb;

	rxs->flag = 0;

	if (fcs_err)
		rxs->flag = RX_FLAG_FAILED_FCS_CRC;

	do {
		skb_next = mon_skb->next;
		/* AMSDU_MORE set on all but the last skb of the chain */
		if (!skb_next)
			rxs->flag &= ~RX_FLAG_AMSDU_MORE;
		else
			rxs->flag |= RX_FLAG_AMSDU_MORE;

		/* only the first skb of the MPDU clears ALLOW_SAME_PN */
		if (mon_skb == header) {
			header = NULL;
			rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN;
		} else {
			rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
		}
		rxs->flag |= RX_FLAG_ONLY_MONITOR;
		ath11k_update_radiotap(ar, ppduinfo, mon_skb, rxs);

		ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb, rxs);
		mon_skb = skb_next;
	} while (mon_skb);
	rxs->flag = 0;

	return 0;

mon_deliver_fail:
	/* merge failed: free the entire original chain */
	mon_skb = head_msdu;
	while (mon_skb) {
		skb_next = mon_skb->next;
		dev_kfree_skb_any(mon_skb);
		mon_skb = skb_next;
	}
	return -EINVAL;
}

/* The destination ring processing is stuck if the destination is not
 * moving while status ring moves 16 PPDU. The destination ring processing
 * skips this destination ring PPDU as a workaround.
 */
#define MON_DEST_RING_STUCK_MAX_CNT 16

/* Reap the monitor destination ring for the PPDU currently tracked by the
 * status ring, delivering each complete MPDU, and replenish consumed
 * buffers afterwards.  Runs entirely under pmon->mon_lock.
 */
static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
					  u32 quota, struct napi_struct *napi)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
	const struct ath11k_hw_hal_params *hal_params;
	void *ring_entry;
	void *mon_dst_srng;
	u32 ppdu_id;
	u32 rx_bufs_used;
	u32 ring_id;
	struct ath11k_pdev_mon_stats *rx_mon_stats;
	u32 npackets = 0;
	u32 mpdu_rx_bufs_used;

	if (ar->ab->hw_params.rxdma1_enable)
		ring_id = dp->rxdma_mon_dst_ring.ring_id;
	else
		ring_id = dp->rxdma_err_dst_ring[mac_id].ring_id;

	mon_dst_srng = &ar->ab->hal.srng_list[ring_id];

	spin_lock_bh(&pmon->mon_lock);

	ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);

	ppdu_id = pmon->mon_ppdu_info.ppdu_id;
	rx_bufs_used = 0;
	rx_mon_stats = &pmon->rx_mon_stats;

	while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
		struct sk_buff *head_msdu, *tail_msdu;

		head_msdu = NULL;
		tail_msdu = NULL;

		mpdu_rx_bufs_used = ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry,
							      &head_msdu,
							      &tail_msdu,
							      &npackets, &ppdu_id);

		rx_bufs_used += mpdu_rx_bufs_used;

		/* track consecutive entries that yielded no buffers to
		 * detect the stuck-destination-ring condition
		 */
		if (mpdu_rx_bufs_used) {
			dp->mon_dest_ring_stuck_cnt = 0;
		} else {
			dp->mon_dest_ring_stuck_cnt++;
			rx_mon_stats->dest_mon_not_reaped++;
		}

		if (dp->mon_dest_ring_stuck_cnt > MON_DEST_RING_STUCK_MAX_CNT) {
			/* workaround: adopt the dest ring ppdu id and skip */
			rx_mon_stats->dest_mon_stuck++;
			ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
				   "status ring ppdu_id=%d dest ring ppdu_id=%d mon_dest_ring_stuck_cnt=%d dest_mon_not_reaped=%u dest_mon_stuck=%u\n",
				   pmon->mon_ppdu_info.ppdu_id, ppdu_id,
				   dp->mon_dest_ring_stuck_cnt,
				   rx_mon_stats->dest_mon_not_reaped,
				   rx_mon_stats->dest_mon_stuck);
			pmon->mon_ppdu_info.ppdu_id = ppdu_id;
			continue;
		}

		/* dest ring moved on to a new PPDU: stop and resync with
		 * the status ring
		 */
		if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
			ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
				   "dest_rx: new ppdu_id %x != status ppdu_id %x dest_mon_not_reaped = %u dest_mon_stuck = %u\n",
				   ppdu_id, pmon->mon_ppdu_info.ppdu_id,
				   rx_mon_stats->dest_mon_not_reaped,
				   rx_mon_stats->dest_mon_stuck);
			break;
		}
		if (head_msdu && tail_msdu) {
			ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu,
						 &pmon->mon_ppdu_info,
						 tail_msdu, napi);
			rx_mon_stats->dest_mpdu_done++;
		}

		ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
								mon_dst_srng);
	}
	ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);

	spin_unlock_bh(&pmon->mon_lock);

	if (rx_bufs_used) {
		rx_mon_stats->dest_ppdu_done++;
		hal_params = ar->ab->hw_params.hal_params;

		if (ar->ab->hw_params.rxdma1_enable)
			ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
						   &dp->rxdma_mon_buf_ring,
						   rx_bufs_used,
						   hal_params->rx_buf_rbm);
		else
			ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
						   &dp->rx_refill_buf_ring,
						   rx_bufs_used,
						   hal_params->rx_buf_rbm);
	}
}

/* NAPI handler for the monitor status ring: parse each status buffer,
 * kick destination-ring processing on PPDU completion, and feed per-peer
 * rx statistics.  Returns the number of status buffers reaped.
 */
int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
				    struct napi_struct *napi, int budget)
{
	struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
	enum hal_rx_mon_status hal_status;
	struct sk_buff *skb;
	struct sk_buff_head skb_list;
	struct ath11k_peer *peer;
	struct ath11k_sta *arsta;
	int num_buffs_reaped = 0;
	u32 rx_buf_sz;
	u16 log_type;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&ar->dp.mon_data;
	struct ath11k_pdev_mon_stats *rx_mon_stats = &pmon->rx_mon_stats;
	struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;

	__skb_queue_head_init(&skb_list);

	num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id,
&budget,
							     &skb_list);
	if (!num_buffs_reaped)
		goto exit;

	memset(ppdu_info, 0, sizeof(*ppdu_info));
	ppdu_info->peer_id = HAL_INVALID_PEERID;

	while ((skb = __skb_dequeue(&skb_list))) {
		/* pktlog mode decides whether and how much of the status
		 * buffer is traced out
		 */
		if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) {
			log_type = ATH11K_PKTLOG_TYPE_LITE_RX;
			rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
		} else if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) {
			log_type = ATH11K_PKTLOG_TYPE_RX_STATBUF;
			rx_buf_sz = DP_RX_BUFFER_SIZE;
		} else {
			log_type = ATH11K_PKTLOG_TYPE_INVALID;
			rx_buf_sz = 0;
		}

		if (log_type != ATH11K_PKTLOG_TYPE_INVALID)
			trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);

		memset(ppdu_info, 0, sizeof(*ppdu_info));
		ppdu_info->peer_id = HAL_INVALID_PEERID;
		hal_status = ath11k_hal_rx_parse_mon_status(ab, ppdu_info, skb);

		/* a completed PPDU while monitor is active triggers one
		 * pass over the destination ring
		 */
		if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
		    pmon->mon_ppdu_status == DP_PPDU_STATUS_START &&
		    hal_status == HAL_TLV_STATUS_PPDU_DONE) {
			rx_mon_stats->status_ppdu_done++;
			pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
			ath11k_dp_rx_mon_dest_process(ar, mac_id, budget, napi);
			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
		}

		if (ppdu_info->peer_id == HAL_INVALID_PEERID ||
		    hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
			dev_kfree_skb_any(skb);
			continue;
		}

		rcu_read_lock();
		spin_lock_bh(&ab->base_lock);
		peer = ath11k_peer_find_by_id(ab, ppdu_info->peer_id);

		if (!peer || !peer->sta) {
			ath11k_dbg(ab, ATH11K_DBG_DATA,
				   "failed to find the peer with peer_id %d\n",
				   ppdu_info->peer_id);
			goto next_skb;
		}

		arsta = ath11k_sta_to_arsta(peer->sta);
		ath11k_dp_rx_update_peer_stats(arsta, ppdu_info);

		/* NOTE(review): unlike the earlier trace call this one is
		 * not gated on log_type != ATH11K_PKTLOG_TYPE_INVALID, so
		 * it can trace with INVALID type / zero size - confirm
		 * intended
		 */
		if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr))
			trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);

next_skb:
		spin_unlock_bh(&ab->base_lock);
		rcu_read_unlock();

		dev_kfree_skb_any(skb);
		memset(ppdu_info, 0, sizeof(*ppdu_info));
		ppdu_info->peer_id = HAL_INVALID_PEERID;
	}
exit:
	return num_buffs_reaped;
}

/* Full-monitor variant of mpdu_pop: reap one SW monitor ring entry into a
 * chained MSDU list.  Fills @sw_mon_entries from the ring descriptor and
 * returns the number of rx buffers consumed (0 on end-of-PPDU entries).
 */
static u32
ath11k_dp_rx_full_mon_mpdu_pop(struct ath11k *ar,
			       void *ring_entry, struct sk_buff **head_msdu,
			       struct sk_buff **tail_msdu,
			       struct hal_sw_mon_ring_entries *sw_mon_entries)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = &dp->mon_data;
	struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
	struct sk_buff *msdu = NULL, *last = NULL;
	struct hal_sw_monitor_ring *sw_desc = ring_entry;
	struct hal_rx_msdu_list msdu_list;
	struct hal_rx_desc *rx_desc;
	struct ath11k_skb_rxcb *rxcb;
	void *rx_msdu_link_desc;
	void *p_buf_addr_info, *p_last_buf_addr_info;
	int buf_id, i = 0;
	u32 rx_buf_size, rx_pkt_offset, l2_hdr_offset;
	u32 rx_bufs_used = 0, msdu_cnt = 0;
	u32 total_len = 0, frag_len = 0, sw_cookie;
	u16 num_msdus = 0;
	u8 rxdma_err, rbm;
	bool is_frag, is_first_msdu;
	bool drop_mpdu = false;

	ath11k_hal_rx_sw_mon_ring_buf_paddr_get(ring_entry, sw_mon_entries);

	sw_cookie = sw_mon_entries->mon_dst_sw_cookie;
	sw_mon_entries->end_of_ppdu = false;
	sw_mon_entries->drop_ppdu = false;
	p_last_buf_addr_info = sw_mon_entries->dst_buf_addr_info;
	msdu_cnt = sw_mon_entries->msdu_cnt;

	/* end-of-ppdu entries carry no buffers */
	sw_mon_entries->end_of_ppdu =
		FIELD_GET(HAL_SW_MON_RING_INFO0_END_OF_PPDU, sw_desc->info0);
	if (sw_mon_entries->end_of_ppdu)
		return rx_bufs_used;

	if (FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_PUSH_REASON,
		      sw_desc->info0) ==
	    HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
		rxdma_err =
			FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_ERROR_CODE,
				  sw_desc->info0);
		/* unusable payload: drop the MPDU but reclaim buffers */
		if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
			pmon->rx_mon_stats.dest_mpdu_drop++;
			drop_mpdu = true;
		}
	}

	is_frag = false;
	is_first_msdu = true;

	do {
		rx_msdu_link_desc =
			(u8 *)pmon->link_desc_banks[sw_cookie].vaddr +
			(sw_mon_entries->mon_dst_paddr -
			 pmon->link_desc_banks[sw_cookie].paddr);

		ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
					    &num_msdus);

		for (i = 0; i < num_msdus; i++) {
			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
					   msdu_list.sw_cookie[i]);

			spin_lock_bh(&rx_ring->idr_lock);
			msdu = idr_find(&rx_ring->bufs_idr, buf_id);
			if (!msdu) {
				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
					   "full mon msdu_pop: invalid buf_id %d\n",
					   buf_id);
				spin_unlock_bh(&rx_ring->idr_lock);
				break;
			}
			idr_remove(&rx_ring->bufs_idr, buf_id);
			spin_unlock_bh(&rx_ring->idr_lock);

			rxcb = ATH11K_SKB_RXCB(msdu);
			/* unmap once; flag guards against double unmap */
			if (!rxcb->unmapped) {
				dma_unmap_single(ar->ab->dev, rxcb->paddr,
						 msdu->len +
						 skb_tailroom(msdu),
						 DMA_FROM_DEVICE);
				rxcb->unmapped = 1;
			}
			if (drop_mpdu) {
				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
					   "full mon: i %d drop msdu %p *ppdu_id %x\n",
					   i, msdu, sw_mon_entries->ppdu_id);
				dev_kfree_skb_any(msdu);
				msdu_cnt--;
				goto next_msdu;
			}

			rx_desc = (struct hal_rx_desc *)msdu->data;

			rx_pkt_offset = sizeof(struct hal_rx_desc);
			l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc);

			if (is_first_msdu) {
				if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {
					drop_mpdu = true;
					dev_kfree_skb_any(msdu);
					msdu = NULL;
					goto next_msdu;
				}
				is_first_msdu = false;
			}

			ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
						  &is_frag, &total_len,
						  &frag_len, &msdu_cnt);

			rx_buf_size = rx_pkt_offset +
l2_hdr_offset + frag_len; 5451 5452 ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size); 5453 5454 if (!(*head_msdu)) 5455 *head_msdu = msdu; 5456 else if (last) 5457 last->next = msdu; 5458 5459 last = msdu; 5460 next_msdu: 5461 rx_bufs_used++; 5462 } 5463 5464 ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, 5465 &sw_mon_entries->mon_dst_paddr, 5466 &sw_mon_entries->mon_dst_sw_cookie, 5467 &rbm, 5468 &p_buf_addr_info); 5469 5470 if (ath11k_dp_rx_monitor_link_desc_return(ar, 5471 p_last_buf_addr_info, 5472 dp->mac_id)) 5473 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 5474 "full mon: dp_rx_monitor_link_desc_return failed\n"); 5475 5476 p_last_buf_addr_info = p_buf_addr_info; 5477 5478 } while (sw_mon_entries->mon_dst_paddr && msdu_cnt); 5479 5480 if (last) 5481 last->next = NULL; 5482 5483 *tail_msdu = msdu; 5484 5485 return rx_bufs_used; 5486 } 5487 5488 static int ath11k_dp_rx_full_mon_prepare_mpdu(struct ath11k_dp *dp, 5489 struct dp_full_mon_mpdu *mon_mpdu, 5490 struct sk_buff *head, 5491 struct sk_buff *tail) 5492 { 5493 mon_mpdu = kzalloc(sizeof(*mon_mpdu), GFP_ATOMIC); 5494 if (!mon_mpdu) 5495 return -ENOMEM; 5496 5497 list_add_tail(&mon_mpdu->list, &dp->dp_full_mon_mpdu_list); 5498 mon_mpdu->head = head; 5499 mon_mpdu->tail = tail; 5500 5501 return 0; 5502 } 5503 5504 static void ath11k_dp_rx_full_mon_drop_ppdu(struct ath11k_dp *dp, 5505 struct dp_full_mon_mpdu *mon_mpdu) 5506 { 5507 struct dp_full_mon_mpdu *tmp; 5508 struct sk_buff *tmp_msdu, *skb_next; 5509 5510 if (list_empty(&dp->dp_full_mon_mpdu_list)) 5511 return; 5512 5513 list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) { 5514 list_del(&mon_mpdu->list); 5515 5516 tmp_msdu = mon_mpdu->head; 5517 while (tmp_msdu) { 5518 skb_next = tmp_msdu->next; 5519 dev_kfree_skb_any(tmp_msdu); 5520 tmp_msdu = skb_next; 5521 } 5522 5523 kfree(mon_mpdu); 5524 } 5525 } 5526 5527 static int ath11k_dp_rx_full_mon_deliver_ppdu(struct ath11k *ar, 5528 int mac_id, 5529 struct ath11k_mon_data *pmon, 5530 
struct napi_struct *napi) 5531 { 5532 struct ath11k_pdev_mon_stats *rx_mon_stats; 5533 struct dp_full_mon_mpdu *tmp; 5534 struct dp_full_mon_mpdu *mon_mpdu = pmon->mon_mpdu; 5535 struct sk_buff *head_msdu, *tail_msdu; 5536 struct ath11k_base *ab = ar->ab; 5537 struct ath11k_dp *dp = &ab->dp; 5538 int ret; 5539 5540 rx_mon_stats = &pmon->rx_mon_stats; 5541 5542 list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) { 5543 list_del(&mon_mpdu->list); 5544 head_msdu = mon_mpdu->head; 5545 tail_msdu = mon_mpdu->tail; 5546 if (head_msdu && tail_msdu) { 5547 ret = ath11k_dp_rx_mon_deliver(ar, mac_id, head_msdu, 5548 &pmon->mon_ppdu_info, 5549 tail_msdu, napi); 5550 rx_mon_stats->dest_mpdu_done++; 5551 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "full mon: deliver ppdu\n"); 5552 } 5553 kfree(mon_mpdu); 5554 } 5555 5556 return ret; 5557 } 5558 5559 static int 5560 ath11k_dp_rx_process_full_mon_status_ring(struct ath11k_base *ab, int mac_id, 5561 struct napi_struct *napi, int budget) 5562 { 5563 struct ath11k *ar = ab->pdevs[mac_id].ar; 5564 struct ath11k_pdev_dp *dp = &ar->dp; 5565 struct ath11k_mon_data *pmon = &dp->mon_data; 5566 struct hal_sw_mon_ring_entries *sw_mon_entries; 5567 int quota = 0, work = 0, count; 5568 5569 sw_mon_entries = &pmon->sw_mon_entries; 5570 5571 while (pmon->hold_mon_dst_ring) { 5572 quota = ath11k_dp_rx_process_mon_status(ab, mac_id, 5573 napi, 1); 5574 if (pmon->buf_state == DP_MON_STATUS_MATCH) { 5575 count = sw_mon_entries->status_buf_count; 5576 if (count > 1) { 5577 quota += ath11k_dp_rx_process_mon_status(ab, mac_id, 5578 napi, count); 5579 } 5580 5581 ath11k_dp_rx_full_mon_deliver_ppdu(ar, dp->mac_id, 5582 pmon, napi); 5583 pmon->hold_mon_dst_ring = false; 5584 } else if (!pmon->mon_status_paddr || 5585 pmon->buf_state == DP_MON_STATUS_LEAD) { 5586 sw_mon_entries->drop_ppdu = true; 5587 pmon->hold_mon_dst_ring = false; 5588 } 5589 5590 if (!quota) 5591 break; 5592 5593 work += quota; 5594 } 5595 5596 if 
(sw_mon_entries->drop_ppdu) 5597 ath11k_dp_rx_full_mon_drop_ppdu(&ab->dp, pmon->mon_mpdu); 5598 5599 return work; 5600 } 5601 5602 static int ath11k_dp_full_mon_process_rx(struct ath11k_base *ab, int mac_id, 5603 struct napi_struct *napi, int budget) 5604 { 5605 struct ath11k *ar = ab->pdevs[mac_id].ar; 5606 struct ath11k_pdev_dp *dp = &ar->dp; 5607 struct ath11k_mon_data *pmon = &dp->mon_data; 5608 struct hal_sw_mon_ring_entries *sw_mon_entries; 5609 struct ath11k_pdev_mon_stats *rx_mon_stats; 5610 struct sk_buff *head_msdu, *tail_msdu; 5611 void *mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id]; 5612 void *ring_entry; 5613 u32 rx_bufs_used = 0, mpdu_rx_bufs_used; 5614 int quota = 0, ret; 5615 bool break_dst_ring = false; 5616 5617 spin_lock_bh(&pmon->mon_lock); 5618 5619 sw_mon_entries = &pmon->sw_mon_entries; 5620 rx_mon_stats = &pmon->rx_mon_stats; 5621 5622 if (pmon->hold_mon_dst_ring) { 5623 spin_unlock_bh(&pmon->mon_lock); 5624 goto reap_status_ring; 5625 } 5626 5627 ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng); 5628 while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) { 5629 head_msdu = NULL; 5630 tail_msdu = NULL; 5631 5632 mpdu_rx_bufs_used = ath11k_dp_rx_full_mon_mpdu_pop(ar, ring_entry, 5633 &head_msdu, 5634 &tail_msdu, 5635 sw_mon_entries); 5636 rx_bufs_used += mpdu_rx_bufs_used; 5637 5638 if (!sw_mon_entries->end_of_ppdu) { 5639 if (head_msdu) { 5640 ret = ath11k_dp_rx_full_mon_prepare_mpdu(&ab->dp, 5641 pmon->mon_mpdu, 5642 head_msdu, 5643 tail_msdu); 5644 if (ret) 5645 break_dst_ring = true; 5646 } 5647 5648 goto next_entry; 5649 } else { 5650 if (!sw_mon_entries->ppdu_id && 5651 !sw_mon_entries->mon_status_paddr) { 5652 break_dst_ring = true; 5653 goto next_entry; 5654 } 5655 } 5656 5657 rx_mon_stats->dest_ppdu_done++; 5658 pmon->mon_ppdu_status = DP_PPDU_STATUS_START; 5659 pmon->buf_state = DP_MON_STATUS_LAG; 5660 pmon->mon_status_paddr = sw_mon_entries->mon_status_paddr; 5661 pmon->hold_mon_dst_ring = 
true; 5662 next_entry: 5663 ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab, 5664 mon_dst_srng); 5665 if (break_dst_ring) 5666 break; 5667 } 5668 5669 ath11k_hal_srng_access_end(ar->ab, mon_dst_srng); 5670 spin_unlock_bh(&pmon->mon_lock); 5671 5672 if (rx_bufs_used) { 5673 ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, 5674 &dp->rxdma_mon_buf_ring, 5675 rx_bufs_used, 5676 HAL_RX_BUF_RBM_SW3_BM); 5677 } 5678 5679 reap_status_ring: 5680 quota = ath11k_dp_rx_process_full_mon_status_ring(ab, mac_id, 5681 napi, budget); 5682 5683 return quota; 5684 } 5685 5686 int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id, 5687 struct napi_struct *napi, int budget) 5688 { 5689 struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id); 5690 int ret = 0; 5691 5692 if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) && 5693 ab->hw_params.full_monitor_mode) 5694 ret = ath11k_dp_full_mon_process_rx(ab, mac_id, napi, budget); 5695 else 5696 ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget); 5697 5698 return ret; 5699 } 5700 5701 static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar) 5702 { 5703 struct ath11k_pdev_dp *dp = &ar->dp; 5704 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 5705 5706 skb_queue_head_init(&pmon->rx_status_q); 5707 5708 pmon->mon_ppdu_status = DP_PPDU_STATUS_START; 5709 5710 memset(&pmon->rx_mon_stats, 0, 5711 sizeof(pmon->rx_mon_stats)); 5712 return 0; 5713 } 5714 5715 int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar) 5716 { 5717 struct ath11k_pdev_dp *dp = &ar->dp; 5718 struct ath11k_mon_data *pmon = &dp->mon_data; 5719 struct hal_srng *mon_desc_srng = NULL; 5720 struct dp_srng *dp_srng; 5721 int ret = 0; 5722 u32 n_link_desc = 0; 5723 5724 ret = ath11k_dp_rx_pdev_mon_status_attach(ar); 5725 if (ret) { 5726 ath11k_warn(ar->ab, "pdev_mon_status_attach() failed"); 5727 return ret; 5728 } 5729 5730 /* if rxdma1_enable is false, no need to setup 5731 * rxdma_mon_desc_ring. 
5732 */ 5733 if (!ar->ab->hw_params.rxdma1_enable) 5734 return 0; 5735 5736 dp_srng = &dp->rxdma_mon_desc_ring; 5737 n_link_desc = dp_srng->size / 5738 ath11k_hal_srng_get_entrysize(ar->ab, HAL_RXDMA_MONITOR_DESC); 5739 mon_desc_srng = 5740 &ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id]; 5741 5742 ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks, 5743 HAL_RXDMA_MONITOR_DESC, mon_desc_srng, 5744 n_link_desc); 5745 if (ret) { 5746 ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed"); 5747 return ret; 5748 } 5749 pmon->mon_last_linkdesc_paddr = 0; 5750 pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1; 5751 spin_lock_init(&pmon->mon_lock); 5752 5753 return 0; 5754 } 5755 5756 static int ath11k_dp_mon_link_free(struct ath11k *ar) 5757 { 5758 struct ath11k_pdev_dp *dp = &ar->dp; 5759 struct ath11k_mon_data *pmon = &dp->mon_data; 5760 5761 ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks, 5762 HAL_RXDMA_MONITOR_DESC, 5763 &dp->rxdma_mon_desc_ring); 5764 return 0; 5765 } 5766 5767 int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar) 5768 { 5769 ath11k_dp_mon_link_free(ar); 5770 return 0; 5771 } 5772 5773 int ath11k_dp_rx_pktlog_start(struct ath11k_base *ab) 5774 { 5775 /* start reap timer */ 5776 mod_timer(&ab->mon_reap_timer, 5777 jiffies + msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL)); 5778 5779 return 0; 5780 } 5781 5782 int ath11k_dp_rx_pktlog_stop(struct ath11k_base *ab, bool stop_timer) 5783 { 5784 int ret; 5785 5786 if (stop_timer) 5787 del_timer_sync(&ab->mon_reap_timer); 5788 5789 /* reap all the monitor related rings */ 5790 ret = ath11k_dp_purge_mon_ring(ab); 5791 if (ret) { 5792 ath11k_warn(ab, "failed to purge dp mon ring: %d\n", ret); 5793 return ret; 5794 } 5795 5796 return 0; 5797 } 5798