// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 */

#include <linux/ieee80211.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <crypto/hash.h>
#include "core.h"
#include "debug.h"
#include "debugfs_htt_stats.h"
#include "debugfs_sta.h"
#include "hal_desc.h"
#include "hw.h"
#include "dp_rx.h"
#include "hal_rx.h"
#include "dp_tx.h"
#include "peer.h"

#define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)

static inline
u8 *ath11k_dp_rx_h_80211_hdr(struct ath11k_base *ab, struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_hdr_status(desc);
}

static inline
enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_base *ab,
							 struct hal_rx_desc *desc)
{
	if (!ab->hw_params.hw_ops->rx_desc_encrypt_valid(desc))
		return HAL_ENCRYPT_TYPE_OPEN;

	return ab->hw_params.hw_ops->rx_desc_get_encrypt_type(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_decap_type(struct ath11k_base *ab,
						       struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_decap_type(desc);
}

static inline
u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base *ab,
					      struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mesh_ctl(desc);
}

static inline
bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct ath11k_base *ab,
					      struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
}

static inline bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct ath11k_base *ab,
						      struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_fc_valid(desc);
}

static inline bool ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base *ab,
							 struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
	return ieee80211_has_morefrags(hdr->frame_control);
}

static inline u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base *ab,
						    struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
	return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
}

static inline u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct ath11k_base *ab,
						   struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_start_seq_no(desc);
}

static inline void *ath11k_dp_rx_get_attention(struct ath11k_base *ab,
					       struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_attention(desc);
}

static inline bool ath11k_dp_rx_h_attn_msdu_done(struct rx_attention *attn)
{
	return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE,
			   __le32_to_cpu(attn->info2));
}

static inline bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct rx_attention *attn)
{
	return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL,
			   __le32_to_cpu(attn->info1));
}

static inline bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct rx_attention *attn)
{
	return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL,
			   __le32_to_cpu(attn->info1));
}

static inline bool ath11k_dp_rx_h_attn_is_decrypted(struct rx_attention *attn)
{
	return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
			  __le32_to_cpu(attn->info2)) ==
		RX_DESC_DECRYPT_STATUS_CODE_OK);
}
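/* Collapse the per-bit error flags in the attention TLV's info1 word into
 * the driver's DP_RX_MPDU_ERR_* bitmap, so callers can test for FCS,
 * decrypt, TKIP MIC, A-MSDU, overflow and length errors in one place.
 */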
static u32 ath11k_dp_rx_h_attn_mpdu_err(struct rx_attention *attn)
{
	u32 info = __le32_to_cpu(attn->info1);
	u32 errmap = 0;

	if (info & RX_ATTENTION_INFO1_FCS_ERR)
		errmap |= DP_RX_MPDU_ERR_FCS;

	if (info & RX_ATTENTION_INFO1_DECRYPT_ERR)
		errmap |= DP_RX_MPDU_ERR_DECRYPT;

	if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR)
		errmap |= DP_RX_MPDU_ERR_TKIP_MIC;

	if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR)
		errmap |= DP_RX_MPDU_ERR_AMSDU_ERR;

	if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR)
		errmap |= DP_RX_MPDU_ERR_OVERFLOW;

	if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR)
		errmap |= DP_RX_MPDU_ERR_MSDU_LEN;

	if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR)
		errmap |= DP_RX_MPDU_ERR_MPDU_LEN;

	return errmap;
}

static bool ath11k_dp_rx_h_attn_msdu_len_err(struct ath11k_base *ab,
					     struct hal_rx_desc *desc)
{
	struct rx_attention *rx_attention;
	u32 errmap;

	rx_attention = ath11k_dp_rx_get_attention(ab, desc);
	errmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);

	return errmap & DP_RX_MPDU_ERR_MSDU_LEN;
}

static inline u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct ath11k_base *ab,
						     struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_len(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_sgi(struct ath11k_base *ab,
					       struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_sgi(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct ath11k_base *ab,
						    struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_rate_mcs(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct ath11k_base *ab,
						 struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_rx_bw(desc);
}

static inline u32 ath11k_dp_rx_h_msdu_start_freq(struct ath11k_base *ab,
						 struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_freq(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct ath11k_base *ab,
						    struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_pkt_type(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_nss(struct ath11k_base *ab,
					       struct hal_rx_desc *desc)
{
	return hweight8(ab->hw_params.hw_ops->rx_desc_get_msdu_nss(desc));
}

static inline u8 ath11k_dp_rx_h_mpdu_start_tid(struct ath11k_base *ab,
					       struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_tid(desc);
}

static inline u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct ath11k_base *ab,
						    struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_peer_id(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_end_l3pad(struct ath11k_base *ab,
					       struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_l3_pad_bytes(desc);
}

static inline bool ath11k_dp_rx_h_msdu_end_first_msdu(struct ath11k_base *ab,
						      struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_first_msdu(desc);
}

static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct ath11k_base *ab,
					      struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_last_msdu(desc);
}

static void ath11k_dp_rx_desc_end_tlv_copy(struct ath11k_base *ab,
					   struct hal_rx_desc *fdesc,
					   struct hal_rx_desc *ldesc)
{
	ab->hw_params.hw_ops->rx_desc_copy_attn_end_tlv(fdesc, ldesc);
}

static inline u32 ath11k_dp_rxdesc_get_mpdulen_err(struct rx_attention *attn)
{
	return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR,
			 __le32_to_cpu(attn->info1));
}

static inline u8 *ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base *ab,
						struct hal_rx_desc *rx_desc)
{
	u8 *rx_pkt_hdr;

	rx_pkt_hdr = ab->hw_params.hw_ops->rx_desc_get_msdu_payload(rx_desc);

	return rx_pkt_hdr;
}

static inline bool ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base *ab,
					       struct hal_rx_desc *rx_desc)
{
	u32 tlv_tag;

	tlv_tag = ab->hw_params.hw_ops->rx_desc_get_mpdu_start_tag(rx_desc);

	return tlv_tag == HAL_RX_MPDU_START;
}

static inline u32 ath11k_dp_rxdesc_get_ppduid(struct ath11k_base *ab,
					      struct hal_rx_desc *rx_desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_ppdu_id(rx_desc);
}

static inline void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab,
						 struct hal_rx_desc *desc,
						 u16 len)
{
	ab->hw_params.hw_ops->rx_desc_set_msdu_len(desc, len);
}

static bool ath11k_dp_rx_h_attn_is_mcbc(struct ath11k_base *ab,
					struct hal_rx_desc *desc)
{
	struct rx_attention *attn = ath11k_dp_rx_get_attention(ab, desc);

	return ath11k_dp_rx_h_msdu_end_first_msdu(ab, desc) &&
		(!!FIELD_GET(RX_ATTENTION_INFO1_MCAST_BCAST,
		 __le32_to_cpu(attn->info1)));
}

static bool ath11k_dp_rxdesc_mac_addr2_valid(struct ath11k_base *ab,
					     struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_mac_addr2_valid(desc);
}

static u8 *ath11k_dp_rxdesc_mpdu_start_addr2(struct ath11k_base *ab,
					     struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_mpdu_start_addr2(desc);
}

static void ath11k_dp_service_mon_ring(struct timer_list *t)
{
	struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer);
	int i;

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++)
		ath11k_dp_rx_process_mon_rings(ab, i, NULL, DP_MON_SERVICE_BUDGET);

	mod_timer(&ab->mon_reap_timer, jiffies +
		  msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
}

static int ath11k_dp_purge_mon_ring(struct ath11k_base *ab)
{
	int i, reaped = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS);

	do {
		for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++)
			reaped += ath11k_dp_rx_process_mon_rings(ab, i,
								 NULL,
								 DP_MON_SERVICE_BUDGET);

		/* nothing more to reap */
		if (reaped < DP_MON_SERVICE_BUDGET)
			return 0;

	} while (time_before(jiffies, timeout));

	ath11k_warn(ab, "dp mon ring purge timeout");

	return -ETIMEDOUT;
}
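/* Each buffer posted to the RXDMA refill ring carries a cookie that packs
 * the pdev (mac) id and the idr-allocated buffer id; on completion the
 * cookie is used to look the skb back up in rx_ring->bufs_idr.
 */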
/* Returns number of Rx buffers replenished */
int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
			       struct dp_rxdma_ring *rx_ring,
			       int req_entries,
			       enum hal_rx_buf_return_buf_manager mgr)
{
	struct hal_srng *srng;
	u32 *desc;
	struct sk_buff *skb;
	int num_free;
	int num_remain;
	int buf_id;
	u32 cookie;
	dma_addr_t paddr;

	req_entries = min(req_entries, rx_ring->bufs_max);

	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
	if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
		req_entries = num_free;

	req_entries = min(num_free, req_entries);
	num_remain = req_entries;

	while (num_remain > 0) {
		skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
				    DP_RX_BUFFER_ALIGN_SIZE);
		if (!skb)
			break;

		if (!IS_ALIGNED((unsigned long)skb->data,
				DP_RX_BUFFER_ALIGN_SIZE)) {
			skb_pull(skb,
				 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
				 skb->data);
		}

		paddr = dma_map_single(ab->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(ab->dev, paddr))
			goto fail_free_skb;

		spin_lock_bh(&rx_ring->idr_lock);
		buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
				   rx_ring->bufs_max * 3, GFP_ATOMIC);
		spin_unlock_bh(&rx_ring->idr_lock);
		if (buf_id < 0)
			goto fail_dma_unmap;

		desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
		if (!desc)
			goto fail_idr_remove;

		ATH11K_SKB_RXCB(skb)->paddr = paddr;

		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

		num_remain--;

		ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;

fail_idr_remove:
	spin_lock_bh(&rx_ring->idr_lock);
	idr_remove(&rx_ring->bufs_idr, buf_id);
	spin_unlock_bh(&rx_ring->idr_lock);
fail_dma_unmap:
	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
			 DMA_FROM_DEVICE);
fail_free_skb:
	dev_kfree_skb_any(skb);

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;
}

static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar,
					 struct dp_rxdma_ring *rx_ring)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct sk_buff *skb;
	int buf_id;

	spin_lock_bh(&rx_ring->idr_lock);
	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
		idr_remove(&rx_ring->bufs_idr, buf_id);
		/* TODO: Understand where internal driver does this dma_unmap
		 * of rxdma_buffer.
		 */
		dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	idr_destroy(&rx_ring->bufs_idr);
	spin_unlock_bh(&rx_ring->idr_lock);

	/* if rxdma1_enable is false, mon_status_refill_ring
	 * isn't setup, so don't clean.
	 */
	if (!ar->ab->hw_params.rxdma1_enable)
		return 0;

	rx_ring = &dp->rx_mon_status_refill_ring[0];

	spin_lock_bh(&rx_ring->idr_lock);
	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
		idr_remove(&rx_ring->bufs_idr, buf_id);
		/* XXX: Understand where internal driver does this dma_unmap
		 * of rxdma_buffer.
		 */
		dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_BIDIRECTIONAL);
		dev_kfree_skb_any(skb);
	}

	idr_destroy(&rx_ring->bufs_idr);
	spin_unlock_bh(&rx_ring->idr_lock);

	return 0;
}

static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
	int i;

	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);

	rx_ring = &dp->rxdma_mon_buf_ring;
	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		rx_ring = &dp->rx_mon_status_refill_ring[i];
		ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
	}

	return 0;
}

static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar,
					  struct dp_rxdma_ring *rx_ring,
					  u32 ringtype)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	int num_entries;

	num_entries = rx_ring->refill_buf_ring.size /
		ath11k_hal_srng_get_entrysize(ar->ab, ringtype);

	rx_ring->bufs_max = num_entries;
	ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries,
				   ar->ab->hw_params.hal_params->rx_buf_rbm);
	return 0;
}

static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
	int i;

	ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF);

	if (ar->ab->hw_params.rxdma1_enable) {
		rx_ring = &dp->rxdma_mon_buf_ring;
		ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF);
	}

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		rx_ring = &dp->rx_mon_status_refill_ring[i];
		ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS);
	}

	return 0;
}

static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	int i;

	ath11k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		if (ab->hw_params.rx_mac_buf_ring)
			ath11k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);

		ath11k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);
		ath11k_dp_srng_cleanup(ab,
				       &dp->rx_mon_status_refill_ring[i].refill_buf_ring);
	}

	ath11k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
}

void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;

	for (i = 0; i < DP_REO_DST_RING_MAX; i++)
		ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
}

int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int ret;
	int i;

	for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
		ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
					   HAL_REO_DST, i, 0,
					   DP_REO_DST_RING_SIZE);
		if (ret) {
			ath11k_warn(ab, "failed to setup reo_dst_ring\n");
			goto err_reo_cleanup;
		}
	}

	return 0;

err_reo_cleanup:
	ath11k_dp_pdev_reo_cleanup(ab);

	return ret;
}

static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	struct dp_srng *srng = NULL;
	int i;
	int ret;
	ret = ath11k_dp_srng_setup(ar->ab,
				   &dp->rx_refill_buf_ring.refill_buf_ring,
				   HAL_RXDMA_BUF, 0,
				   dp->mac_id, DP_RXDMA_BUF_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n");
		return ret;
	}

	if (ar->ab->hw_params.rx_mac_buf_ring) {
		for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
			ret = ath11k_dp_srng_setup(ar->ab,
						   &dp->rx_mac_buf_ring[i],
						   HAL_RXDMA_BUF, 1,
						   dp->mac_id + i, 1024);
			if (ret) {
				ath11k_warn(ar->ab, "failed to setup rx_mac_buf_ring %d\n",
					    i);
				return ret;
			}
		}
	}

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring[i],
					   HAL_RXDMA_DST, 0, dp->mac_id + i,
					   DP_RXDMA_ERR_DST_RING_SIZE);
		if (ret) {
			ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring %d\n", i);
			return ret;
		}
	}

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
		ret = ath11k_dp_srng_setup(ar->ab,
					   srng,
					   HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id + i,
					   DP_RXDMA_MON_STATUS_RING_SIZE);
		if (ret) {
			ath11k_warn(ar->ab,
				    "failed to setup rx_mon_status_refill_ring %d\n", i);
			return ret;
		}
	}

	/* if rxdma1_enable is false, then it doesn't need
	 * to setup rxdma_mon_buf_ring, rxdma_mon_dst_ring
	 * and rxdma_mon_desc_ring.
	 * init reap timer for QCA6390.
	 */
	if (!ar->ab->hw_params.rxdma1_enable) {
		/* init mon status buffer reap timer */
		timer_setup(&ar->ab->mon_reap_timer,
			    ath11k_dp_service_mon_ring, 0);
		return 0;
	}

	ret = ath11k_dp_srng_setup(ar->ab,
				   &dp->rxdma_mon_buf_ring.refill_buf_ring,
				   HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_BUF_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_BUF\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring,
				   HAL_RXDMA_MONITOR_DST, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_DST_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_DST\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring,
				   HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_DESC_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_DESC\n");
		return ret;
	}

	return 0;
}

void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct dp_reo_cmd *cmd, *tmp;
	struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache;

	spin_lock_bh(&dp->reo_cmd_lock);
	list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
		list_del(&cmd->list);
		dma_unmap_single(ab->dev, cmd->data.paddr,
				 cmd->data.size, DMA_BIDIRECTIONAL);
		kfree(cmd->data.vaddr);
		kfree(cmd);
	}

	list_for_each_entry_safe(cmd_cache, tmp_cache,
				 &dp->reo_cmd_cache_flush_list, list) {
		list_del(&cmd_cache->list);
		dp->reo_cmd_cache_flush_count--;
		dma_unmap_single(ab->dev, cmd_cache->data.paddr,
				 cmd_cache->data.size, DMA_BIDIRECTIONAL);
		kfree(cmd_cache->data.vaddr);
		kfree(cmd_cache);
	}
	spin_unlock_bh(&dp->reo_cmd_lock);
}
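/* When an rx TID is torn down, its REO queue descriptor cannot be freed
 * right away: the queue is first invalidated via HAL_REO_CMD_UPDATE_RX_QUEUE,
 * the descriptor is parked on reo_cmd_cache_flush_list, and only once the
 * list grows past DP_REO_DESC_FREE_THRESHOLD or the entry ages beyond
 * DP_REO_DESC_FREE_TIMEOUT_MS is HAL_REO_CMD_FLUSH_CACHE issued and the
 * host memory unmapped and released.
 */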
"failed to flush rx tid hw desc, tid %d status %d\n", 716 rx_tid->tid, status); 717 718 dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size, 719 DMA_BIDIRECTIONAL); 720 kfree(rx_tid->vaddr); 721 } 722 723 static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab, 724 struct dp_rx_tid *rx_tid) 725 { 726 struct ath11k_hal_reo_cmd cmd = {0}; 727 unsigned long tot_desc_sz, desc_sz; 728 int ret; 729 730 tot_desc_sz = rx_tid->size; 731 desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID); 732 733 while (tot_desc_sz > desc_sz) { 734 tot_desc_sz -= desc_sz; 735 cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz); 736 cmd.addr_hi = upper_32_bits(rx_tid->paddr); 737 ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid, 738 HAL_REO_CMD_FLUSH_CACHE, &cmd, 739 NULL); 740 if (ret) 741 ath11k_warn(ab, 742 "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n", 743 rx_tid->tid, ret); 744 } 745 746 memset(&cmd, 0, sizeof(cmd)); 747 cmd.addr_lo = lower_32_bits(rx_tid->paddr); 748 cmd.addr_hi = upper_32_bits(rx_tid->paddr); 749 cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS; 750 ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid, 751 HAL_REO_CMD_FLUSH_CACHE, 752 &cmd, ath11k_dp_reo_cmd_free); 753 if (ret) { 754 ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n", 755 rx_tid->tid, ret); 756 dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, 757 DMA_BIDIRECTIONAL); 758 kfree(rx_tid->vaddr); 759 } 760 } 761 762 static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx, 763 enum hal_reo_cmd_status status) 764 { 765 struct ath11k_base *ab = dp->ab; 766 struct dp_rx_tid *rx_tid = ctx; 767 struct dp_reo_cache_flush_elem *elem, *tmp; 768 769 if (status == HAL_REO_CMD_DRAIN) { 770 goto free_desc; 771 } else if (status != HAL_REO_CMD_SUCCESS) { 772 /* Shouldn't happen! Cleanup in case of other failure? 
		 */
		ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
			    rx_tid->tid, status);
		return;
	}

	elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
	if (!elem)
		goto free_desc;

	elem->ts = jiffies;
	memcpy(&elem->data, rx_tid, sizeof(*rx_tid));

	spin_lock_bh(&dp->reo_cmd_lock);
	list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
	dp->reo_cmd_cache_flush_count++;

	/* Flush and invalidate aged REO desc from HW cache */
	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
				 list) {
		if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD ||
		    time_after(jiffies, elem->ts +
			       msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) {
			list_del(&elem->list);
			dp->reo_cmd_cache_flush_count--;
			spin_unlock_bh(&dp->reo_cmd_lock);

			ath11k_dp_reo_cache_flush(ab, &elem->data);
			kfree(elem);
			spin_lock_bh(&dp->reo_cmd_lock);
		}
	}
	spin_unlock_bh(&dp->reo_cmd_lock);

	return;
free_desc:
	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);
}

void ath11k_peer_rx_tid_delete(struct ath11k *ar,
			       struct ath11k_peer *peer, u8 tid)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	int ret;

	if (!rx_tid->active)
		return;

	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
					ath11k_dp_rx_tid_del_func);
	if (ret) {
		ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
			   tid, ret);
		dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->vaddr);
	}

	rx_tid->active = false;
}

static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
					 u32 *link_desc,
					 enum hal_wbm_rel_bm_act action)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng;
	u32 *desc;
	int ret = 0;

	srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOBUFS;
		goto exit;
	}

	ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc,
					 action);

exit:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return ret;
}

static void ath11k_dp_rx_frags_cleanup(struct dp_rx_tid *rx_tid, bool rel_link_desc)
{
	struct ath11k_base *ab = rx_tid->ab;

	lockdep_assert_held(&ab->base_lock);

	if (rx_tid->dst_ring_desc) {
		if (rel_link_desc)
			ath11k_dp_rx_link_desc_return(ab, (u32 *)rx_tid->dst_ring_desc,
						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
		kfree(rx_tid->dst_ring_desc);
		rx_tid->dst_ring_desc = NULL;
	}

	rx_tid->cur_sn = 0;
	rx_tid->last_frag_no = 0;
	rx_tid->rx_frag_bitmap = 0;
	__skb_queue_purge(&rx_tid->rx_frags);
}

void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer)
{
	struct dp_rx_tid *rx_tid;
	int i;

	lockdep_assert_held(&ar->ab->base_lock);

	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
		rx_tid = &peer->rx_tid[i];

		spin_unlock_bh(&ar->ab->base_lock);
		del_timer_sync(&rx_tid->frag_timer);
		spin_lock_bh(&ar->ab->base_lock);

		ath11k_dp_rx_frags_cleanup(rx_tid, true);
	}
}

void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
{
	struct dp_rx_tid *rx_tid;
	int i;

	lockdep_assert_held(&ar->ab->base_lock);

	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
		rx_tid = &peer->rx_tid[i];

		ath11k_peer_rx_tid_delete(ar, peer, i);
		ath11k_dp_rx_frags_cleanup(rx_tid, true);

		spin_unlock_bh(&ar->ab->base_lock);
		del_timer_sync(&rx_tid->frag_timer);
		spin_lock_bh(&ar->ab->base_lock);
	}
}

static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar,
					 struct ath11k_peer *peer,
					 struct dp_rx_tid *rx_tid,
					 u32 ba_win_sz, u16 ssn,
					 bool update_ssn)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	int ret;

	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
	cmd.ba_window_size = ba_win_sz;

	if (update_ssn) {
		cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
		cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn);
	}

	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
					NULL);
	if (ret) {
		ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
			    rx_tid->tid, ret);
		return ret;
	}

	rx_tid->ba_win_sz = ba_win_sz;

	return 0;
}

static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
				      const u8 *peer_mac, int vdev_id, u8 tid)
{
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n");
		goto unlock_exit;
	}

	rx_tid = &peer->rx_tid[tid];
	if (!rx_tid->active)
		goto unlock_exit;

	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);

	rx_tid->active = false;

unlock_exit:
	spin_unlock_bh(&ab->base_lock);
}

int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
			     u8 tid, u32 ba_win_sz, u16 ssn,
			     enum hal_pn_type pn_type)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;
	u32 hw_desc_sz;
	u32 *addr_aligned;
	void *vaddr;
	dma_addr_t paddr;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to set up rx tid\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	rx_tid = &peer->rx_tid[tid];
	/* Update the tid queue if it is already setup */
	if (rx_tid->active) {
		paddr = rx_tid->paddr;
		ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid,
						    ba_win_sz, ssn, true);
		spin_unlock_bh(&ab->base_lock);
		if (ret) {
			ath11k_warn(ab, "failed to update reo for rx tid %d\n", tid);
			return ret;
		}

		ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
							     peer_mac, paddr,
							     tid, 1, ba_win_sz);
		if (ret)
			ath11k_warn(ab, "failed to send wmi command to update rx reorder queue, tid :%d (%d)\n",
				    tid, ret);
		return ret;
	}

	rx_tid->tid = tid;

	rx_tid->ba_win_sz = ba_win_sz;

	/* TODO: Optimize the memory allocation for qos tid based on
	 * the actual BA window size in REO tid update path.
	 */
	if (tid == HAL_DESC_REO_NON_QOS_TID)
		hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid);
	else
		hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);

	vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
	if (!vaddr) {
		spin_unlock_bh(&ab->base_lock);
		return -ENOMEM;
	}

	addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);

	ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
				   ssn, pn_type);

	paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
			       DMA_BIDIRECTIONAL);

	ret = dma_mapping_error(ab->dev, paddr);
	if (ret) {
		spin_unlock_bh(&ab->base_lock);
		goto err_mem_free;
	}

	rx_tid->vaddr = vaddr;
	rx_tid->paddr = paddr;
	rx_tid->size = hw_desc_sz;
	rx_tid->active = true;

	spin_unlock_bh(&ab->base_lock);

	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
						     paddr, tid, 1, ba_win_sz);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup rx reorder queue, tid :%d (%d)\n",
			    tid, ret);
		ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid);
	}

	return ret;

err_mem_free:
	kfree(vaddr);

	return ret;
}

int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
			     struct ieee80211_ampdu_params *params)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
	int vdev_id = arsta->arvif->vdev_id;
	int ret;

	ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id,
				       params->tid, params->buf_size,
				       params->ssn, arsta->pn_type);
	if (ret)
		ath11k_warn(ab, "failed to setup rx tid %d\n", ret);

	return ret;
}

int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
			    struct ieee80211_ampdu_params *params)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
	int vdev_id = arsta->arvif->vdev_id;
	dma_addr_t paddr;
	bool active;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, params->sta->addr);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	paddr = peer->rx_tid[params->tid].paddr;
	active = peer->rx_tid[params->tid].active;

	if (!active) {
		spin_unlock_bh(&ab->base_lock);
		return 0;
	}

	ret = ath11k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
	spin_unlock_bh(&ab->base_lock);
	if (ret) {
		ath11k_warn(ab, "failed to update reo for rx tid %d: %d\n",
			    params->tid, ret);
		return ret;
	}

	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
						     params->sta->addr, paddr,
						     params->tid, 1, 1);
	if (ret)
		ath11k_warn(ab, "failed to send wmi to delete rx tid %d\n",
			    ret);

	return ret;
}

int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif,
				       const u8 *peer_addr,
				       enum set_key_cmd key_cmd,
				       struct ieee80211_key_conf *key)
{
	struct ath11k *ar = arvif->ar;
	struct ath11k_base *ab = ar->ab;
	struct ath11k_hal_reo_cmd cmd = {0};
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;
	u8 tid;
	int ret = 0;

	/* NOTE: Enable PN/TSC replay check offload only for unicast frames.
	 * We use mac80211 PN/TSC replay check functionality for bcast/mcast
	 * for now.
	 */
	if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		return 0;

	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.upd0 |= HAL_REO_CMD_UPD0_PN |
		    HAL_REO_CMD_UPD0_PN_SIZE |
		    HAL_REO_CMD_UPD0_PN_VALID |
		    HAL_REO_CMD_UPD0_PN_CHECK |
		    HAL_REO_CMD_UPD0_SVLD;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_CCMP_256:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		if (key_cmd == SET_KEY) {
			cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
			cmd.pn_size = 48;
		}
		break;
	default:
		break;
	}

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to configure pn replay detection\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
		rx_tid = &peer->rx_tid[tid];
		if (!rx_tid->active)
			continue;
		cmd.addr_lo = lower_32_bits(rx_tid->paddr);
		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
		ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
						HAL_REO_CMD_UPDATE_RX_QUEUE,
						&cmd, NULL);
		if (ret) {
			ath11k_warn(ab, "failed to configure rx tid %d queue for pn replay detection %d\n",
				    tid, ret);
			break;
		}
	}

	spin_unlock_bh(&ab->base_lock);

	return ret;
}

static inline int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
					     u16 peer_id)
{
	int i;

	for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
		if (ppdu_stats->user_stats[i].is_valid_peer_id) {
			if (peer_id == ppdu_stats->user_stats[i].peer_id)
				return i;
		} else {
			return i;
		}
	}

	return -EINVAL;
}

static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab,
					   u16 tag, u16 len, const void *ptr,
					   void *data)
{
	struct htt_ppdu_stats_info *ppdu_info;
	struct htt_ppdu_user_stats *user_stats;
	int cur_user;
	u16 peer_id;

	ppdu_info = (struct htt_ppdu_stats_info *)data;

	switch (tag) {
	case HTT_PPDU_STATS_TAG_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_common)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}
		memcpy((void *)&ppdu_info->ppdu_stats.common, ptr,
		       sizeof(struct htt_ppdu_stats_common));
		break;
	case HTT_PPDU_STATS_TAG_USR_RATE:
		if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->rate, ptr,
		       sizeof(struct htt_ppdu_stats_user_rate));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->cmpltn_cmn, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
		if (len <
		    sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id =
		((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->ack_ba, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
		user_stats->tlv_flags |= BIT(tag);
		break;
	}
	return 0;
}

int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
			   int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
				       const void *ptr, void *data),
			   void *data)
{
	const struct htt_tlv *tlv;
	const void *begin = ptr;
	u16 tlv_tag, tlv_len;
	int ret = -EINVAL;

	while (len > 0) {
		if (len < sizeof(*tlv)) {
			ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}
		tlv = (struct htt_tlv *)ptr;
		tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header);
		tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		if (tlv_len > len) {
			ath11k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}
		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
		if (ret == -ENOMEM)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}
	return 0;
}

static inline u32 ath11k_he_gi_to_nl80211_he_gi(u8 sgi)
{
	u32 ret = 0;

	switch (sgi) {
	case RX_MSDU_START_SGI_0_8_US:
		ret = NL80211_RATE_INFO_HE_GI_0_8;
		break;
	case RX_MSDU_START_SGI_1_6_US:
		ret = NL80211_RATE_INFO_HE_GI_1_6;
		break;
	case RX_MSDU_START_SGI_3_2_US:
		ret = NL80211_RATE_INFO_HE_GI_3_2;
		break;
	}

	return ret;
}
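/* Translate one user's HTT PPDU stats TLVs (rate, completion common and
 * ACK/BA status) into mac80211 rate_info for the corresponding station and,
 * when extended tx stats are enabled in debugfs, accumulate per-peer
 * success/retry counters.
 */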
static void
ath11k_update_per_peer_tx_stats(struct ath11k *ar,
				struct htt_ppdu_stats *ppdu_stats, u8 user)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct ieee80211_sta *sta;
	struct ath11k_sta *arsta;
	struct htt_ppdu_stats_user_rate *user_rate;
	struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
	struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
	struct htt_ppdu_stats_common *common = &ppdu_stats->common;
	int ret;
	u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
	u32 succ_bytes = 0;
	u16 rate = 0, succ_pkts = 0;
	u32 tx_duration = 0;
	u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
	bool is_ampdu = false;

	if (!usr_stats)
		return;

	if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
		return;

	if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
		is_ampdu =
			HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);

	if (usr_stats->tlv_flags &
	    BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
		succ_bytes = usr_stats->ack_ba.success_bytes;
		succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M,
				      usr_stats->ack_ba.info);
		tid = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM,
				usr_stats->ack_ba.info);
	}

	if (common->fes_duration_us)
		tx_duration = common->fes_duration_us;

	user_rate = &usr_stats->rate;
	flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
	bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
	nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
	mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
	sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
	dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);

	/* Note: If host configured fixed rates and in some other special
	 * cases, the broadcast/management frames are sent in different rates.
	 * Firmware rate's control to be skipped for this?
	 */

	if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH11K_HE_MCS_MAX) {
		ath11k_warn(ab, "Invalid HE mcs %d peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH11K_VHT_MCS_MAX) {
		ath11k_warn(ab, "Invalid VHT mcs %d peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH11K_HT_MCS_MAX || nss < 1)) {
		ath11k_warn(ab, "Invalid HT mcs %d nss %d peer stats",
			    mcs, nss);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
		ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
							    flags,
							    &rate_idx,
							    &rate);
		if (ret < 0)
			return;
	}

	rcu_read_lock();
	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id);

	if (!peer || !peer->sta) {
		spin_unlock_bh(&ab->base_lock);
		rcu_read_unlock();
		return;
	}

	sta = peer->sta;
	arsta = (struct ath11k_sta *)sta->drv_priv;

	memset(&arsta->txrate, 0, sizeof(arsta->txrate));

	switch (flags) {
	case WMI_RATE_PREAMBLE_OFDM:
		arsta->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_CCK:
		arsta->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_HT:
		arsta->txrate.mcs = mcs + 8 * (nss - 1);
		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_VHT:
		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_HE:
		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
		arsta->txrate.he_dcm = dcm;
		arsta->txrate.he_gi = ath11k_he_gi_to_nl80211_he_gi(sgi);
		arsta->txrate.he_ru_alloc = ath11k_he_ru_tones_to_nl80211_he_ru_alloc(
						(user_rate->ru_end -
						 user_rate->ru_start) + 1);
		break;
	}

	arsta->txrate.nss = nss;
	arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
	arsta->tx_duration += tx_duration;
	memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));

	/* PPDU stats reported for mgmt packet doesn't have valid tx bytes.
	 * So skip peer stats update for mgmt packets.
	 */
	if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
		memset(peer_stats, 0, sizeof(*peer_stats));
		peer_stats->succ_pkts = succ_pkts;
		peer_stats->succ_bytes = succ_bytes;
		peer_stats->is_ampdu = is_ampdu;
		peer_stats->duration = tx_duration;
		peer_stats->ba_fails =
			HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
			HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);

		if (ath11k_debugfs_is_extd_tx_stats_enabled(ar))
			ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx);
	}

	spin_unlock_bh(&ab->base_lock);
	rcu_read_unlock();
}

static void ath11k_htt_update_ppdu_stats(struct ath11k *ar,
					 struct htt_ppdu_stats *ppdu_stats)
{
	u8 user;

	for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
		ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user);
}

static
struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
							 u32 ppdu_id)
{
	struct htt_ppdu_stats_info *ppdu_info;

	spin_lock_bh(&ar->data_lock);
	if (!list_empty(&ar->ppdu_stats_info)) {
		list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
			if (ppdu_info->ppdu_id == ppdu_id) {
				spin_unlock_bh(&ar->data_lock);
				return ppdu_info;
			}
		}

		if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
			ppdu_info = list_first_entry(&ar->ppdu_stats_info,
						     typeof(*ppdu_info), list);
			list_del(&ppdu_info->list);
			ar->ppdu_stat_list_depth--;
			ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
			kfree(ppdu_info);
		}
	}
	spin_unlock_bh(&ar->data_lock);

	ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
	if (!ppdu_info)
		return NULL;

	spin_lock_bh(&ar->data_lock);
	list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
	ar->ppdu_stat_list_depth++;
	spin_unlock_bh(&ar->data_lock);

	return ppdu_info;
}

static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab,
				      struct sk_buff *skb)
{
	struct ath11k_htt_ppdu_stats_msg *msg;
	struct htt_ppdu_stats_info *ppdu_info;
	struct ath11k *ar;
	int ret;
	u8 pdev_id;
	u32 ppdu_id, len;

	msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data;
	len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info);
	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info);
	ppdu_id = msg->ppdu_id;

	rcu_read_lock();
	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
	if (!ar) {
		ret = -EINVAL;
		goto exit;
	}

	if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar))
		trace_ath11k_htt_ppdu_stats(ar, skb->data, len);

	ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id);
	if (!ppdu_info) {
		ret = -EINVAL;
		goto exit;
	}

	ppdu_info->ppdu_id = ppdu_id;
	ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len,
				     ath11k_htt_tlv_ppdu_stats_parse,
				     (void *)ppdu_info);
	if (ret) {
		ath11k_warn(ab, "Failed to parse tlv %d\n", ret);
		goto exit;
	}

exit:
	rcu_read_unlock();

	return ret;
}

static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data;
	struct ath_pktlog_hdr *hdr = (struct ath_pktlog_hdr *)data;
	struct ath11k *ar;
	u8 pdev_id;

	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr);
	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
	if (!ar) {
		ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id);
		return;
	}

	trace_ath11k_htt_pktlog(ar, data->payload, hdr->size,
				ar->ab->pktlog_defs_checksum);
}

static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
						  struct sk_buff *skb)
{
	u32 *data = (u32 *)skb->data;
	u8 pdev_id, ring_type, ring_id, pdev_idx;
	u16 hp, tp;
	u32 backpressure_time;
	struct ath11k_bp_stats *bp_stats;

	pdev_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_PDEV_ID_M, *data);
	ring_type = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_TYPE_M, *data);
	ring_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_ID_M, *data);
	++data;

	hp = FIELD_GET(HTT_BACKPRESSURE_EVENT_HP_M, *data);
	tp = FIELD_GET(HTT_BACKPRESSURE_EVENT_TP_M, *data);
	++data;

	backpressure_time = *data;

	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "htt backpressure event, pdev %d, ring type %d, ring id %d, hp %d tp %d, backpressure time %d\n",
		   pdev_id, ring_type, ring_id, hp, tp, backpressure_time);

	if (ring_type == HTT_BACKPRESSURE_UMAC_RING_TYPE) {
		if (ring_id >= HTT_SW_UMAC_RING_IDX_MAX)
			return;

		bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[ring_id];
	} else if (ring_type == HTT_BACKPRESSURE_LMAC_RING_TYPE) {
		pdev_idx = DP_HW2SW_MACID(pdev_id);

		if (ring_id >= HTT_SW_LMAC_RING_IDX_MAX || pdev_idx >= MAX_RADIOS)
			return;

		bp_stats = &ab->soc_stats.bp_stats.lmac_ring_bp_stats[ring_id][pdev_idx];
	} else {
		ath11k_warn(ab, "unknown ring type received in htt bp event %d\n",
			    ring_type);
		return;
	}

	spin_lock_bh(&ab->base_lock);
	bp_stats->hp = hp;
	bp_stats->tp = tp;
	bp_stats->count++;
	bp_stats->jiffies = jiffies;
	spin_unlock_bh(&ab->base_lock);
}

void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
				       struct sk_buff *skb)
{
	struct ath11k_dp *dp = &ab->dp;
	struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
	enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp);
	u16 peer_id;
	u8 vdev_id;
	u8 mac_addr[ETH_ALEN];
	u16 peer_mac_h16;
	u16 ast_hash;
	u16 hw_peer_id;

	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type);

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF:
		dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR,
						  resp->version_msg.version);
		dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR,
						  resp->version_msg.version);
		complete(&dp->htt_tgt_version_received);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP:
		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
				    resp->peer_map_ev.info);
		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
				    resp->peer_map_ev.info);
		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
					 resp->peer_map_ev.info1);
		ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
				       peer_mac_h16, mac_addr);
		ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP2:
		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
				    resp->peer_map_ev.info);
		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
				    resp->peer_map_ev.info);
		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
					 resp->peer_map_ev.info1);
		ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
				       peer_mac_h16, mac_addr);
		ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL,
				     resp->peer_map_ev.info2);
		hw_peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID,
				       resp->peer_map_ev.info1);
		ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
				      hw_peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
	case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
		peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID,
				    resp->peer_unmap_ev.info);
		ath11k_peer_unmap_event(ab, peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
		ath11k_htt_pull_ppdu_stats(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
		ath11k_debugfs_htt_ext_stats_handler(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG:
		ath11k_htt_pktlog(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
		ath11k_htt_backpressure_event_handler(ab, skb);
		break;
	default:
		ath11k_warn(ab, "htt event %d not handled\n", type);
		break;
	}

	dev_kfree_skb_any(skb);
}

static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar,
				      struct sk_buff_head *msdu_list,
				      struct sk_buff *first, struct sk_buff *last,
				      u8 l3pad_bytes, int msdu_len)
{
	struct ath11k_base *ab = ar->ab;
	struct sk_buff *skb;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
	int buf_first_hdr_len, buf_first_len;
	struct hal_rx_desc *ldesc;
	int space_extra, rem_len, buf_len;
	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;

	/* As the msdu is spread across multiple rx buffers,
	 * find the offset to the start of msdu for computing
	 * the length of the msdu in the first buffer.
	 */
	buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes;
	buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;

	if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
		skb_put(first, buf_first_hdr_len + msdu_len);
		skb_pull(first, buf_first_hdr_len);
		return 0;
	}

	ldesc = (struct hal_rx_desc *)last->data;
	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ab, ldesc);
	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ab, ldesc);

	/* MSDU spans over multiple buffers because the length of the MSDU
	 * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data
	 * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
	 */
	skb_put(first, DP_RX_BUFFER_SIZE);
	skb_pull(first, buf_first_hdr_len);

	/* When an MSDU spread over multiple buffers attention, MSDU_END and
	 * MPDU_END tlvs are valid only in the last buffer. Copy those tlvs.
	 */
	ath11k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc);

	space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
	if (space_extra > 0 &&
	    (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
		/* Free up all buffers of the MSDU */
		while ((skb = __skb_dequeue(msdu_list)) != NULL) {
			rxcb = ATH11K_SKB_RXCB(skb);
			if (!rxcb->is_continuation) {
				dev_kfree_skb_any(skb);
				break;
			}
			dev_kfree_skb_any(skb);
		}
		return -ENOMEM;
	}

	rem_len = msdu_len - buf_first_len;
	while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
		rxcb = ATH11K_SKB_RXCB(skb);
		if (rxcb->is_continuation)
			buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
		else
			buf_len = rem_len;

		if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) {
			WARN_ON_ONCE(1);
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		skb_put(skb, buf_len + hal_rx_desc_sz);
		skb_pull(skb, hal_rx_desc_sz);
		skb_copy_from_linear_data(skb, skb_put(first, buf_len),
					  buf_len);
		dev_kfree_skb_any(skb);

		rem_len -= buf_len;
		if (!rxcb->is_continuation)
			break;
	}

	return 0;
}

static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
						      struct sk_buff *first)
{
	struct sk_buff *skb;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);

	if (!rxcb->is_continuation)
		return first;

	skb_queue_walk(msdu_list, skb) {
		rxcb = ATH11K_SKB_RXCB(skb);
		if (!rxcb->is_continuation)
			return skb;
	}

	return NULL;
}

static void ath11k_dp_rx_h_csum_offload(struct ath11k *ar, struct sk_buff *msdu)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	struct rx_attention *rx_attention;
	bool ip_csum_fail, l4_csum_fail;

	rx_attention = ath11k_dp_rx_get_attention(ar->ab, rxcb->rx_desc);
	ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rx_attention);
	l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rx_attention);

	msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
			  CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
}
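/* Per-cipher lengths of the MIC, the IV/PN header and the ICV. The undecap
 * helpers below use these to strip or re-insert crypto fields depending on
 * which RX_FLAG_*_STRIPPED flags are set in the rx status.
 */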
static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar,
				       enum hal_encrypt_type enctype)
{
	switch (enctype) {
	case HAL_ENCRYPT_TYPE_OPEN:
	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
	case HAL_ENCRYPT_TYPE_TKIP_MIC:
		return 0;
	case HAL_ENCRYPT_TYPE_CCMP_128:
		return IEEE80211_CCMP_MIC_LEN;
	case HAL_ENCRYPT_TYPE_CCMP_256:
		return IEEE80211_CCMP_256_MIC_LEN;
	case HAL_ENCRYPT_TYPE_GCMP_128:
	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
		return IEEE80211_GCMP_MIC_LEN;
	case HAL_ENCRYPT_TYPE_WEP_40:
	case HAL_ENCRYPT_TYPE_WEP_104:
	case HAL_ENCRYPT_TYPE_WEP_128:
	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
	case HAL_ENCRYPT_TYPE_WAPI:
		break;
	}

	ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
	return 0;
}

static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar,
					 enum hal_encrypt_type enctype)
{
	switch (enctype) {
	case HAL_ENCRYPT_TYPE_OPEN:
		return 0;
	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
	case HAL_ENCRYPT_TYPE_TKIP_MIC:
		return IEEE80211_TKIP_IV_LEN;
	case HAL_ENCRYPT_TYPE_CCMP_128:
		return IEEE80211_CCMP_HDR_LEN;
	case HAL_ENCRYPT_TYPE_CCMP_256:
		return IEEE80211_CCMP_256_HDR_LEN;
	case HAL_ENCRYPT_TYPE_GCMP_128:
	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
		return IEEE80211_GCMP_HDR_LEN;
	case HAL_ENCRYPT_TYPE_WEP_40:
	case HAL_ENCRYPT_TYPE_WEP_104:
	case HAL_ENCRYPT_TYPE_WEP_128:
	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
	case HAL_ENCRYPT_TYPE_WAPI:
		break;
	}

	ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
	return 0;
}

static int ath11k_dp_rx_crypto_icv_len(struct ath11k *ar,
				       enum hal_encrypt_type enctype)
{
	switch (enctype) {
	case HAL_ENCRYPT_TYPE_OPEN:
	case HAL_ENCRYPT_TYPE_CCMP_128:
	case HAL_ENCRYPT_TYPE_CCMP_256:
	case HAL_ENCRYPT_TYPE_GCMP_128:
	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
		return 0;
	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
	case HAL_ENCRYPT_TYPE_TKIP_MIC:
		return IEEE80211_TKIP_ICV_LEN;
	case HAL_ENCRYPT_TYPE_WEP_40:
	case HAL_ENCRYPT_TYPE_WEP_104:
	case HAL_ENCRYPT_TYPE_WEP_128:
	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
	case HAL_ENCRYPT_TYPE_WAPI:
		break;
	}

	ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
	return 0;
}

static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar,
					 struct sk_buff *msdu,
					 u8 *first_hdr,
					 enum hal_encrypt_type enctype,
					 struct ieee80211_rx_status *status)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	u16 qos_ctl = 0;
	u8 *qos;

	/* copy SA & DA and pull decapped header */
	hdr = (struct ieee80211_hdr *)msdu->data;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
	skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control));

	if (rxcb->is_first_msdu) {
		/* original 802.11 header is valid for the first msdu
		 * hence we can reuse the same header
		 */
		hdr = (struct ieee80211_hdr *)first_hdr;
		hdr_len = ieee80211_hdrlen(hdr->frame_control);

		/* Each A-MSDU subframe will be reported as a separate MSDU,
* so strip the A-MSDU bit from QoS Ctl. 1997 */ 1998 if (ieee80211_is_data_qos(hdr->frame_control)) { 1999 qos = ieee80211_get_qos_ctl(hdr); 2000 qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; 2001 } 2002 } else { 2003 /* Rebuild qos header if this is a middle/last msdu */ 2004 hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA); 2005 2006 /* Reset the order bit as the HT_Control header is stripped */ 2007 hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER)); 2008 2009 qos_ctl = rxcb->tid; 2010 2011 if (ath11k_dp_rx_h_msdu_start_mesh_ctl_present(ar->ab, rxcb->rx_desc)) 2012 qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT; 2013 2014 /* TODO Add other QoS ctl fields when required */ 2015 2016 /* copy decap header before overwriting for reuse below */ 2017 memcpy(decap_hdr, (uint8_t *)hdr, hdr_len); 2018 } 2019 2020 if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 2021 memcpy(skb_push(msdu, 2022 ath11k_dp_rx_crypto_param_len(ar, enctype)), 2023 (void *)hdr + hdr_len, 2024 ath11k_dp_rx_crypto_param_len(ar, enctype)); 2025 } 2026 2027 if (!rxcb->is_first_msdu) { 2028 memcpy(skb_push(msdu, 2029 IEEE80211_QOS_CTL_LEN), &qos_ctl, 2030 IEEE80211_QOS_CTL_LEN); 2031 memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len); 2032 return; 2033 } 2034 2035 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 2036 2037 /* original 802.11 header has a different DA and in 2038 * case of 4addr it may also have different SA 2039 */ 2040 hdr = (struct ieee80211_hdr *)msdu->data; 2041 ether_addr_copy(ieee80211_get_DA(hdr), da); 2042 ether_addr_copy(ieee80211_get_SA(hdr), sa); 2043 } 2044 2045 static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu, 2046 enum hal_encrypt_type enctype, 2047 struct ieee80211_rx_status *status, 2048 bool decrypted) 2049 { 2050 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 2051 struct ieee80211_hdr *hdr; 2052 size_t hdr_len; 2053 size_t crypto_len; 2054 2055 if (!rxcb->is_first_msdu || 2056 !(rxcb->is_first_msdu && rxcb->is_last_msdu)) { 2057 WARN_ON_ONCE(1); 2058 return; 2059 } 2060 2061 skb_trim(msdu, msdu->len - FCS_LEN); 2062 2063 if (!decrypted) 2064 return; 2065 2066 hdr = (void *)msdu->data; 2067 2068 /* Tail */ 2069 if (status->flag & RX_FLAG_IV_STRIPPED) { 2070 skb_trim(msdu, msdu->len - 2071 ath11k_dp_rx_crypto_mic_len(ar, enctype)); 2072 2073 skb_trim(msdu, msdu->len - 2074 ath11k_dp_rx_crypto_icv_len(ar, enctype)); 2075 } else { 2076 /* MIC */ 2077 if (status->flag & RX_FLAG_MIC_STRIPPED) 2078 skb_trim(msdu, msdu->len - 2079 ath11k_dp_rx_crypto_mic_len(ar, enctype)); 2080 2081 /* ICV */ 2082 if (status->flag & RX_FLAG_ICV_STRIPPED) 2083 skb_trim(msdu, msdu->len - 2084 ath11k_dp_rx_crypto_icv_len(ar, enctype)); 2085 } 2086 2087 /* MMIC */ 2088 if ((status->flag & RX_FLAG_MMIC_STRIPPED) && 2089 !ieee80211_has_morefrags(hdr->frame_control) && 2090 enctype == HAL_ENCRYPT_TYPE_TKIP_MIC) 2091 skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN); 2092 2093 /* Head */ 2094 if (status->flag & RX_FLAG_IV_STRIPPED) { 2095 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2096 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); 2097 2098 memmove((void *)msdu->data + crypto_len, 2099 (void *)msdu->data, hdr_len); 2100 skb_pull(msdu, crypto_len); 2101 } 2102 } 2103 2104 static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar, 2105 struct sk_buff *msdu, 2106 enum hal_encrypt_type enctype) 2107 { 2108 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 2109 struct ieee80211_hdr *hdr; 2110 size_t hdr_len, crypto_len; 2111 void *rfc1042; 2112 bool 
is_amsdu; 2113 2114 is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu); 2115 hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(ar->ab, rxcb->rx_desc); 2116 rfc1042 = hdr; 2117 2118 if (rxcb->is_first_msdu) { 2119 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2120 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); 2121 2122 rfc1042 += hdr_len + crypto_len; 2123 } 2124 2125 if (is_amsdu) 2126 rfc1042 += sizeof(struct ath11k_dp_amsdu_subframe_hdr); 2127 2128 return rfc1042; 2129 } 2130 2131 static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar, 2132 struct sk_buff *msdu, 2133 u8 *first_hdr, 2134 enum hal_encrypt_type enctype, 2135 struct ieee80211_rx_status *status) 2136 { 2137 struct ieee80211_hdr *hdr; 2138 struct ethhdr *eth; 2139 size_t hdr_len; 2140 u8 da[ETH_ALEN]; 2141 u8 sa[ETH_ALEN]; 2142 void *rfc1042; 2143 2144 rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype); 2145 if (WARN_ON_ONCE(!rfc1042)) 2146 return; 2147 2148 /* pull decapped header and copy SA & DA */ 2149 eth = (struct ethhdr *)msdu->data; 2150 ether_addr_copy(da, eth->h_dest); 2151 ether_addr_copy(sa, eth->h_source); 2152 skb_pull(msdu, sizeof(struct ethhdr)); 2153 2154 /* push rfc1042/llc/snap */ 2155 memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042, 2156 sizeof(struct ath11k_dp_rfc1042_hdr)); 2157 2158 /* push original 802.11 header */ 2159 hdr = (struct ieee80211_hdr *)first_hdr; 2160 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2161 2162 if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 2163 memcpy(skb_push(msdu, 2164 ath11k_dp_rx_crypto_param_len(ar, enctype)), 2165 (void *)hdr + hdr_len, 2166 ath11k_dp_rx_crypto_param_len(ar, enctype)); 2167 } 2168 2169 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 2170 2171 /* original 802.11 header has a different DA and in 2172 * case of 4addr it may also have different SA 2173 */ 2174 hdr = (struct ieee80211_hdr *)msdu->data; 2175 ether_addr_copy(ieee80211_get_DA(hdr), da); 2176 ether_addr_copy(ieee80211_get_SA(hdr), sa); 2177 } 2178 2179 static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu, 2180 struct hal_rx_desc *rx_desc, 2181 enum hal_encrypt_type enctype, 2182 struct ieee80211_rx_status *status, 2183 bool decrypted) 2184 { 2185 u8 *first_hdr; 2186 u8 decap; 2187 struct ethhdr *ehdr; 2188 2189 first_hdr = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc); 2190 decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc); 2191 2192 switch (decap) { 2193 case DP_RX_DECAP_TYPE_NATIVE_WIFI: 2194 ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr, 2195 enctype, status); 2196 break; 2197 case DP_RX_DECAP_TYPE_RAW: 2198 ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status, 2199 decrypted); 2200 break; 2201 case DP_RX_DECAP_TYPE_ETHERNET2_DIX: 2202 ehdr = (struct ethhdr *)msdu->data; 2203 2204 /* mac80211 allows fast path only for authorized STA */ 2205 if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) { 2206 ATH11K_SKB_RXCB(msdu)->is_eapol = true; 2207 ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr, 2208 enctype, status); 2209 break; 2210 } 2211 2212 /* PN for mcast packets will be validated in mac80211; 2213 * remove eth header and add 802.11 header. 
2214 */ 2215 if (ATH11K_SKB_RXCB(msdu)->is_mcbc && decrypted) 2216 ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr, 2217 enctype, status); 2218 break; 2219 case DP_RX_DECAP_TYPE_8023: 2220 /* TODO: Handle undecap for these formats */ 2221 break; 2222 } 2223 } 2224 2225 static struct ath11k_peer * 2226 ath11k_dp_rx_h_find_peer(struct ath11k_base *ab, struct sk_buff *msdu) 2227 { 2228 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 2229 struct hal_rx_desc *rx_desc = rxcb->rx_desc; 2230 struct ath11k_peer *peer = NULL; 2231 2232 lockdep_assert_held(&ab->base_lock); 2233 2234 if (rxcb->peer_id) 2235 peer = ath11k_peer_find_by_id(ab, rxcb->peer_id); 2236 2237 if (peer) 2238 return peer; 2239 2240 if (!rx_desc || !(ath11k_dp_rxdesc_mac_addr2_valid(ab, rx_desc))) 2241 return NULL; 2242 2243 peer = ath11k_peer_find_by_addr(ab, 2244 ath11k_dp_rxdesc_mpdu_start_addr2(ab, rx_desc)); 2245 return peer; 2246 } 2247 2248 static void ath11k_dp_rx_h_mpdu(struct ath11k *ar, 2249 struct sk_buff *msdu, 2250 struct hal_rx_desc *rx_desc, 2251 struct ieee80211_rx_status *rx_status) 2252 { 2253 bool fill_crypto_hdr; 2254 enum hal_encrypt_type enctype; 2255 bool is_decrypted = false; 2256 struct ath11k_skb_rxcb *rxcb; 2257 struct ieee80211_hdr *hdr; 2258 struct ath11k_peer *peer; 2259 struct rx_attention *rx_attention; 2260 u32 err_bitmap; 2261 2262 /* PN for multicast packets will be checked in mac80211 */ 2263 rxcb = ATH11K_SKB_RXCB(msdu); 2264 fill_crypto_hdr = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc); 2265 rxcb->is_mcbc = fill_crypto_hdr; 2266 2267 if (rxcb->is_mcbc) { 2268 rxcb->peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc); 2269 rxcb->seq_no = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc); 2270 } 2271 2272 spin_lock_bh(&ar->ab->base_lock); 2273 peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu); 2274 if (peer) { 2275 if (rxcb->is_mcbc) 2276 enctype = peer->sec_type_grp; 2277 else 2278 enctype = peer->sec_type; 2279 } else { 2280 enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc); 2281 } 2282 spin_unlock_bh(&ar->ab->base_lock); 2283 2284 rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc); 2285 err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention); 2286 if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap) 2287 is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention); 2288 2289 /* Clear per-MPDU flags while leaving per-PPDU flags intact */ 2290 rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC | 2291 RX_FLAG_MMIC_ERROR | 2292 RX_FLAG_DECRYPTED | 2293 RX_FLAG_IV_STRIPPED | 2294 RX_FLAG_MMIC_STRIPPED); 2295 2296 if (err_bitmap & DP_RX_MPDU_ERR_FCS) 2297 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; 2298 if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC) 2299 rx_status->flag |= RX_FLAG_MMIC_ERROR; 2300 2301 if (is_decrypted) { 2302 rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED; 2303 2304 if (fill_crypto_hdr) 2305 rx_status->flag |= RX_FLAG_MIC_STRIPPED | 2306 RX_FLAG_ICV_STRIPPED; 2307 else 2308 rx_status->flag |= RX_FLAG_IV_STRIPPED | 2309 RX_FLAG_PN_VALIDATED; 2310 } 2311 2312 ath11k_dp_rx_h_csum_offload(ar, msdu); 2313 ath11k_dp_rx_h_undecap(ar, msdu, rx_desc, 2314 enctype, rx_status, is_decrypted); 2315 2316 if (!is_decrypted || fill_crypto_hdr) 2317 return; 2318 2319 if (ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc) != 2320 DP_RX_DECAP_TYPE_ETHERNET2_DIX) { 2321 hdr = (void *)msdu->data; 2322 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); 2323 } 2324 } 2325 2326 static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc, 
2327 struct ieee80211_rx_status *rx_status) 2328 { 2329 struct ieee80211_supported_band *sband; 2330 enum rx_msdu_start_pkt_type pkt_type; 2331 u8 bw; 2332 u8 rate_mcs, nss; 2333 u8 sgi; 2334 bool is_cck; 2335 2336 pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(ar->ab, rx_desc); 2337 bw = ath11k_dp_rx_h_msdu_start_rx_bw(ar->ab, rx_desc); 2338 rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(ar->ab, rx_desc); 2339 nss = ath11k_dp_rx_h_msdu_start_nss(ar->ab, rx_desc); 2340 sgi = ath11k_dp_rx_h_msdu_start_sgi(ar->ab, rx_desc); 2341 2342 switch (pkt_type) { 2343 case RX_MSDU_START_PKT_TYPE_11A: 2344 case RX_MSDU_START_PKT_TYPE_11B: 2345 is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B); 2346 sband = &ar->mac.sbands[rx_status->band]; 2347 rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs, 2348 is_cck); 2349 break; 2350 case RX_MSDU_START_PKT_TYPE_11N: 2351 rx_status->encoding = RX_ENC_HT; 2352 if (rate_mcs > ATH11K_HT_MCS_MAX) { 2353 ath11k_warn(ar->ab, 2354 "Received with invalid mcs in HT mode %d\n", 2355 rate_mcs); 2356 break; 2357 } 2358 rx_status->rate_idx = rate_mcs + (8 * (nss - 1)); 2359 if (sgi) 2360 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 2361 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); 2362 break; 2363 case RX_MSDU_START_PKT_TYPE_11AC: 2364 rx_status->encoding = RX_ENC_VHT; 2365 rx_status->rate_idx = rate_mcs; 2366 if (rate_mcs > ATH11K_VHT_MCS_MAX) { 2367 ath11k_warn(ar->ab, 2368 "Received with invalid mcs in VHT mode %d\n", 2369 rate_mcs); 2370 break; 2371 } 2372 rx_status->nss = nss; 2373 if (sgi) 2374 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 2375 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); 2376 break; 2377 case RX_MSDU_START_PKT_TYPE_11AX: 2378 rx_status->rate_idx = rate_mcs; 2379 if (rate_mcs > ATH11K_HE_MCS_MAX) { 2380 ath11k_warn(ar->ab, 2381 "Received with invalid mcs in HE mode %d\n", 2382 rate_mcs); 2383 break; 2384 } 2385 rx_status->encoding = RX_ENC_HE; 2386 rx_status->nss = nss; 2387 rx_status->he_gi = ath11k_he_gi_to_nl80211_he_gi(sgi); 2388 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); 2389 break; 2390 } 2391 } 2392 2393 static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc, 2394 struct ieee80211_rx_status *rx_status) 2395 { 2396 u8 channel_num; 2397 u32 center_freq, meta_data; 2398 struct ieee80211_channel *channel; 2399 2400 rx_status->freq = 0; 2401 rx_status->rate_idx = 0; 2402 rx_status->nss = 0; 2403 rx_status->encoding = RX_ENC_LEGACY; 2404 rx_status->bw = RATE_INFO_BW_20; 2405 2406 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; 2407 2408 meta_data = ath11k_dp_rx_h_msdu_start_freq(ar->ab, rx_desc); 2409 channel_num = meta_data; 2410 center_freq = meta_data >> 16; 2411 2412 if (center_freq >= ATH11K_MIN_6G_FREQ && 2413 center_freq <= ATH11K_MAX_6G_FREQ) { 2414 rx_status->band = NL80211_BAND_6GHZ; 2415 rx_status->freq = center_freq; 2416 } else if (channel_num >= 1 && channel_num <= 14) { 2417 rx_status->band = NL80211_BAND_2GHZ; 2418 } else if (channel_num >= 36 && channel_num <= 173) { 2419 rx_status->band = NL80211_BAND_5GHZ; 2420 } else { 2421 spin_lock_bh(&ar->data_lock); 2422 channel = ar->rx_channel; 2423 if (channel) { 2424 rx_status->band = channel->band; 2425 channel_num = 2426 ieee80211_frequency_to_channel(channel->center_freq); 2427 } 2428 spin_unlock_bh(&ar->data_lock); 2429 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ", 2430 rx_desc, sizeof(struct hal_rx_desc)); 2431 } 2432 2433 if (rx_status->band != NL80211_BAND_6GHZ) 2434 rx_status->freq = 
ieee80211_channel_to_frequency(channel_num, 2435 rx_status->band); 2436 2437 ath11k_dp_rx_h_rate(ar, rx_desc, rx_status); 2438 } 2439 2440 static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi, 2441 struct sk_buff *msdu, 2442 struct ieee80211_rx_status *status) 2443 { 2444 static const struct ieee80211_radiotap_he known = { 2445 .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN | 2446 IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN), 2447 .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN), 2448 }; 2449 struct ieee80211_rx_status *rx_status; 2450 struct ieee80211_radiotap_he *he = NULL; 2451 struct ieee80211_sta *pubsta = NULL; 2452 struct ath11k_peer *peer; 2453 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 2454 u8 decap = DP_RX_DECAP_TYPE_RAW; 2455 bool is_mcbc = rxcb->is_mcbc; 2456 bool is_eapol = rxcb->is_eapol; 2457 2458 if (status->encoding == RX_ENC_HE && 2459 !(status->flag & RX_FLAG_RADIOTAP_HE) && 2460 !(status->flag & RX_FLAG_SKIP_MONITOR)) { 2461 he = skb_push(msdu, sizeof(known)); 2462 memcpy(he, &known, sizeof(known)); 2463 status->flag |= RX_FLAG_RADIOTAP_HE; 2464 } 2465 2466 if (!(status->flag & RX_FLAG_ONLY_MONITOR)) 2467 decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rxcb->rx_desc); 2468 2469 spin_lock_bh(&ar->ab->base_lock); 2470 peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu); 2471 if (peer && peer->sta) 2472 pubsta = peer->sta; 2473 spin_unlock_bh(&ar->ab->base_lock); 2474 2475 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 2476 "rx skb %pK len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", 2477 msdu, 2478 msdu->len, 2479 peer ? peer->addr : NULL, 2480 rxcb->tid, 2481 is_mcbc ? "mcast" : "ucast", 2482 rxcb->seq_no, 2483 (status->encoding == RX_ENC_LEGACY) ? "legacy" : "", 2484 (status->encoding == RX_ENC_HT) ? "ht" : "", 2485 (status->encoding == RX_ENC_VHT) ? "vht" : "", 2486 (status->encoding == RX_ENC_HE) ? "he" : "", 2487 (status->bw == RATE_INFO_BW_40) ? "40" : "", 2488 (status->bw == RATE_INFO_BW_80) ? "80" : "", 2489 (status->bw == RATE_INFO_BW_160) ? "160" : "", 2490 status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "", 2491 status->rate_idx, 2492 status->nss, 2493 status->freq, 2494 status->band, status->flag, 2495 !!(status->flag & RX_FLAG_FAILED_FCS_CRC), 2496 !!(status->flag & RX_FLAG_MMIC_ERROR), 2497 !!(status->flag & RX_FLAG_AMSDU_MORE)); 2498 2499 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DP_RX, NULL, "dp rx msdu: ", 2500 msdu->data, msdu->len); 2501 2502 rx_status = IEEE80211_SKB_RXCB(msdu); 2503 *rx_status = *status; 2504 2505 /* TODO: trace rx packet */ 2506 2507 /* PN for multicast packets are not validate in HW, 2508 * so skip 802.3 rx path 2509 * Also, fast_rx expectes the STA to be authorized, hence 2510 * eapol packets are sent in slow path. 
2511 */ 2512 if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol && 2513 !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED)) 2514 rx_status->flag |= RX_FLAG_8023; 2515 2516 ieee80211_rx_napi(ar->hw, pubsta, msdu, napi); 2517 } 2518 2519 static int ath11k_dp_rx_process_msdu(struct ath11k *ar, 2520 struct sk_buff *msdu, 2521 struct sk_buff_head *msdu_list, 2522 struct ieee80211_rx_status *rx_status) 2523 { 2524 struct ath11k_base *ab = ar->ab; 2525 struct hal_rx_desc *rx_desc, *lrx_desc; 2526 struct rx_attention *rx_attention; 2527 struct ath11k_skb_rxcb *rxcb; 2528 struct sk_buff *last_buf; 2529 u8 l3_pad_bytes; 2530 u8 *hdr_status; 2531 u16 msdu_len; 2532 int ret; 2533 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; 2534 2535 last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu); 2536 if (!last_buf) { 2537 ath11k_warn(ab, 2538 "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n"); 2539 ret = -EIO; 2540 goto free_out; 2541 } 2542 2543 rx_desc = (struct hal_rx_desc *)msdu->data; 2544 if (ath11k_dp_rx_h_attn_msdu_len_err(ab, rx_desc)) { 2545 ath11k_warn(ar->ab, "msdu len not valid\n"); 2546 ret = -EIO; 2547 goto free_out; 2548 } 2549 2550 lrx_desc = (struct hal_rx_desc *)last_buf->data; 2551 rx_attention = ath11k_dp_rx_get_attention(ab, lrx_desc); 2552 if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) { 2553 ath11k_warn(ab, "msdu_done bit in attention is not set\n"); 2554 ret = -EIO; 2555 goto free_out; 2556 } 2557 2558 rxcb = ATH11K_SKB_RXCB(msdu); 2559 rxcb->rx_desc = rx_desc; 2560 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ab, rx_desc); 2561 l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ab, lrx_desc); 2562 2563 if (rxcb->is_frag) { 2564 skb_pull(msdu, hal_rx_desc_sz); 2565 } else if (!rxcb->is_continuation) { 2566 if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) { 2567 hdr_status = ath11k_dp_rx_h_80211_hdr(ab, rx_desc); 2568 ret = -EINVAL; 2569 ath11k_warn(ab, "invalid msdu len %u\n", msdu_len); 2570 ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", hdr_status, 2571 sizeof(struct ieee80211_hdr)); 2572 ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", rx_desc, 2573 sizeof(struct hal_rx_desc)); 2574 goto free_out; 2575 } 2576 skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len); 2577 skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes); 2578 } else { 2579 ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list, 2580 msdu, last_buf, 2581 l3_pad_bytes, msdu_len); 2582 if (ret) { 2583 ath11k_warn(ab, 2584 "failed to coalesce msdu rx buffer%d\n", ret); 2585 goto free_out; 2586 } 2587 } 2588 2589 ath11k_dp_rx_h_ppdu(ar, rx_desc, rx_status); 2590 ath11k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status); 2591 2592 rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED; 2593 2594 return 0; 2595 2596 free_out: 2597 return ret; 2598 } 2599 2600 static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab, 2601 struct napi_struct *napi, 2602 struct sk_buff_head *msdu_list, 2603 int mac_id) 2604 { 2605 struct sk_buff *msdu; 2606 struct ath11k *ar; 2607 struct ieee80211_rx_status rx_status = {0}; 2608 int ret; 2609 2610 if (skb_queue_empty(msdu_list)) 2611 return; 2612 2613 if (unlikely(!rcu_access_pointer(ab->pdevs_active[mac_id]))) { 2614 __skb_queue_purge(msdu_list); 2615 return; 2616 } 2617 2618 ar = ab->pdevs[mac_id].ar; 2619 if (unlikely(test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags))) { 2620 __skb_queue_purge(msdu_list); 2621 return; 2622 } 2623 2624 while ((msdu = __skb_dequeue(msdu_list))) { 2625 ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status); 2626 if 
(unlikely(ret)) { 2627 ath11k_dbg(ab, ATH11K_DBG_DATA, 2628 "Unable to process msdu %d", ret); 2629 dev_kfree_skb_any(msdu); 2630 continue; 2631 } 2632 2633 ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status); 2634 } 2635 } 2636 2637 int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id, 2638 struct napi_struct *napi, int budget) 2639 { 2640 struct ath11k_dp *dp = &ab->dp; 2641 struct dp_rxdma_ring *rx_ring; 2642 int num_buffs_reaped[MAX_RADIOS] = {0}; 2643 struct sk_buff_head msdu_list[MAX_RADIOS]; 2644 struct ath11k_skb_rxcb *rxcb; 2645 int total_msdu_reaped = 0; 2646 struct hal_srng *srng; 2647 struct sk_buff *msdu; 2648 bool done = false; 2649 int buf_id, mac_id; 2650 struct ath11k *ar; 2651 struct hal_reo_dest_ring *desc; 2652 enum hal_reo_dest_ring_push_reason push_reason; 2653 u32 cookie; 2654 int i; 2655 2656 for (i = 0; i < MAX_RADIOS; i++) 2657 __skb_queue_head_init(&msdu_list[i]); 2658 2659 srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id]; 2660 2661 spin_lock_bh(&srng->lock); 2662 2663 ath11k_hal_srng_access_begin(ab, srng); 2664 2665 try_again: 2666 while (likely(desc = 2667 (struct hal_reo_dest_ring *)ath11k_hal_srng_dst_get_next_entry(ab, 2668 srng))) { 2669 cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, 2670 desc->buf_addr_info.info1); 2671 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 2672 cookie); 2673 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie); 2674 2675 ar = ab->pdevs[mac_id].ar; 2676 rx_ring = &ar->dp.rx_refill_buf_ring; 2677 spin_lock_bh(&rx_ring->idr_lock); 2678 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 2679 if (unlikely(!msdu)) { 2680 ath11k_warn(ab, "frame rx with invalid buf_id %d\n", 2681 buf_id); 2682 spin_unlock_bh(&rx_ring->idr_lock); 2683 continue; 2684 } 2685 2686 idr_remove(&rx_ring->bufs_idr, buf_id); 2687 spin_unlock_bh(&rx_ring->idr_lock); 2688 2689 rxcb = ATH11K_SKB_RXCB(msdu); 2690 dma_unmap_single(ab->dev, rxcb->paddr, 2691 msdu->len + skb_tailroom(msdu), 2692 DMA_FROM_DEVICE); 2693 2694 num_buffs_reaped[mac_id]++; 2695 2696 push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON, 2697 desc->info0); 2698 if (unlikely(push_reason != 2699 HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION)) { 2700 dev_kfree_skb_any(msdu); 2701 ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++; 2702 continue; 2703 } 2704 2705 rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 & 2706 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU); 2707 rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 & 2708 RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU); 2709 rxcb->is_continuation = !!(desc->rx_msdu_info.info0 & 2710 RX_MSDU_DESC_INFO0_MSDU_CONTINUATION); 2711 rxcb->peer_id = FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID, 2712 desc->rx_mpdu_info.meta_data); 2713 rxcb->seq_no = FIELD_GET(RX_MPDU_DESC_INFO0_SEQ_NUM, 2714 desc->rx_mpdu_info.info0); 2715 rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM, 2716 desc->info0); 2717 2718 rxcb->mac_id = mac_id; 2719 __skb_queue_tail(&msdu_list[mac_id], msdu); 2720 2721 if (rxcb->is_continuation) { 2722 done = false; 2723 } else { 2724 total_msdu_reaped++; 2725 done = true; 2726 } 2727 2728 if (total_msdu_reaped >= budget) 2729 break; 2730 } 2731 2732 /* Hw might have updated the head pointer after we cached it. 2733 * In this case, even though there are entries in the ring we'll 2734 * get rx_desc NULL. Give the read another try with updated cached 2735 * head pointer so that we can reap complete MPDU in the current 2736 * rx processing. 
2737 */ 2738 if (unlikely(!done && ath11k_hal_srng_dst_num_free(ab, srng, true))) { 2739 ath11k_hal_srng_access_end(ab, srng); 2740 goto try_again; 2741 } 2742 2743 ath11k_hal_srng_access_end(ab, srng); 2744 2745 spin_unlock_bh(&srng->lock); 2746 2747 if (unlikely(!total_msdu_reaped)) 2748 goto exit; 2749 2750 for (i = 0; i < ab->num_radios; i++) { 2751 if (!num_buffs_reaped[i]) 2752 continue; 2753 2754 ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list[i], i); 2755 2756 ar = ab->pdevs[i].ar; 2757 rx_ring = &ar->dp.rx_refill_buf_ring; 2758 2759 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i], 2760 ab->hw_params.hal_params->rx_buf_rbm); 2761 } 2762 exit: 2763 return total_msdu_reaped; 2764 } 2765 2766 static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta, 2767 struct hal_rx_mon_ppdu_info *ppdu_info) 2768 { 2769 struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats; 2770 u32 num_msdu; 2771 2772 if (!rx_stats) 2773 return; 2774 2775 num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count + 2776 ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count; 2777 2778 rx_stats->num_msdu += num_msdu; 2779 rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count + 2780 ppdu_info->tcp_ack_msdu_count; 2781 rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count; 2782 rx_stats->other_msdu_count += ppdu_info->other_msdu_count; 2783 2784 if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A || 2785 ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) { 2786 ppdu_info->nss = 1; 2787 ppdu_info->mcs = HAL_RX_MAX_MCS; 2788 ppdu_info->tid = IEEE80211_NUM_TIDS; 2789 } 2790 2791 if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS) 2792 rx_stats->nss_count[ppdu_info->nss - 1] += num_msdu; 2793 2794 if (ppdu_info->mcs <= HAL_RX_MAX_MCS) 2795 rx_stats->mcs_count[ppdu_info->mcs] += num_msdu; 2796 2797 if (ppdu_info->gi < HAL_RX_GI_MAX) 2798 rx_stats->gi_count[ppdu_info->gi] += num_msdu; 2799 2800 if (ppdu_info->bw < HAL_RX_BW_MAX) 2801 rx_stats->bw_count[ppdu_info->bw] += num_msdu; 2802 2803 if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX) 2804 rx_stats->coding_count[ppdu_info->ldpc] += num_msdu; 2805 2806 if (ppdu_info->tid <= IEEE80211_NUM_TIDS) 2807 rx_stats->tid_count[ppdu_info->tid] += num_msdu; 2808 2809 if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX) 2810 rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu; 2811 2812 if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX) 2813 rx_stats->reception_type[ppdu_info->reception_type] += num_msdu; 2814 2815 if (ppdu_info->is_stbc) 2816 rx_stats->stbc_count += num_msdu; 2817 2818 if (ppdu_info->beamformed) 2819 rx_stats->beamformed_count += num_msdu; 2820 2821 if (ppdu_info->num_mpdu_fcs_ok > 1) 2822 rx_stats->ampdu_msdu_count += num_msdu; 2823 else 2824 rx_stats->non_ampdu_msdu_count += num_msdu; 2825 2826 rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok; 2827 rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err; 2828 rx_stats->dcm_count += ppdu_info->dcm; 2829 rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu; 2830 2831 arsta->rssi_comb = ppdu_info->rssi_comb; 2832 rx_stats->rx_duration += ppdu_info->rx_duration; 2833 arsta->rx_duration = rx_stats->rx_duration; 2834 } 2835 2836 static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab, 2837 struct dp_rxdma_ring *rx_ring, 2838 int *buf_id) 2839 { 2840 struct sk_buff *skb; 2841 dma_addr_t paddr; 2842 2843 skb = dev_alloc_skb(DP_RX_BUFFER_SIZE + 2844 DP_RX_BUFFER_ALIGN_SIZE); 2845 2846 if (!skb) 2847 goto fail_alloc_skb; 
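/* Descriptive note (inferred from the surrounding code): the status buffer is
 * over-allocated by DP_RX_BUFFER_ALIGN_SIZE above so that, when dev_alloc_skb()
 * returns an unaligned data pointer, skb->data can be pulled forward to the
 * required alignment below before the buffer is DMA mapped and posted to the
 * monitor status ring.
 */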
2848 2849 if (!IS_ALIGNED((unsigned long)skb->data, 2850 DP_RX_BUFFER_ALIGN_SIZE)) { 2851 skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) - 2852 skb->data); 2853 } 2854 2855 paddr = dma_map_single(ab->dev, skb->data, 2856 skb->len + skb_tailroom(skb), 2857 DMA_FROM_DEVICE); 2858 if (unlikely(dma_mapping_error(ab->dev, paddr))) 2859 goto fail_free_skb; 2860 2861 spin_lock_bh(&rx_ring->idr_lock); 2862 *buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0, 2863 rx_ring->bufs_max, GFP_ATOMIC); 2864 spin_unlock_bh(&rx_ring->idr_lock); 2865 if (*buf_id < 0) 2866 goto fail_dma_unmap; 2867 2868 ATH11K_SKB_RXCB(skb)->paddr = paddr; 2869 return skb; 2870 2871 fail_dma_unmap: 2872 dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), 2873 DMA_FROM_DEVICE); 2874 fail_free_skb: 2875 dev_kfree_skb_any(skb); 2876 fail_alloc_skb: 2877 return NULL; 2878 } 2879 2880 int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id, 2881 struct dp_rxdma_ring *rx_ring, 2882 int req_entries, 2883 enum hal_rx_buf_return_buf_manager mgr) 2884 { 2885 struct hal_srng *srng; 2886 u32 *desc; 2887 struct sk_buff *skb; 2888 int num_free; 2889 int num_remain; 2890 int buf_id; 2891 u32 cookie; 2892 dma_addr_t paddr; 2893 2894 req_entries = min(req_entries, rx_ring->bufs_max); 2895 2896 srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; 2897 2898 spin_lock_bh(&srng->lock); 2899 2900 ath11k_hal_srng_access_begin(ab, srng); 2901 2902 num_free = ath11k_hal_srng_src_num_free(ab, srng, true); 2903 2904 req_entries = min(num_free, req_entries); 2905 num_remain = req_entries; 2906 2907 while (num_remain > 0) { 2908 skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring, 2909 &buf_id); 2910 if (!skb) 2911 break; 2912 paddr = ATH11K_SKB_RXCB(skb)->paddr; 2913 2914 desc = ath11k_hal_srng_src_get_next_entry(ab, srng); 2915 if (!desc) 2916 goto fail_desc_get; 2917 2918 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) | 2919 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 2920 2921 num_remain--; 2922 2923 ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr); 2924 } 2925 2926 ath11k_hal_srng_access_end(ab, srng); 2927 2928 spin_unlock_bh(&srng->lock); 2929 2930 return req_entries - num_remain; 2931 2932 fail_desc_get: 2933 spin_lock_bh(&rx_ring->idr_lock); 2934 idr_remove(&rx_ring->bufs_idr, buf_id); 2935 spin_unlock_bh(&rx_ring->idr_lock); 2936 dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), 2937 DMA_FROM_DEVICE); 2938 dev_kfree_skb_any(skb); 2939 ath11k_hal_srng_access_end(ab, srng); 2940 spin_unlock_bh(&srng->lock); 2941 2942 return req_entries - num_remain; 2943 } 2944 2945 static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id, 2946 int *budget, struct sk_buff_head *skb_list) 2947 { 2948 struct ath11k *ar; 2949 const struct ath11k_hw_hal_params *hal_params; 2950 struct ath11k_pdev_dp *dp; 2951 struct dp_rxdma_ring *rx_ring; 2952 struct hal_srng *srng; 2953 void *rx_mon_status_desc; 2954 struct sk_buff *skb; 2955 struct ath11k_skb_rxcb *rxcb; 2956 struct hal_tlv_hdr *tlv; 2957 u32 cookie; 2958 int buf_id, srng_id; 2959 dma_addr_t paddr; 2960 u8 rbm; 2961 int num_buffs_reaped = 0; 2962 2963 ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar; 2964 dp = &ar->dp; 2965 srng_id = ath11k_hw_mac_id_to_srng_id(&ab->hw_params, mac_id); 2966 rx_ring = &dp->rx_mon_status_refill_ring[srng_id]; 2967 2968 srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; 2969 2970 spin_lock_bh(&srng->lock); 2971 2972 
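/* Descriptive note on the reap loop below: each ring entry's cookie carries the
 * IDR buf_id of the skb that was posted, so the skb is looked up, DMA unmapped
 * and checked for the HAL_RX_STATUS_BUFFER_DONE TLV before being queued for the
 * caller; the entry is then refilled in place with a freshly allocated status
 * buffer.
 */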
ath11k_hal_srng_access_begin(ab, srng); 2973 while (*budget) { 2974 *budget -= 1; 2975 rx_mon_status_desc = 2976 ath11k_hal_srng_src_peek(ab, srng); 2977 if (!rx_mon_status_desc) 2978 break; 2979 2980 ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr, 2981 &cookie, &rbm); 2982 if (paddr) { 2983 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie); 2984 2985 spin_lock_bh(&rx_ring->idr_lock); 2986 skb = idr_find(&rx_ring->bufs_idr, buf_id); 2987 if (!skb) { 2988 ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n", 2989 buf_id); 2990 spin_unlock_bh(&rx_ring->idr_lock); 2991 goto move_next; 2992 } 2993 2994 idr_remove(&rx_ring->bufs_idr, buf_id); 2995 spin_unlock_bh(&rx_ring->idr_lock); 2996 2997 rxcb = ATH11K_SKB_RXCB(skb); 2998 2999 dma_unmap_single(ab->dev, rxcb->paddr, 3000 skb->len + skb_tailroom(skb), 3001 DMA_FROM_DEVICE); 3002 3003 tlv = (struct hal_tlv_hdr *)skb->data; 3004 if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != 3005 HAL_RX_STATUS_BUFFER_DONE) { 3006 ath11k_warn(ab, "mon status DONE not set %lx\n", 3007 FIELD_GET(HAL_TLV_HDR_TAG, 3008 tlv->tl)); 3009 dev_kfree_skb_any(skb); 3010 goto move_next; 3011 } 3012 3013 __skb_queue_tail(skb_list, skb); 3014 } 3015 move_next: 3016 skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring, 3017 &buf_id); 3018 3019 if (!skb) { 3020 hal_params = ab->hw_params.hal_params; 3021 ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0, 3022 hal_params->rx_buf_rbm); 3023 num_buffs_reaped++; 3024 break; 3025 } 3026 rxcb = ATH11K_SKB_RXCB(skb); 3027 3028 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) | 3029 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 3030 3031 ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr, 3032 cookie, 3033 ab->hw_params.hal_params->rx_buf_rbm); 3034 ath11k_hal_srng_src_get_next_entry(ab, srng); 3035 num_buffs_reaped++; 3036 } 3037 ath11k_hal_srng_access_end(ab, srng); 3038 spin_unlock_bh(&srng->lock); 3039 3040 return num_buffs_reaped; 3041 } 3042 3043 int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id, 3044 struct napi_struct *napi, int budget) 3045 { 3046 struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id); 3047 enum hal_rx_mon_status hal_status; 3048 struct sk_buff *skb; 3049 struct sk_buff_head skb_list; 3050 struct hal_rx_mon_ppdu_info ppdu_info; 3051 struct ath11k_peer *peer; 3052 struct ath11k_sta *arsta; 3053 int num_buffs_reaped = 0; 3054 u32 rx_buf_sz; 3055 u16 log_type = 0; 3056 3057 __skb_queue_head_init(&skb_list); 3058 3059 num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget, 3060 &skb_list); 3061 if (!num_buffs_reaped) 3062 goto exit; 3063 3064 while ((skb = __skb_dequeue(&skb_list))) { 3065 memset(&ppdu_info, 0, sizeof(ppdu_info)); 3066 ppdu_info.peer_id = HAL_INVALID_PEERID; 3067 3068 if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) { 3069 log_type = ATH11K_PKTLOG_TYPE_LITE_RX; 3070 rx_buf_sz = DP_RX_BUFFER_SIZE_LITE; 3071 } else if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) { 3072 log_type = ATH11K_PKTLOG_TYPE_RX_STATBUF; 3073 rx_buf_sz = DP_RX_BUFFER_SIZE; 3074 } 3075 3076 if (log_type) 3077 trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz); 3078 3079 hal_status = ath11k_hal_rx_parse_mon_status(ab, &ppdu_info, skb); 3080 3081 if (ppdu_info.peer_id == HAL_INVALID_PEERID || 3082 hal_status != HAL_RX_MON_STATUS_PPDU_DONE) { 3083 dev_kfree_skb_any(skb); 3084 continue; 3085 } 3086 3087 rcu_read_lock(); 3088 spin_lock_bh(&ab->base_lock); 3089 peer = ath11k_peer_find_by_id(ab, ppdu_info.peer_id); 3090 3091 if (!peer || 
!peer->sta) { 3092 ath11k_dbg(ab, ATH11K_DBG_DATA, 3093 "failed to find the peer with peer_id %d\n", 3094 ppdu_info.peer_id); 3095 spin_unlock_bh(&ab->base_lock); 3096 rcu_read_unlock(); 3097 dev_kfree_skb_any(skb); 3098 continue; 3099 } 3100 3101 arsta = (struct ath11k_sta *)peer->sta->drv_priv; 3102 ath11k_dp_rx_update_peer_stats(arsta, &ppdu_info); 3103 3104 if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr)) 3105 trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz); 3106 3107 spin_unlock_bh(&ab->base_lock); 3108 rcu_read_unlock(); 3109 3110 dev_kfree_skb_any(skb); 3111 } 3112 exit: 3113 return num_buffs_reaped; 3114 } 3115 3116 static void ath11k_dp_rx_frag_timer(struct timer_list *timer) 3117 { 3118 struct dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer); 3119 3120 spin_lock_bh(&rx_tid->ab->base_lock); 3121 if (rx_tid->last_frag_no && 3122 rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) { 3123 spin_unlock_bh(&rx_tid->ab->base_lock); 3124 return; 3125 } 3126 ath11k_dp_rx_frags_cleanup(rx_tid, true); 3127 spin_unlock_bh(&rx_tid->ab->base_lock); 3128 } 3129 3130 int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id) 3131 { 3132 struct ath11k_base *ab = ar->ab; 3133 struct crypto_shash *tfm; 3134 struct ath11k_peer *peer; 3135 struct dp_rx_tid *rx_tid; 3136 int i; 3137 3138 tfm = crypto_alloc_shash("michael_mic", 0, 0); 3139 if (IS_ERR(tfm)) 3140 return PTR_ERR(tfm); 3141 3142 spin_lock_bh(&ab->base_lock); 3143 3144 peer = ath11k_peer_find(ab, vdev_id, peer_mac); 3145 if (!peer) { 3146 ath11k_warn(ab, "failed to find the peer to set up fragment info\n"); 3147 spin_unlock_bh(&ab->base_lock); 3148 return -ENOENT; 3149 } 3150 3151 for (i = 0; i <= IEEE80211_NUM_TIDS; i++) { 3152 rx_tid = &peer->rx_tid[i]; 3153 rx_tid->ab = ab; 3154 timer_setup(&rx_tid->frag_timer, ath11k_dp_rx_frag_timer, 0); 3155 skb_queue_head_init(&rx_tid->rx_frags); 3156 } 3157 3158 peer->tfm_mmic = tfm; 3159 spin_unlock_bh(&ab->base_lock); 3160 3161 return 0; 3162 } 3163 3164 static int ath11k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key, 3165 struct ieee80211_hdr *hdr, u8 *data, 3166 size_t data_len, u8 *mic) 3167 { 3168 SHASH_DESC_ON_STACK(desc, tfm); 3169 u8 mic_hdr[16] = {0}; 3170 u8 tid = 0; 3171 int ret; 3172 3173 if (!tfm) 3174 return -EINVAL; 3175 3176 desc->tfm = tfm; 3177 3178 ret = crypto_shash_setkey(tfm, key, 8); 3179 if (ret) 3180 goto out; 3181 3182 ret = crypto_shash_init(desc); 3183 if (ret) 3184 goto out; 3185 3186 /* TKIP MIC header */ 3187 memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN); 3188 memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN); 3189 if (ieee80211_is_data_qos(hdr->frame_control)) 3190 tid = ieee80211_get_tid(hdr); 3191 mic_hdr[12] = tid; 3192 3193 ret = crypto_shash_update(desc, mic_hdr, 16); 3194 if (ret) 3195 goto out; 3196 ret = crypto_shash_update(desc, data, data_len); 3197 if (ret) 3198 goto out; 3199 ret = crypto_shash_final(desc, mic); 3200 out: 3201 shash_desc_zero(desc); 3202 return ret; 3203 } 3204 3205 static int ath11k_dp_rx_h_verify_tkip_mic(struct ath11k *ar, struct ath11k_peer *peer, 3206 struct sk_buff *msdu) 3207 { 3208 struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data; 3209 struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu); 3210 struct ieee80211_key_conf *key_conf; 3211 struct ieee80211_hdr *hdr; 3212 u8 mic[IEEE80211_CCMP_MIC_LEN]; 3213 int head_len, tail_len, ret; 3214 size_t data_len; 3215 u32 hdr_len, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; 3216 u8 *key, 
*data; 3217 u8 key_idx; 3218 3219 if (ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc) != 3220 HAL_ENCRYPT_TYPE_TKIP_MIC) 3221 return 0; 3222 3223 hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz); 3224 hdr_len = ieee80211_hdrlen(hdr->frame_control); 3225 head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN; 3226 tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN; 3227 3228 if (!is_multicast_ether_addr(hdr->addr1)) 3229 key_idx = peer->ucast_keyidx; 3230 else 3231 key_idx = peer->mcast_keyidx; 3232 3233 key_conf = peer->keys[key_idx]; 3234 3235 data = msdu->data + head_len; 3236 data_len = msdu->len - head_len - tail_len; 3237 key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY]; 3238 3239 ret = ath11k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic); 3240 if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN)) 3241 goto mic_fail; 3242 3243 return 0; 3244 3245 mic_fail: 3246 (ATH11K_SKB_RXCB(msdu))->is_first_msdu = true; 3247 (ATH11K_SKB_RXCB(msdu))->is_last_msdu = true; 3248 3249 rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED | 3250 RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED; 3251 skb_pull(msdu, hal_rx_desc_sz); 3252 3253 ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs); 3254 ath11k_dp_rx_h_undecap(ar, msdu, rx_desc, 3255 HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true); 3256 ieee80211_rx(ar->hw, msdu); 3257 return -EINVAL; 3258 } 3259 3260 static void ath11k_dp_rx_h_undecap_frag(struct ath11k *ar, struct sk_buff *msdu, 3261 enum hal_encrypt_type enctype, u32 flags) 3262 { 3263 struct ieee80211_hdr *hdr; 3264 size_t hdr_len; 3265 size_t crypto_len; 3266 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; 3267 3268 if (!flags) 3269 return; 3270 3271 hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz); 3272 3273 if (flags & RX_FLAG_MIC_STRIPPED) 3274 skb_trim(msdu, msdu->len - 3275 ath11k_dp_rx_crypto_mic_len(ar, enctype)); 3276 3277 if (flags & RX_FLAG_ICV_STRIPPED) 3278 skb_trim(msdu, msdu->len - 3279 ath11k_dp_rx_crypto_icv_len(ar, enctype)); 3280 3281 if (flags & RX_FLAG_IV_STRIPPED) { 3282 hdr_len = ieee80211_hdrlen(hdr->frame_control); 3283 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); 3284 3285 memmove((void *)msdu->data + hal_rx_desc_sz + crypto_len, 3286 (void *)msdu->data + hal_rx_desc_sz, hdr_len); 3287 skb_pull(msdu, crypto_len); 3288 } 3289 } 3290 3291 static int ath11k_dp_rx_h_defrag(struct ath11k *ar, 3292 struct ath11k_peer *peer, 3293 struct dp_rx_tid *rx_tid, 3294 struct sk_buff **defrag_skb) 3295 { 3296 struct hal_rx_desc *rx_desc; 3297 struct sk_buff *skb, *first_frag, *last_frag; 3298 struct ieee80211_hdr *hdr; 3299 struct rx_attention *rx_attention; 3300 enum hal_encrypt_type enctype; 3301 bool is_decrypted = false; 3302 int msdu_len = 0; 3303 int extra_space; 3304 u32 flags, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; 3305 3306 first_frag = skb_peek(&rx_tid->rx_frags); 3307 last_frag = skb_peek_tail(&rx_tid->rx_frags); 3308 3309 skb_queue_walk(&rx_tid->rx_frags, skb) { 3310 flags = 0; 3311 rx_desc = (struct hal_rx_desc *)skb->data; 3312 hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz); 3313 3314 enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc); 3315 if (enctype != HAL_ENCRYPT_TYPE_OPEN) { 3316 rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc); 3317 is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention); 3318 } 3319 3320 if (is_decrypted) { 3321 if (skb != first_frag) 3322 flags |= RX_FLAG_IV_STRIPPED; 3323 if (skb != last_frag) 3324 flags |= 
RX_FLAG_ICV_STRIPPED | 3325 RX_FLAG_MIC_STRIPPED; 3326 } 3327 3328 /* RX fragments are always raw packets */ 3329 if (skb != last_frag) 3330 skb_trim(skb, skb->len - FCS_LEN); 3331 ath11k_dp_rx_h_undecap_frag(ar, skb, enctype, flags); 3332 3333 if (skb != first_frag) 3334 skb_pull(skb, hal_rx_desc_sz + 3335 ieee80211_hdrlen(hdr->frame_control)); 3336 msdu_len += skb->len; 3337 } 3338 3339 extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag)); 3340 if (extra_space > 0 && 3341 (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0)) 3342 return -ENOMEM; 3343 3344 __skb_unlink(first_frag, &rx_tid->rx_frags); 3345 while ((skb = __skb_dequeue(&rx_tid->rx_frags))) { 3346 skb_put_data(first_frag, skb->data, skb->len); 3347 dev_kfree_skb_any(skb); 3348 } 3349 3350 hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz); 3351 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS); 3352 ATH11K_SKB_RXCB(first_frag)->is_frag = 1; 3353 3354 if (ath11k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag)) 3355 first_frag = NULL; 3356 3357 *defrag_skb = first_frag; 3358 return 0; 3359 } 3360 3361 static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_tid *rx_tid, 3362 struct sk_buff *defrag_skb) 3363 { 3364 struct ath11k_base *ab = ar->ab; 3365 struct ath11k_pdev_dp *dp = &ar->dp; 3366 struct dp_rxdma_ring *rx_refill_ring = &dp->rx_refill_buf_ring; 3367 struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data; 3368 struct hal_reo_entrance_ring *reo_ent_ring; 3369 struct hal_reo_dest_ring *reo_dest_ring; 3370 struct dp_link_desc_bank *link_desc_banks; 3371 struct hal_rx_msdu_link *msdu_link; 3372 struct hal_rx_msdu_details *msdu0; 3373 struct hal_srng *srng; 3374 dma_addr_t paddr; 3375 u32 desc_bank, msdu_info, mpdu_info; 3376 u32 dst_idx, cookie, hal_rx_desc_sz; 3377 int ret, buf_id; 3378 3379 hal_rx_desc_sz = ab->hw_params.hal_desc_sz; 3380 link_desc_banks = ab->dp.link_desc_banks; 3381 reo_dest_ring = rx_tid->dst_ring_desc; 3382 3383 ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank); 3384 msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr + 3385 (paddr - link_desc_banks[desc_bank].paddr)); 3386 msdu0 = &msdu_link->msdu_link[0]; 3387 dst_idx = FIELD_GET(RX_MSDU_DESC_INFO0_REO_DEST_IND, msdu0->rx_msdu_info.info0); 3388 memset(msdu0, 0, sizeof(*msdu0)); 3389 3390 msdu_info = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1) | 3391 FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1) | 3392 FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_CONTINUATION, 0) | 3393 FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_LENGTH, 3394 defrag_skb->len - hal_rx_desc_sz) | 3395 FIELD_PREP(RX_MSDU_DESC_INFO0_REO_DEST_IND, dst_idx) | 3396 FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_SA, 1) | 3397 FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_DA, 1); 3398 msdu0->rx_msdu_info.info0 = msdu_info; 3399 3400 /* change msdu len in hal rx desc */ 3401 ath11k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz); 3402 3403 paddr = dma_map_single(ab->dev, defrag_skb->data, 3404 defrag_skb->len + skb_tailroom(defrag_skb), 3405 DMA_TO_DEVICE); 3406 if (dma_mapping_error(ab->dev, paddr)) 3407 return -ENOMEM; 3408 3409 spin_lock_bh(&rx_refill_ring->idr_lock); 3410 buf_id = idr_alloc(&rx_refill_ring->bufs_idr, defrag_skb, 0, 3411 rx_refill_ring->bufs_max * 3, GFP_ATOMIC); 3412 spin_unlock_bh(&rx_refill_ring->idr_lock); 3413 if (buf_id < 0) { 3414 ret = -ENOMEM; 3415 goto err_unmap_dma; 3416 } 3417 3418 ATH11K_SKB_RXCB(defrag_skb)->paddr = paddr; 3419 
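/* Descriptive note (based on the cookie handling elsewhere in this file): the
 * cookie composed below packs the pdev (mac) id and the refill-ring IDR buf_id,
 * mirroring what ath11k_dp_process_rx() extracts when the reinjected,
 * defragmented MSDU comes back through the REO destination ring.
 */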
cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, dp->mac_id) | 3420 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 3421 3422 ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie, 3423 ab->hw_params.hal_params->rx_buf_rbm); 3424 3425 /* Fill mpdu details into reo entrace ring */ 3426 srng = &ab->hal.srng_list[ab->dp.reo_reinject_ring.ring_id]; 3427 3428 spin_lock_bh(&srng->lock); 3429 ath11k_hal_srng_access_begin(ab, srng); 3430 3431 reo_ent_ring = (struct hal_reo_entrance_ring *) 3432 ath11k_hal_srng_src_get_next_entry(ab, srng); 3433 if (!reo_ent_ring) { 3434 ath11k_hal_srng_access_end(ab, srng); 3435 spin_unlock_bh(&srng->lock); 3436 ret = -ENOSPC; 3437 goto err_free_idr; 3438 } 3439 memset(reo_ent_ring, 0, sizeof(*reo_ent_ring)); 3440 3441 ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank); 3442 ath11k_hal_rx_buf_addr_info_set(reo_ent_ring, paddr, desc_bank, 3443 HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST); 3444 3445 mpdu_info = FIELD_PREP(RX_MPDU_DESC_INFO0_MSDU_COUNT, 1) | 3446 FIELD_PREP(RX_MPDU_DESC_INFO0_SEQ_NUM, rx_tid->cur_sn) | 3447 FIELD_PREP(RX_MPDU_DESC_INFO0_FRAG_FLAG, 0) | 3448 FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_SA, 1) | 3449 FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_DA, 1) | 3450 FIELD_PREP(RX_MPDU_DESC_INFO0_RAW_MPDU, 1) | 3451 FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_PN, 1); 3452 3453 reo_ent_ring->rx_mpdu_info.info0 = mpdu_info; 3454 reo_ent_ring->rx_mpdu_info.meta_data = reo_dest_ring->rx_mpdu_info.meta_data; 3455 reo_ent_ring->queue_addr_lo = reo_dest_ring->queue_addr_lo; 3456 reo_ent_ring->info0 = FIELD_PREP(HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI, 3457 FIELD_GET(HAL_REO_DEST_RING_INFO0_QUEUE_ADDR_HI, 3458 reo_dest_ring->info0)) | 3459 FIELD_PREP(HAL_REO_ENTR_RING_INFO0_DEST_IND, dst_idx); 3460 ath11k_hal_srng_access_end(ab, srng); 3461 spin_unlock_bh(&srng->lock); 3462 3463 return 0; 3464 3465 err_free_idr: 3466 spin_lock_bh(&rx_refill_ring->idr_lock); 3467 idr_remove(&rx_refill_ring->bufs_idr, buf_id); 3468 spin_unlock_bh(&rx_refill_ring->idr_lock); 3469 err_unmap_dma: 3470 dma_unmap_single(ab->dev, paddr, defrag_skb->len + skb_tailroom(defrag_skb), 3471 DMA_TO_DEVICE); 3472 return ret; 3473 } 3474 3475 static int ath11k_dp_rx_h_cmp_frags(struct ath11k *ar, 3476 struct sk_buff *a, struct sk_buff *b) 3477 { 3478 int frag1, frag2; 3479 3480 frag1 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, a); 3481 frag2 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, b); 3482 3483 return frag1 - frag2; 3484 } 3485 3486 static void ath11k_dp_rx_h_sort_frags(struct ath11k *ar, 3487 struct sk_buff_head *frag_list, 3488 struct sk_buff *cur_frag) 3489 { 3490 struct sk_buff *skb; 3491 int cmp; 3492 3493 skb_queue_walk(frag_list, skb) { 3494 cmp = ath11k_dp_rx_h_cmp_frags(ar, skb, cur_frag); 3495 if (cmp < 0) 3496 continue; 3497 __skb_queue_before(frag_list, skb, cur_frag); 3498 return; 3499 } 3500 __skb_queue_tail(frag_list, cur_frag); 3501 } 3502 3503 static u64 ath11k_dp_rx_h_get_pn(struct ath11k *ar, struct sk_buff *skb) 3504 { 3505 struct ieee80211_hdr *hdr; 3506 u64 pn = 0; 3507 u8 *ehdr; 3508 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; 3509 3510 hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz); 3511 ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control); 3512 3513 pn = ehdr[0]; 3514 pn |= (u64)ehdr[1] << 8; 3515 pn |= (u64)ehdr[4] << 16; 3516 pn |= (u64)ehdr[5] << 24; 3517 pn |= (u64)ehdr[6] << 32; 3518 pn |= (u64)ehdr[7] << 40; 3519 3520 return pn; 3521 } 3522 3523 static bool 3524 ath11k_dp_rx_h_defrag_validate_incr_pn(struct ath11k *ar, struct 
dp_rx_tid *rx_tid) 3525 { 3526 enum hal_encrypt_type encrypt_type; 3527 struct sk_buff *first_frag, *skb; 3528 struct hal_rx_desc *desc; 3529 u64 last_pn; 3530 u64 cur_pn; 3531 3532 first_frag = skb_peek(&rx_tid->rx_frags); 3533 desc = (struct hal_rx_desc *)first_frag->data; 3534 3535 encrypt_type = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, desc); 3536 if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 && 3537 encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 && 3538 encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 && 3539 encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256) 3540 return true; 3541 3542 last_pn = ath11k_dp_rx_h_get_pn(ar, first_frag); 3543 skb_queue_walk(&rx_tid->rx_frags, skb) { 3544 if (skb == first_frag) 3545 continue; 3546 3547 cur_pn = ath11k_dp_rx_h_get_pn(ar, skb); 3548 if (cur_pn != last_pn + 1) 3549 return false; 3550 last_pn = cur_pn; 3551 } 3552 return true; 3553 } 3554 3555 static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar, 3556 struct sk_buff *msdu, 3557 u32 *ring_desc) 3558 { 3559 struct ath11k_base *ab = ar->ab; 3560 struct hal_rx_desc *rx_desc; 3561 struct ath11k_peer *peer; 3562 struct dp_rx_tid *rx_tid; 3563 struct sk_buff *defrag_skb = NULL; 3564 u32 peer_id; 3565 u16 seqno, frag_no; 3566 u8 tid; 3567 int ret = 0; 3568 bool more_frags; 3569 bool is_mcbc; 3570 3571 rx_desc = (struct hal_rx_desc *)msdu->data; 3572 peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc); 3573 tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, rx_desc); 3574 seqno = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc); 3575 frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, msdu); 3576 more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(ar->ab, msdu); 3577 is_mcbc = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc); 3578 3579 /* Multicast/Broadcast fragments are not expected */ 3580 if (is_mcbc) 3581 return -EINVAL; 3582 3583 if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(ar->ab, rx_desc) || 3584 !ath11k_dp_rx_h_mpdu_start_fc_valid(ar->ab, rx_desc) || 3585 tid > IEEE80211_NUM_TIDS) 3586 return -EINVAL; 3587 3588 /* received unfragmented packet in reo 3589 * exception ring, this shouldn't happen 3590 * as these packets typically come from 3591 * reo2sw srngs. 
3592 */ 3593 if (WARN_ON_ONCE(!frag_no && !more_frags)) 3594 return -EINVAL; 3595 3596 spin_lock_bh(&ab->base_lock); 3597 peer = ath11k_peer_find_by_id(ab, peer_id); 3598 if (!peer) { 3599 ath11k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n", 3600 peer_id); 3601 ret = -ENOENT; 3602 goto out_unlock; 3603 } 3604 rx_tid = &peer->rx_tid[tid]; 3605 3606 if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) || 3607 skb_queue_empty(&rx_tid->rx_frags)) { 3608 /* Flush stored fragments and start a new sequence */ 3609 ath11k_dp_rx_frags_cleanup(rx_tid, true); 3610 rx_tid->cur_sn = seqno; 3611 } 3612 3613 if (rx_tid->rx_frag_bitmap & BIT(frag_no)) { 3614 /* Fragment already present */ 3615 ret = -EINVAL; 3616 goto out_unlock; 3617 } 3618 3619 if (frag_no > __fls(rx_tid->rx_frag_bitmap)) 3620 __skb_queue_tail(&rx_tid->rx_frags, msdu); 3621 else 3622 ath11k_dp_rx_h_sort_frags(ar, &rx_tid->rx_frags, msdu); 3623 3624 rx_tid->rx_frag_bitmap |= BIT(frag_no); 3625 if (!more_frags) 3626 rx_tid->last_frag_no = frag_no; 3627 3628 if (frag_no == 0) { 3629 rx_tid->dst_ring_desc = kmemdup(ring_desc, 3630 sizeof(*rx_tid->dst_ring_desc), 3631 GFP_ATOMIC); 3632 if (!rx_tid->dst_ring_desc) { 3633 ret = -ENOMEM; 3634 goto out_unlock; 3635 } 3636 } else { 3637 ath11k_dp_rx_link_desc_return(ab, ring_desc, 3638 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 3639 } 3640 3641 if (!rx_tid->last_frag_no || 3642 rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) { 3643 mod_timer(&rx_tid->frag_timer, jiffies + 3644 ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS); 3645 goto out_unlock; 3646 } 3647 3648 spin_unlock_bh(&ab->base_lock); 3649 del_timer_sync(&rx_tid->frag_timer); 3650 spin_lock_bh(&ab->base_lock); 3651 3652 peer = ath11k_peer_find_by_id(ab, peer_id); 3653 if (!peer) 3654 goto err_frags_cleanup; 3655 3656 if (!ath11k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid)) 3657 goto err_frags_cleanup; 3658 3659 if (ath11k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb)) 3660 goto err_frags_cleanup; 3661 3662 if (!defrag_skb) 3663 goto err_frags_cleanup; 3664 3665 if (ath11k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb)) 3666 goto err_frags_cleanup; 3667 3668 ath11k_dp_rx_frags_cleanup(rx_tid, false); 3669 goto out_unlock; 3670 3671 err_frags_cleanup: 3672 dev_kfree_skb_any(defrag_skb); 3673 ath11k_dp_rx_frags_cleanup(rx_tid, true); 3674 out_unlock: 3675 spin_unlock_bh(&ab->base_lock); 3676 return ret; 3677 } 3678 3679 static int 3680 ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool drop) 3681 { 3682 struct ath11k_pdev_dp *dp = &ar->dp; 3683 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; 3684 struct sk_buff *msdu; 3685 struct ath11k_skb_rxcb *rxcb; 3686 struct hal_rx_desc *rx_desc; 3687 u8 *hdr_status; 3688 u16 msdu_len; 3689 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; 3690 3691 spin_lock_bh(&rx_ring->idr_lock); 3692 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 3693 if (!msdu) { 3694 ath11k_warn(ar->ab, "rx err buf with invalid buf_id %d\n", 3695 buf_id); 3696 spin_unlock_bh(&rx_ring->idr_lock); 3697 return -EINVAL; 3698 } 3699 3700 idr_remove(&rx_ring->bufs_idr, buf_id); 3701 spin_unlock_bh(&rx_ring->idr_lock); 3702 3703 rxcb = ATH11K_SKB_RXCB(msdu); 3704 dma_unmap_single(ar->ab->dev, rxcb->paddr, 3705 msdu->len + skb_tailroom(msdu), 3706 DMA_FROM_DEVICE); 3707 3708 if (drop) { 3709 dev_kfree_skb_any(msdu); 3710 return 0; 3711 } 3712 3713 rcu_read_lock(); 3714 if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) { 3715 
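/* The pdev has gone inactive underneath us; drop the frame. */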
dev_kfree_skb_any(msdu); 3716 goto exit; 3717 } 3718 3719 if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { 3720 dev_kfree_skb_any(msdu); 3721 goto exit; 3722 } 3723 3724 rx_desc = (struct hal_rx_desc *)msdu->data; 3725 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, rx_desc); 3726 if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) { 3727 hdr_status = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc); 3728 ath11k_warn(ar->ab, "invalid msdu leng %u", msdu_len); 3729 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status, 3730 sizeof(struct ieee80211_hdr)); 3731 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc, 3732 sizeof(struct hal_rx_desc)); 3733 dev_kfree_skb_any(msdu); 3734 goto exit; 3735 } 3736 3737 skb_put(msdu, hal_rx_desc_sz + msdu_len); 3738 3739 if (ath11k_dp_rx_frag_h_mpdu(ar, msdu, ring_desc)) { 3740 dev_kfree_skb_any(msdu); 3741 ath11k_dp_rx_link_desc_return(ar->ab, ring_desc, 3742 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 3743 } 3744 exit: 3745 rcu_read_unlock(); 3746 return 0; 3747 } 3748 3749 int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi, 3750 int budget) 3751 { 3752 u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC]; 3753 struct dp_link_desc_bank *link_desc_banks; 3754 enum hal_rx_buf_return_buf_manager rbm; 3755 int tot_n_bufs_reaped, quota, ret, i; 3756 int n_bufs_reaped[MAX_RADIOS] = {0}; 3757 struct dp_rxdma_ring *rx_ring; 3758 struct dp_srng *reo_except; 3759 u32 desc_bank, num_msdus; 3760 struct hal_srng *srng; 3761 struct ath11k_dp *dp; 3762 void *link_desc_va; 3763 int buf_id, mac_id; 3764 struct ath11k *ar; 3765 dma_addr_t paddr; 3766 u32 *desc; 3767 bool is_frag; 3768 u8 drop = 0; 3769 3770 tot_n_bufs_reaped = 0; 3771 quota = budget; 3772 3773 dp = &ab->dp; 3774 reo_except = &dp->reo_except_ring; 3775 link_desc_banks = dp->link_desc_banks; 3776 3777 srng = &ab->hal.srng_list[reo_except->ring_id]; 3778 3779 spin_lock_bh(&srng->lock); 3780 3781 ath11k_hal_srng_access_begin(ab, srng); 3782 3783 while (budget && 3784 (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 3785 struct hal_reo_dest_ring *reo_desc = (struct hal_reo_dest_ring *)desc; 3786 3787 ab->soc_stats.err_ring_pkts++; 3788 ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr, 3789 &desc_bank); 3790 if (ret) { 3791 ath11k_warn(ab, "failed to parse error reo desc %d\n", 3792 ret); 3793 continue; 3794 } 3795 link_desc_va = link_desc_banks[desc_bank].vaddr + 3796 (paddr - link_desc_banks[desc_bank].paddr); 3797 ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies, 3798 &rbm); 3799 if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST && 3800 rbm != ab->hw_params.hal_params->rx_buf_rbm) { 3801 ab->soc_stats.invalid_rbm++; 3802 ath11k_warn(ab, "invalid return buffer manager %d\n", rbm); 3803 ath11k_dp_rx_link_desc_return(ab, desc, 3804 HAL_WBM_REL_BM_ACT_REL_MSDU); 3805 continue; 3806 } 3807 3808 is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG); 3809 3810 /* Process only rx fragments with one msdu per link desc below, and drop 3811 * msdu's indicated due to error reasons. 
 */
	if (!is_frag || num_msdus > 1) {
		drop = 1;
		/* Return the link desc back to wbm idle list */
		ath11k_dp_rx_link_desc_return(ab, desc,
					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
	}

	for (i = 0; i < num_msdus; i++) {
		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
				   msdu_cookies[i]);

		mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID,
				   msdu_cookies[i]);

		ar = ab->pdevs[mac_id].ar;

		if (!ath11k_dp_process_rx_err_buf(ar, desc, buf_id, drop)) {
			n_bufs_reaped[mac_id]++;
			tot_n_bufs_reaped++;
		}
	}

	if (tot_n_bufs_reaped >= quota) {
		tot_n_bufs_reaped = quota;
		goto exit;
	}

	budget = quota - tot_n_bufs_reaped;
	}

exit:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	for (i = 0; i < ab->num_radios; i++) {
		if (!n_bufs_reaped[i])
			continue;

		ar = ab->pdevs[i].ar;
		rx_ring = &ar->dp.rx_refill_buf_ring;

		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i],
					   ab->hw_params.hal_params->rx_buf_rbm);
	}

	return tot_n_bufs_reaped;
}

static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar,
					     int msdu_len,
					     struct sk_buff_head *msdu_list)
{
	struct sk_buff *skb, *tmp;
	struct ath11k_skb_rxcb *rxcb;
	int n_buffs;

	n_buffs = DIV_ROUND_UP(msdu_len,
			       (DP_RX_BUFFER_SIZE - ar->ab->hw_params.hal_desc_sz));

	skb_queue_walk_safe(msdu_list, skb, tmp) {
		rxcb = ATH11K_SKB_RXCB(skb);
		if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
		    rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
			if (!n_buffs)
				break;
			__skb_unlink(skb, msdu_list);
			dev_kfree_skb_any(skb);
			n_buffs--;
		}
	}
}

static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu,
				      struct ieee80211_rx_status *status,
				      struct sk_buff_head *msdu_list)
{
	u16 msdu_len;
	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
	struct rx_attention *rx_attention;
	u8 l3pad_bytes;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;

	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc);

	if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) {
		/* First buffer will be freed by the caller, so deduct its length */
		msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz);
		ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
		return -EINVAL;
	}

	rx_attention = ath11k_dp_rx_get_attention(ar->ab, desc);
	if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) {
		ath11k_warn(ar->ab,
			    "msdu_done bit not set in null_q_desc processing\n");
		__skb_queue_purge(msdu_list);
		return -EIO;
	}

	/* Handle NULL queue descriptor violations arising out of a missing
	 * REO queue for a given peer or a given TID. This typically
	 * may happen if a packet is received on a QoS enabled TID before the
	 * ADDBA negotiation for that TID, when the TID queue is set up. Or
	 * it may also happen for MC/BC frames if they are not routed to the
	 * non-QoS TID queue, in the absence of any other default TID queue.
	 * This error can show up both in a REO destination ring and in the
	 * WBM release ring.
	 */

	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc);
	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc);

	if (rxcb->is_frag) {
		skb_pull(msdu, hal_rx_desc_sz);
	} else {
		l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc);

		if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
			return -EINVAL;

		skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
		skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
	}
	ath11k_dp_rx_h_ppdu(ar, desc, status);

	ath11k_dp_rx_h_mpdu(ar, msdu, desc, status);

	rxcb->tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, desc);

	/* Please note that the caller will have access to this msdu and will
	 * complete the rx processing with mac80211; there is no need to worry
	 * about cleaning up amsdu_list here.
	 */

	return 0;
}

static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu,
				   struct ieee80211_rx_status *status,
				   struct sk_buff_head *msdu_list)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	bool drop = false;

	ar->ab->soc_stats.reo_error[rxcb->err_code]++;

	switch (rxcb->err_code) {
	case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
		if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
			drop = true;
		break;
	case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
		/* TODO: Do not drop PN failed packets in the driver;
		 * instead, it is good to drop such packets in mac80211
		 * after incrementing the replay counters.
		 */
		fallthrough;
	default:
		/* TODO: Review other errors and report them to mac80211
		 * as appropriate.
		 */
		drop = true;
		break;
	}

	return drop;
}

static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu,
					struct ieee80211_rx_status *status)
{
	u16 msdu_len;
	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
	u8 l3pad_bytes;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;

	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc);
	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc);

	l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc);
	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc);
	skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
	skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);

	ath11k_dp_rx_h_ppdu(ar, desc, status);

	status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
			 RX_FLAG_DECRYPTED);

	ath11k_dp_rx_h_undecap(ar, msdu, desc,
			       HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
}

static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar, struct sk_buff *msdu,
				     struct ieee80211_rx_status *status)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	bool drop = false;

	ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;

	switch (rxcb->err_code) {
	case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
		ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status);
		break;
	default:
		/* TODO: Review other rxdma error codes to check if anything is
		 * worth reporting to mac80211.
		 */
		drop = true;
		break;
	}

	return drop;
}

static void ath11k_dp_rx_wbm_err(struct ath11k *ar,
				 struct napi_struct *napi,
				 struct
sk_buff *msdu, 4033 struct sk_buff_head *msdu_list) 4034 { 4035 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 4036 struct ieee80211_rx_status rxs = {0}; 4037 bool drop = true; 4038 4039 switch (rxcb->err_rel_src) { 4040 case HAL_WBM_REL_SRC_MODULE_REO: 4041 drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list); 4042 break; 4043 case HAL_WBM_REL_SRC_MODULE_RXDMA: 4044 drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs); 4045 break; 4046 default: 4047 /* msdu will get freed */ 4048 break; 4049 } 4050 4051 if (drop) { 4052 dev_kfree_skb_any(msdu); 4053 return; 4054 } 4055 4056 ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rxs); 4057 } 4058 4059 int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab, 4060 struct napi_struct *napi, int budget) 4061 { 4062 struct ath11k *ar; 4063 struct ath11k_dp *dp = &ab->dp; 4064 struct dp_rxdma_ring *rx_ring; 4065 struct hal_rx_wbm_rel_info err_info; 4066 struct hal_srng *srng; 4067 struct sk_buff *msdu; 4068 struct sk_buff_head msdu_list[MAX_RADIOS]; 4069 struct ath11k_skb_rxcb *rxcb; 4070 u32 *rx_desc; 4071 int buf_id, mac_id; 4072 int num_buffs_reaped[MAX_RADIOS] = {0}; 4073 int total_num_buffs_reaped = 0; 4074 int ret, i; 4075 4076 for (i = 0; i < ab->num_radios; i++) 4077 __skb_queue_head_init(&msdu_list[i]); 4078 4079 srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id]; 4080 4081 spin_lock_bh(&srng->lock); 4082 4083 ath11k_hal_srng_access_begin(ab, srng); 4084 4085 while (budget) { 4086 rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng); 4087 if (!rx_desc) 4088 break; 4089 4090 ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info); 4091 if (ret) { 4092 ath11k_warn(ab, 4093 "failed to parse rx error in wbm_rel ring desc %d\n", 4094 ret); 4095 continue; 4096 } 4097 4098 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie); 4099 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie); 4100 4101 ar = ab->pdevs[mac_id].ar; 4102 rx_ring = &ar->dp.rx_refill_buf_ring; 4103 4104 spin_lock_bh(&rx_ring->idr_lock); 4105 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 4106 if (!msdu) { 4107 ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d\n", 4108 buf_id, mac_id); 4109 spin_unlock_bh(&rx_ring->idr_lock); 4110 continue; 4111 } 4112 4113 idr_remove(&rx_ring->bufs_idr, buf_id); 4114 spin_unlock_bh(&rx_ring->idr_lock); 4115 4116 rxcb = ATH11K_SKB_RXCB(msdu); 4117 dma_unmap_single(ab->dev, rxcb->paddr, 4118 msdu->len + skb_tailroom(msdu), 4119 DMA_FROM_DEVICE); 4120 4121 num_buffs_reaped[mac_id]++; 4122 total_num_buffs_reaped++; 4123 budget--; 4124 4125 if (err_info.push_reason != 4126 HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) { 4127 dev_kfree_skb_any(msdu); 4128 continue; 4129 } 4130 4131 rxcb->err_rel_src = err_info.err_rel_src; 4132 rxcb->err_code = err_info.err_code; 4133 rxcb->rx_desc = (struct hal_rx_desc *)msdu->data; 4134 __skb_queue_tail(&msdu_list[mac_id], msdu); 4135 } 4136 4137 ath11k_hal_srng_access_end(ab, srng); 4138 4139 spin_unlock_bh(&srng->lock); 4140 4141 if (!total_num_buffs_reaped) 4142 goto done; 4143 4144 for (i = 0; i < ab->num_radios; i++) { 4145 if (!num_buffs_reaped[i]) 4146 continue; 4147 4148 ar = ab->pdevs[i].ar; 4149 rx_ring = &ar->dp.rx_refill_buf_ring; 4150 4151 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i], 4152 ab->hw_params.hal_params->rx_buf_rbm); 4153 } 4154 4155 rcu_read_lock(); 4156 for (i = 0; i < ab->num_radios; i++) { 4157 if (!rcu_dereference(ab->pdevs_active[i])) { 4158 __skb_queue_purge(&msdu_list[i]); 4159 continue; 4160 } 4161 4162 ar = ab->pdevs[i].ar; 4163 4164 
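		/* While the channel availability check (CAC) is running no
		 * frames may be delivered to mac80211, so purge anything
		 * queued for this radio instead of handing it up.
		 */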
if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { 4165 __skb_queue_purge(&msdu_list[i]); 4166 continue; 4167 } 4168 4169 while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL) 4170 ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]); 4171 } 4172 rcu_read_unlock(); 4173 done: 4174 return total_num_buffs_reaped; 4175 } 4176 4177 int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget) 4178 { 4179 struct ath11k *ar; 4180 struct dp_srng *err_ring; 4181 struct dp_rxdma_ring *rx_ring; 4182 struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks; 4183 struct hal_srng *srng; 4184 u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC]; 4185 enum hal_rx_buf_return_buf_manager rbm; 4186 enum hal_reo_entr_rxdma_ecode rxdma_err_code; 4187 struct ath11k_skb_rxcb *rxcb; 4188 struct sk_buff *skb; 4189 struct hal_reo_entrance_ring *entr_ring; 4190 void *desc; 4191 int num_buf_freed = 0; 4192 int quota = budget; 4193 dma_addr_t paddr; 4194 u32 desc_bank; 4195 void *link_desc_va; 4196 int num_msdus; 4197 int i; 4198 int buf_id; 4199 4200 ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar; 4201 err_ring = &ar->dp.rxdma_err_dst_ring[ath11k_hw_mac_id_to_srng_id(&ab->hw_params, 4202 mac_id)]; 4203 rx_ring = &ar->dp.rx_refill_buf_ring; 4204 4205 srng = &ab->hal.srng_list[err_ring->ring_id]; 4206 4207 spin_lock_bh(&srng->lock); 4208 4209 ath11k_hal_srng_access_begin(ab, srng); 4210 4211 while (quota-- && 4212 (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 4213 ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank); 4214 4215 entr_ring = (struct hal_reo_entrance_ring *)desc; 4216 rxdma_err_code = 4217 FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE, 4218 entr_ring->info1); 4219 ab->soc_stats.rxdma_error[rxdma_err_code]++; 4220 4221 link_desc_va = link_desc_banks[desc_bank].vaddr + 4222 (paddr - link_desc_banks[desc_bank].paddr); 4223 ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, 4224 msdu_cookies, &rbm); 4225 4226 for (i = 0; i < num_msdus; i++) { 4227 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 4228 msdu_cookies[i]); 4229 4230 spin_lock_bh(&rx_ring->idr_lock); 4231 skb = idr_find(&rx_ring->bufs_idr, buf_id); 4232 if (!skb) { 4233 ath11k_warn(ab, "rxdma error with invalid buf_id %d\n", 4234 buf_id); 4235 spin_unlock_bh(&rx_ring->idr_lock); 4236 continue; 4237 } 4238 4239 idr_remove(&rx_ring->bufs_idr, buf_id); 4240 spin_unlock_bh(&rx_ring->idr_lock); 4241 4242 rxcb = ATH11K_SKB_RXCB(skb); 4243 dma_unmap_single(ab->dev, rxcb->paddr, 4244 skb->len + skb_tailroom(skb), 4245 DMA_FROM_DEVICE); 4246 dev_kfree_skb_any(skb); 4247 4248 num_buf_freed++; 4249 } 4250 4251 ath11k_dp_rx_link_desc_return(ab, desc, 4252 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 4253 } 4254 4255 ath11k_hal_srng_access_end(ab, srng); 4256 4257 spin_unlock_bh(&srng->lock); 4258 4259 if (num_buf_freed) 4260 ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed, 4261 ab->hw_params.hal_params->rx_buf_rbm); 4262 4263 return budget - quota; 4264 } 4265 4266 void ath11k_dp_process_reo_status(struct ath11k_base *ab) 4267 { 4268 struct ath11k_dp *dp = &ab->dp; 4269 struct hal_srng *srng; 4270 struct dp_reo_cmd *cmd, *tmp; 4271 bool found = false; 4272 u32 *reo_desc; 4273 u16 tag; 4274 struct hal_reo_status reo_status; 4275 4276 srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id]; 4277 4278 memset(&reo_status, 0, sizeof(reo_status)); 4279 4280 spin_lock_bh(&srng->lock); 4281 4282 ath11k_hal_srng_access_begin(ab, srng); 4283 4284 while ((reo_desc = 
ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 4285 tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc); 4286 4287 switch (tag) { 4288 case HAL_REO_GET_QUEUE_STATS_STATUS: 4289 ath11k_hal_reo_status_queue_stats(ab, reo_desc, 4290 &reo_status); 4291 break; 4292 case HAL_REO_FLUSH_QUEUE_STATUS: 4293 ath11k_hal_reo_flush_queue_status(ab, reo_desc, 4294 &reo_status); 4295 break; 4296 case HAL_REO_FLUSH_CACHE_STATUS: 4297 ath11k_hal_reo_flush_cache_status(ab, reo_desc, 4298 &reo_status); 4299 break; 4300 case HAL_REO_UNBLOCK_CACHE_STATUS: 4301 ath11k_hal_reo_unblk_cache_status(ab, reo_desc, 4302 &reo_status); 4303 break; 4304 case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS: 4305 ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc, 4306 &reo_status); 4307 break; 4308 case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS: 4309 ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc, 4310 &reo_status); 4311 break; 4312 case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS: 4313 ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc, 4314 &reo_status); 4315 break; 4316 default: 4317 ath11k_warn(ab, "Unknown reo status type %d\n", tag); 4318 continue; 4319 } 4320 4321 spin_lock_bh(&dp->reo_cmd_lock); 4322 list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) { 4323 if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) { 4324 found = true; 4325 list_del(&cmd->list); 4326 break; 4327 } 4328 } 4329 spin_unlock_bh(&dp->reo_cmd_lock); 4330 4331 if (found) { 4332 cmd->handler(dp, (void *)&cmd->data, 4333 reo_status.uniform_hdr.cmd_status); 4334 kfree(cmd); 4335 } 4336 4337 found = false; 4338 } 4339 4340 ath11k_hal_srng_access_end(ab, srng); 4341 4342 spin_unlock_bh(&srng->lock); 4343 } 4344 4345 void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id) 4346 { 4347 struct ath11k *ar = ab->pdevs[mac_id].ar; 4348 4349 ath11k_dp_rx_pdev_srng_free(ar); 4350 ath11k_dp_rxdma_pdev_buf_free(ar); 4351 } 4352 4353 int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id) 4354 { 4355 struct ath11k *ar = ab->pdevs[mac_id].ar; 4356 struct ath11k_pdev_dp *dp = &ar->dp; 4357 u32 ring_id; 4358 int i; 4359 int ret; 4360 4361 ret = ath11k_dp_rx_pdev_srng_alloc(ar); 4362 if (ret) { 4363 ath11k_warn(ab, "failed to setup rx srngs\n"); 4364 return ret; 4365 } 4366 4367 ret = ath11k_dp_rxdma_pdev_buf_setup(ar); 4368 if (ret) { 4369 ath11k_warn(ab, "failed to setup rxdma ring\n"); 4370 return ret; 4371 } 4372 4373 ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id; 4374 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF); 4375 if (ret) { 4376 ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n", 4377 ret); 4378 return ret; 4379 } 4380 4381 if (ab->hw_params.rx_mac_buf_ring) { 4382 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 4383 ring_id = dp->rx_mac_buf_ring[i].ring_id; 4384 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, 4385 mac_id + i, HAL_RXDMA_BUF); 4386 if (ret) { 4387 ath11k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n", 4388 i, ret); 4389 return ret; 4390 } 4391 } 4392 } 4393 4394 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 4395 ring_id = dp->rxdma_err_dst_ring[i].ring_id; 4396 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, 4397 mac_id + i, HAL_RXDMA_DST); 4398 if (ret) { 4399 ath11k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n", 4400 i, ret); 4401 return ret; 4402 } 4403 } 4404 4405 if (!ab->hw_params.rxdma1_enable) 4406 goto config_refill_ring; 4407 4408 ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id; 4409 ret = ath11k_dp_tx_htt_srng_setup(ab, 
ring_id, 4410 mac_id, HAL_RXDMA_MONITOR_BUF); 4411 if (ret) { 4412 ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n", 4413 ret); 4414 return ret; 4415 } 4416 ret = ath11k_dp_tx_htt_srng_setup(ab, 4417 dp->rxdma_mon_dst_ring.ring_id, 4418 mac_id, HAL_RXDMA_MONITOR_DST); 4419 if (ret) { 4420 ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n", 4421 ret); 4422 return ret; 4423 } 4424 ret = ath11k_dp_tx_htt_srng_setup(ab, 4425 dp->rxdma_mon_desc_ring.ring_id, 4426 mac_id, HAL_RXDMA_MONITOR_DESC); 4427 if (ret) { 4428 ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n", 4429 ret); 4430 return ret; 4431 } 4432 4433 config_refill_ring: 4434 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 4435 ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id; 4436 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i, 4437 HAL_RXDMA_MONITOR_STATUS); 4438 if (ret) { 4439 ath11k_warn(ab, 4440 "failed to configure mon_status_refill_ring%d %d\n", 4441 i, ret); 4442 return ret; 4443 } 4444 } 4445 4446 return 0; 4447 } 4448 4449 static void ath11k_dp_mon_set_frag_len(u32 *total_len, u32 *frag_len) 4450 { 4451 if (*total_len >= (DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc))) { 4452 *frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc); 4453 *total_len -= *frag_len; 4454 } else { 4455 *frag_len = *total_len; 4456 *total_len = 0; 4457 } 4458 } 4459 4460 static 4461 int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar, 4462 void *p_last_buf_addr_info, 4463 u8 mac_id) 4464 { 4465 struct ath11k_pdev_dp *dp = &ar->dp; 4466 struct dp_srng *dp_srng; 4467 void *hal_srng; 4468 void *src_srng_desc; 4469 int ret = 0; 4470 4471 if (ar->ab->hw_params.rxdma1_enable) { 4472 dp_srng = &dp->rxdma_mon_desc_ring; 4473 hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id]; 4474 } else { 4475 dp_srng = &ar->ab->dp.wbm_desc_rel_ring; 4476 hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id]; 4477 } 4478 4479 ath11k_hal_srng_access_begin(ar->ab, hal_srng); 4480 4481 src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng); 4482 4483 if (src_srng_desc) { 4484 struct ath11k_buffer_addr *src_desc = 4485 (struct ath11k_buffer_addr *)src_srng_desc; 4486 4487 *src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info); 4488 } else { 4489 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4490 "Monitor Link Desc Ring %d Full", mac_id); 4491 ret = -ENOMEM; 4492 } 4493 4494 ath11k_hal_srng_access_end(ar->ab, hal_srng); 4495 return ret; 4496 } 4497 4498 static 4499 void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc, 4500 dma_addr_t *paddr, u32 *sw_cookie, 4501 u8 *rbm, 4502 void **pp_buf_addr_info) 4503 { 4504 struct hal_rx_msdu_link *msdu_link = 4505 (struct hal_rx_msdu_link *)rx_msdu_link_desc; 4506 struct ath11k_buffer_addr *buf_addr_info; 4507 4508 buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info; 4509 4510 ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, rbm); 4511 4512 *pp_buf_addr_info = (void *)buf_addr_info; 4513 } 4514 4515 static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len) 4516 { 4517 if (skb->len > len) { 4518 skb_trim(skb, len); 4519 } else { 4520 if (skb_tailroom(skb) < len - skb->len) { 4521 if ((pskb_expand_head(skb, 0, 4522 len - skb->len - skb_tailroom(skb), 4523 GFP_ATOMIC))) { 4524 dev_kfree_skb_any(skb); 4525 return -ENOMEM; 4526 } 4527 } 4528 skb_put(skb, (len - skb->len)); 4529 } 4530 return 0; 4531 } 4532 4533 static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar, 4534 void 
					*msdu_link_desc,
					struct hal_rx_msdu_list *msdu_list,
					u16 *num_msdus)
{
	struct hal_rx_msdu_details *msdu_details = NULL;
	struct rx_msdu_desc *msdu_desc_info = NULL;
	struct hal_rx_msdu_link *msdu_link = NULL;
	int i;
	u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1);
	u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1);
	u8 tmp = 0;

	msdu_link = (struct hal_rx_msdu_link *)msdu_link_desc;
	msdu_details = &msdu_link->msdu_link[0];

	for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) {
		if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
			      msdu_details[i].buf_addr_info.info0) == 0) {
			msdu_desc_info = &msdu_details[i - 1].rx_msdu_info;
			msdu_desc_info->info0 |= last;
			break;
		}
		msdu_desc_info = &msdu_details[i].rx_msdu_info;

		if (!i)
			msdu_desc_info->info0 |= first;
		else if (i == (HAL_RX_NUM_MSDU_DESC - 1))
			msdu_desc_info->info0 |= last;
		msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0;
		msdu_list->msdu_info[i].msdu_len =
			HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0);
		msdu_list->sw_cookie[i] =
			FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
				  msdu_details[i].buf_addr_info.info1);
		tmp = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
				msdu_details[i].buf_addr_info.info1);
		msdu_list->rbm[i] = tmp;
	}
	*num_msdus = i;
}

static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id,
					u32 *rx_bufs_used)
{
	u32 ret = 0;

	if ((*ppdu_id < msdu_ppdu_id) &&
	    ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) {
		*ppdu_id = msdu_ppdu_id;
		ret = msdu_ppdu_id;
	} else if ((*ppdu_id > msdu_ppdu_id) &&
		   ((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) {
		/* mon_dst is behind mon_status;
		 * skip dst_ring and free it
		 */
		*rx_bufs_used += 1;
		*ppdu_id = msdu_ppdu_id;
		ret = msdu_ppdu_id;
	}
	return ret;
}

static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,
				      bool *is_frag, u32 *total_len,
				      u32 *frag_len, u32 *msdu_cnt)
{
	if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) {
		if (!*is_frag) {
			*total_len = info->msdu_len;
			*is_frag = true;
		}
		ath11k_dp_mon_set_frag_len(total_len,
					   frag_len);
	} else {
		if (*is_frag) {
			ath11k_dp_mon_set_frag_len(total_len,
						   frag_len);
		} else {
			*frag_len = info->msdu_len;
		}
		*is_frag = false;
		*msdu_cnt -= 1;
	}
}

static u32
ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,
			  void *ring_entry, struct sk_buff **head_msdu,
			  struct sk_buff **tail_msdu, u32 *npackets,
			  u32 *ppdu_id)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
	struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
	struct sk_buff *msdu = NULL, *last = NULL;
	struct hal_rx_msdu_list msdu_list;
	void *p_buf_addr_info, *p_last_buf_addr_info;
	struct hal_rx_desc *rx_desc;
	void *rx_msdu_link_desc;
	dma_addr_t paddr;
	u16 num_msdus = 0;
	u32 rx_buf_size, rx_pkt_offset, sw_cookie;
	u32 rx_bufs_used = 0, i = 0;
	u32 msdu_ppdu_id = 0, msdu_cnt = 0;
	u32 total_len = 0, frag_len = 0;
	bool is_frag, is_first_msdu;
	bool drop_mpdu = false;
	struct ath11k_skb_rxcb *rxcb;
	struct hal_reo_entrance_ring *ent_desc =
		(struct
hal_reo_entrance_ring *)ring_entry; 4645 int buf_id; 4646 u32 rx_link_buf_info[2]; 4647 u8 rbm; 4648 4649 if (!ar->ab->hw_params.rxdma1_enable) 4650 rx_ring = &dp->rx_refill_buf_ring; 4651 4652 ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr, 4653 &sw_cookie, 4654 &p_last_buf_addr_info, &rbm, 4655 &msdu_cnt); 4656 4657 if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON, 4658 ent_desc->info1) == 4659 HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) { 4660 u8 rxdma_err = 4661 FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE, 4662 ent_desc->info1); 4663 if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR || 4664 rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR || 4665 rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) { 4666 drop_mpdu = true; 4667 pmon->rx_mon_stats.dest_mpdu_drop++; 4668 } 4669 } 4670 4671 is_frag = false; 4672 is_first_msdu = true; 4673 4674 do { 4675 if (pmon->mon_last_linkdesc_paddr == paddr) { 4676 pmon->rx_mon_stats.dup_mon_linkdesc_cnt++; 4677 return rx_bufs_used; 4678 } 4679 4680 if (ar->ab->hw_params.rxdma1_enable) 4681 rx_msdu_link_desc = 4682 (void *)pmon->link_desc_banks[sw_cookie].vaddr + 4683 (paddr - pmon->link_desc_banks[sw_cookie].paddr); 4684 else 4685 rx_msdu_link_desc = 4686 (void *)ar->ab->dp.link_desc_banks[sw_cookie].vaddr + 4687 (paddr - ar->ab->dp.link_desc_banks[sw_cookie].paddr); 4688 4689 ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list, 4690 &num_msdus); 4691 4692 for (i = 0; i < num_msdus; i++) { 4693 u32 l2_hdr_offset; 4694 4695 if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) { 4696 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4697 "i %d last_cookie %d is same\n", 4698 i, pmon->mon_last_buf_cookie); 4699 drop_mpdu = true; 4700 pmon->rx_mon_stats.dup_mon_buf_cnt++; 4701 continue; 4702 } 4703 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 4704 msdu_list.sw_cookie[i]); 4705 4706 spin_lock_bh(&rx_ring->idr_lock); 4707 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 4708 spin_unlock_bh(&rx_ring->idr_lock); 4709 if (!msdu) { 4710 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4711 "msdu_pop: invalid buf_id %d\n", buf_id); 4712 break; 4713 } 4714 rxcb = ATH11K_SKB_RXCB(msdu); 4715 if (!rxcb->unmapped) { 4716 dma_unmap_single(ar->ab->dev, rxcb->paddr, 4717 msdu->len + 4718 skb_tailroom(msdu), 4719 DMA_FROM_DEVICE); 4720 rxcb->unmapped = 1; 4721 } 4722 if (drop_mpdu) { 4723 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4724 "i %d drop msdu %p *ppdu_id %x\n", 4725 i, msdu, *ppdu_id); 4726 dev_kfree_skb_any(msdu); 4727 msdu = NULL; 4728 goto next_msdu; 4729 } 4730 4731 rx_desc = (struct hal_rx_desc *)msdu->data; 4732 4733 rx_pkt_offset = sizeof(struct hal_rx_desc); 4734 l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc); 4735 4736 if (is_first_msdu) { 4737 if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) { 4738 drop_mpdu = true; 4739 dev_kfree_skb_any(msdu); 4740 msdu = NULL; 4741 pmon->mon_last_linkdesc_paddr = paddr; 4742 goto next_msdu; 4743 } 4744 4745 msdu_ppdu_id = 4746 ath11k_dp_rxdesc_get_ppduid(ar->ab, rx_desc); 4747 4748 if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id, 4749 ppdu_id, 4750 &rx_bufs_used)) { 4751 if (rx_bufs_used) { 4752 drop_mpdu = true; 4753 dev_kfree_skb_any(msdu); 4754 msdu = NULL; 4755 goto next_msdu; 4756 } 4757 return rx_bufs_used; 4758 } 4759 pmon->mon_last_linkdesc_paddr = paddr; 4760 is_first_msdu = false; 4761 } 4762 ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i], 4763 &is_frag, &total_len, 4764 &frag_len, &msdu_cnt); 4765 rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len; 4766 
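			/* Resize the skb so that it covers exactly the HAL rx
			 * descriptor, the L3 header padding and this
			 * fragment's payload before it is linked into the
			 * MPDU chain below.
			 */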
4767 ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size); 4768 4769 if (!(*head_msdu)) 4770 *head_msdu = msdu; 4771 else if (last) 4772 last->next = msdu; 4773 4774 last = msdu; 4775 next_msdu: 4776 pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i]; 4777 rx_bufs_used++; 4778 spin_lock_bh(&rx_ring->idr_lock); 4779 idr_remove(&rx_ring->bufs_idr, buf_id); 4780 spin_unlock_bh(&rx_ring->idr_lock); 4781 } 4782 4783 ath11k_hal_rx_buf_addr_info_set(rx_link_buf_info, paddr, sw_cookie, rbm); 4784 4785 ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr, 4786 &sw_cookie, &rbm, 4787 &p_buf_addr_info); 4788 4789 if (ar->ab->hw_params.rxdma1_enable) { 4790 if (ath11k_dp_rx_monitor_link_desc_return(ar, 4791 p_last_buf_addr_info, 4792 dp->mac_id)) 4793 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4794 "dp_rx_monitor_link_desc_return failed"); 4795 } else { 4796 ath11k_dp_rx_link_desc_return(ar->ab, rx_link_buf_info, 4797 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 4798 } 4799 4800 p_last_buf_addr_info = p_buf_addr_info; 4801 4802 } while (paddr && msdu_cnt); 4803 4804 if (last) 4805 last->next = NULL; 4806 4807 *tail_msdu = msdu; 4808 4809 if (msdu_cnt == 0) 4810 *npackets = 1; 4811 4812 return rx_bufs_used; 4813 } 4814 4815 static void ath11k_dp_rx_msdus_set_payload(struct ath11k *ar, struct sk_buff *msdu) 4816 { 4817 u32 rx_pkt_offset, l2_hdr_offset; 4818 4819 rx_pkt_offset = ar->ab->hw_params.hal_desc_sz; 4820 l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, 4821 (struct hal_rx_desc *)msdu->data); 4822 skb_pull(msdu, rx_pkt_offset + l2_hdr_offset); 4823 } 4824 4825 static struct sk_buff * 4826 ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar, 4827 u32 mac_id, struct sk_buff *head_msdu, 4828 struct sk_buff *last_msdu, 4829 struct ieee80211_rx_status *rxs, bool *fcs_err) 4830 { 4831 struct ath11k_base *ab = ar->ab; 4832 struct sk_buff *msdu, *prev_buf; 4833 u32 wifi_hdr_len; 4834 struct hal_rx_desc *rx_desc; 4835 char *hdr_desc; 4836 u8 *dest, decap_format; 4837 struct ieee80211_hdr_3addr *wh; 4838 struct rx_attention *rx_attention; 4839 u32 err_bitmap; 4840 4841 if (!head_msdu) 4842 goto err_merge_fail; 4843 4844 rx_desc = (struct hal_rx_desc *)head_msdu->data; 4845 rx_attention = ath11k_dp_rx_get_attention(ab, rx_desc); 4846 err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention); 4847 4848 if (err_bitmap & DP_RX_MPDU_ERR_FCS) 4849 *fcs_err = true; 4850 4851 if (ath11k_dp_rxdesc_get_mpdulen_err(rx_attention)) 4852 return NULL; 4853 4854 decap_format = ath11k_dp_rx_h_msdu_start_decap_type(ab, rx_desc); 4855 4856 ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs); 4857 4858 if (decap_format == DP_RX_DECAP_TYPE_RAW) { 4859 ath11k_dp_rx_msdus_set_payload(ar, head_msdu); 4860 4861 prev_buf = head_msdu; 4862 msdu = head_msdu->next; 4863 4864 while (msdu) { 4865 ath11k_dp_rx_msdus_set_payload(ar, msdu); 4866 4867 prev_buf = msdu; 4868 msdu = msdu->next; 4869 } 4870 4871 prev_buf->next = NULL; 4872 4873 skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN); 4874 } else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) { 4875 __le16 qos_field; 4876 u8 qos_pkt = 0; 4877 4878 rx_desc = (struct hal_rx_desc *)head_msdu->data; 4879 hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc); 4880 4881 /* Base size */ 4882 wifi_hdr_len = sizeof(struct ieee80211_hdr_3addr); 4883 wh = (struct ieee80211_hdr_3addr *)hdr_desc; 4884 4885 if (ieee80211_is_data_qos(wh->frame_control)) { 4886 struct ieee80211_qos_hdr *qwh = 4887 (struct ieee80211_qos_hdr *)hdr_desc; 4888 4889 qos_field = qwh->qos_ctrl; 4890 qos_pkt = 1; 4891 } 4892 msdu = head_msdu; 4893 4894 
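		/* For native wifi decap, the 802.11 header (and the QoS
		 * control field captured above) is copied back in front of
		 * each QoS MSDU payload below, so that the merged MPDU handed
		 * to mac80211 carries a complete 802.11 header.
		 */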
while (msdu) { 4895 rx_desc = (struct hal_rx_desc *)msdu->data; 4896 hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc); 4897 4898 if (qos_pkt) { 4899 dest = skb_push(msdu, sizeof(__le16)); 4900 if (!dest) 4901 goto err_merge_fail; 4902 memcpy(dest, hdr_desc, wifi_hdr_len); 4903 memcpy(dest + wifi_hdr_len, 4904 (u8 *)&qos_field, sizeof(__le16)); 4905 } 4906 ath11k_dp_rx_msdus_set_payload(ar, msdu); 4907 prev_buf = msdu; 4908 msdu = msdu->next; 4909 } 4910 dest = skb_put(prev_buf, HAL_RX_FCS_LEN); 4911 if (!dest) 4912 goto err_merge_fail; 4913 4914 ath11k_dbg(ab, ATH11K_DBG_DATA, 4915 "mpdu_buf %pK mpdu_buf->len %u", 4916 prev_buf, prev_buf->len); 4917 } else { 4918 ath11k_dbg(ab, ATH11K_DBG_DATA, 4919 "decap format %d is not supported!\n", 4920 decap_format); 4921 goto err_merge_fail; 4922 } 4923 4924 return head_msdu; 4925 4926 err_merge_fail: 4927 return NULL; 4928 } 4929 4930 static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id, 4931 struct sk_buff *head_msdu, 4932 struct sk_buff *tail_msdu, 4933 struct napi_struct *napi) 4934 { 4935 struct ath11k_pdev_dp *dp = &ar->dp; 4936 struct sk_buff *mon_skb, *skb_next, *header; 4937 struct ieee80211_rx_status *rxs = &dp->rx_status; 4938 bool fcs_err = false; 4939 4940 mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu, 4941 tail_msdu, rxs, &fcs_err); 4942 4943 if (!mon_skb) 4944 goto mon_deliver_fail; 4945 4946 header = mon_skb; 4947 4948 rxs->flag = 0; 4949 4950 if (fcs_err) 4951 rxs->flag = RX_FLAG_FAILED_FCS_CRC; 4952 4953 do { 4954 skb_next = mon_skb->next; 4955 if (!skb_next) 4956 rxs->flag &= ~RX_FLAG_AMSDU_MORE; 4957 else 4958 rxs->flag |= RX_FLAG_AMSDU_MORE; 4959 4960 if (mon_skb == header) { 4961 header = NULL; 4962 rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN; 4963 } else { 4964 rxs->flag |= RX_FLAG_ALLOW_SAME_PN; 4965 } 4966 rxs->flag |= RX_FLAG_ONLY_MONITOR; 4967 4968 ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb, rxs); 4969 mon_skb = skb_next; 4970 } while (mon_skb); 4971 rxs->flag = 0; 4972 4973 return 0; 4974 4975 mon_deliver_fail: 4976 mon_skb = head_msdu; 4977 while (mon_skb) { 4978 skb_next = mon_skb->next; 4979 dev_kfree_skb_any(mon_skb); 4980 mon_skb = skb_next; 4981 } 4982 return -EINVAL; 4983 } 4984 4985 static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id, 4986 u32 quota, struct napi_struct *napi) 4987 { 4988 struct ath11k_pdev_dp *dp = &ar->dp; 4989 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 4990 const struct ath11k_hw_hal_params *hal_params; 4991 void *ring_entry; 4992 void *mon_dst_srng; 4993 u32 ppdu_id; 4994 u32 rx_bufs_used; 4995 u32 ring_id; 4996 struct ath11k_pdev_mon_stats *rx_mon_stats; 4997 u32 npackets = 0; 4998 4999 if (ar->ab->hw_params.rxdma1_enable) 5000 ring_id = dp->rxdma_mon_dst_ring.ring_id; 5001 else 5002 ring_id = dp->rxdma_err_dst_ring[mac_id].ring_id; 5003 5004 mon_dst_srng = &ar->ab->hal.srng_list[ring_id]; 5005 5006 if (!mon_dst_srng) { 5007 ath11k_warn(ar->ab, 5008 "HAL Monitor Destination Ring Init Failed -- %pK", 5009 mon_dst_srng); 5010 return; 5011 } 5012 5013 spin_lock_bh(&pmon->mon_lock); 5014 5015 ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng); 5016 5017 ppdu_id = pmon->mon_ppdu_info.ppdu_id; 5018 rx_bufs_used = 0; 5019 rx_mon_stats = &pmon->rx_mon_stats; 5020 5021 while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) { 5022 struct sk_buff *head_msdu, *tail_msdu; 5023 5024 head_msdu = NULL; 5025 tail_msdu = NULL; 5026 5027 rx_bufs_used += ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry, 5028 &head_msdu, 5029 
&tail_msdu, 5030 &npackets, &ppdu_id); 5031 5032 if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) { 5033 pmon->mon_ppdu_status = DP_PPDU_STATUS_START; 5034 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 5035 "dest_rx: new ppdu_id %x != status ppdu_id %x", 5036 ppdu_id, pmon->mon_ppdu_info.ppdu_id); 5037 break; 5038 } 5039 if (head_msdu && tail_msdu) { 5040 ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu, 5041 tail_msdu, napi); 5042 rx_mon_stats->dest_mpdu_done++; 5043 } 5044 5045 ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab, 5046 mon_dst_srng); 5047 } 5048 ath11k_hal_srng_access_end(ar->ab, mon_dst_srng); 5049 5050 spin_unlock_bh(&pmon->mon_lock); 5051 5052 if (rx_bufs_used) { 5053 rx_mon_stats->dest_ppdu_done++; 5054 hal_params = ar->ab->hw_params.hal_params; 5055 5056 if (ar->ab->hw_params.rxdma1_enable) 5057 ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, 5058 &dp->rxdma_mon_buf_ring, 5059 rx_bufs_used, 5060 hal_params->rx_buf_rbm); 5061 else 5062 ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, 5063 &dp->rx_refill_buf_ring, 5064 rx_bufs_used, 5065 hal_params->rx_buf_rbm); 5066 } 5067 } 5068 5069 static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar, 5070 int mac_id, u32 quota, 5071 struct napi_struct *napi) 5072 { 5073 struct ath11k_pdev_dp *dp = &ar->dp; 5074 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 5075 struct hal_rx_mon_ppdu_info *ppdu_info; 5076 struct sk_buff *status_skb; 5077 u32 tlv_status = HAL_TLV_STATUS_BUF_DONE; 5078 struct ath11k_pdev_mon_stats *rx_mon_stats; 5079 5080 ppdu_info = &pmon->mon_ppdu_info; 5081 rx_mon_stats = &pmon->rx_mon_stats; 5082 5083 if (pmon->mon_ppdu_status != DP_PPDU_STATUS_START) 5084 return; 5085 5086 while (!skb_queue_empty(&pmon->rx_status_q)) { 5087 status_skb = skb_dequeue(&pmon->rx_status_q); 5088 5089 tlv_status = ath11k_hal_rx_parse_mon_status(ar->ab, ppdu_info, 5090 status_skb); 5091 if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) { 5092 rx_mon_stats->status_ppdu_done++; 5093 pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE; 5094 ath11k_dp_rx_mon_dest_process(ar, mac_id, quota, napi); 5095 pmon->mon_ppdu_status = DP_PPDU_STATUS_START; 5096 } 5097 dev_kfree_skb_any(status_skb); 5098 } 5099 } 5100 5101 static int ath11k_dp_mon_process_rx(struct ath11k_base *ab, int mac_id, 5102 struct napi_struct *napi, int budget) 5103 { 5104 struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id); 5105 struct ath11k_pdev_dp *dp = &ar->dp; 5106 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 5107 int num_buffs_reaped = 0; 5108 5109 num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ar->ab, mac_id, &budget, 5110 &pmon->rx_status_q); 5111 if (num_buffs_reaped) 5112 ath11k_dp_rx_mon_status_process_tlv(ar, mac_id, budget, napi); 5113 5114 return num_buffs_reaped; 5115 } 5116 5117 int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id, 5118 struct napi_struct *napi, int budget) 5119 { 5120 struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id); 5121 int ret = 0; 5122 5123 if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags)) 5124 ret = ath11k_dp_mon_process_rx(ab, mac_id, napi, budget); 5125 else 5126 ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget); 5127 return ret; 5128 } 5129 5130 static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar) 5131 { 5132 struct ath11k_pdev_dp *dp = &ar->dp; 5133 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 5134 5135 skb_queue_head_init(&pmon->rx_status_q); 5136 5137 pmon->mon_ppdu_status = DP_PPDU_STATUS_START; 5138 5139 
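	/* Start with a clean set of monitor rx statistics for this pdev */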
memset(&pmon->rx_mon_stats, 0, 5140 sizeof(pmon->rx_mon_stats)); 5141 return 0; 5142 } 5143 5144 int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar) 5145 { 5146 struct ath11k_pdev_dp *dp = &ar->dp; 5147 struct ath11k_mon_data *pmon = &dp->mon_data; 5148 struct hal_srng *mon_desc_srng = NULL; 5149 struct dp_srng *dp_srng; 5150 int ret = 0; 5151 u32 n_link_desc = 0; 5152 5153 ret = ath11k_dp_rx_pdev_mon_status_attach(ar); 5154 if (ret) { 5155 ath11k_warn(ar->ab, "pdev_mon_status_attach() failed"); 5156 return ret; 5157 } 5158 5159 /* if rxdma1_enable is false, no need to setup 5160 * rxdma_mon_desc_ring. 5161 */ 5162 if (!ar->ab->hw_params.rxdma1_enable) 5163 return 0; 5164 5165 dp_srng = &dp->rxdma_mon_desc_ring; 5166 n_link_desc = dp_srng->size / 5167 ath11k_hal_srng_get_entrysize(ar->ab, HAL_RXDMA_MONITOR_DESC); 5168 mon_desc_srng = 5169 &ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id]; 5170 5171 ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks, 5172 HAL_RXDMA_MONITOR_DESC, mon_desc_srng, 5173 n_link_desc); 5174 if (ret) { 5175 ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed"); 5176 return ret; 5177 } 5178 pmon->mon_last_linkdesc_paddr = 0; 5179 pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1; 5180 spin_lock_init(&pmon->mon_lock); 5181 5182 return 0; 5183 } 5184 5185 static int ath11k_dp_mon_link_free(struct ath11k *ar) 5186 { 5187 struct ath11k_pdev_dp *dp = &ar->dp; 5188 struct ath11k_mon_data *pmon = &dp->mon_data; 5189 5190 ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks, 5191 HAL_RXDMA_MONITOR_DESC, 5192 &dp->rxdma_mon_desc_ring); 5193 return 0; 5194 } 5195 5196 int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar) 5197 { 5198 ath11k_dp_mon_link_free(ar); 5199 return 0; 5200 } 5201 5202 int ath11k_dp_rx_pktlog_start(struct ath11k_base *ab) 5203 { 5204 /* start reap timer */ 5205 mod_timer(&ab->mon_reap_timer, 5206 jiffies + msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL)); 5207 5208 return 0; 5209 } 5210 5211 int ath11k_dp_rx_pktlog_stop(struct ath11k_base *ab, bool stop_timer) 5212 { 5213 int ret; 5214 5215 if (stop_timer) 5216 del_timer_sync(&ab->mon_reap_timer); 5217 5218 /* reap all the monitor related rings */ 5219 ret = ath11k_dp_purge_mon_ring(ab); 5220 if (ret) { 5221 ath11k_warn(ab, "failed to purge dp mon ring: %d\n", ret); 5222 return ret; 5223 } 5224 5225 return 0; 5226 } 5227
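
/* Usage sketch (illustrative only, not part of the driver): the pdev-level
 * entry points above are expected to be driven from the core DP setup and
 * teardown paths, roughly in the order shown below. The exact call sites
 * live outside this file (in dp.c and the debugfs pktlog path) and may
 * differ between kernel versions.
 *
 *	ath11k_dp_rx_pdev_alloc(ab, mac_id);
 *	ath11k_dp_rx_pdev_mon_attach(ar);
 *	...
 *	ath11k_dp_rx_pktlog_start(ab);       (when pktlog is enabled)
 *	ath11k_dp_rx_pktlog_stop(ab, true);  (when pktlog is disabled)
 *	...
 *	ath11k_dp_rx_pdev_mon_detach(ar);
 *	ath11k_dp_rx_pdev_free(ab, mac_id);
 */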