1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 /* 3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. 4 */ 5 6 #include <linux/ieee80211.h> 7 #include <linux/kernel.h> 8 #include <linux/skbuff.h> 9 #include <crypto/hash.h> 10 #include "core.h" 11 #include "debug.h" 12 #include "debugfs_htt_stats.h" 13 #include "debugfs_sta.h" 14 #include "hal_desc.h" 15 #include "hw.h" 16 #include "dp_rx.h" 17 #include "hal_rx.h" 18 #include "dp_tx.h" 19 #include "peer.h" 20 21 #define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ) 22 23 static u8 *ath11k_dp_rx_h_80211_hdr(struct hal_rx_desc *desc) 24 { 25 return desc->hdr_status; 26 } 27 28 static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct hal_rx_desc *desc) 29 { 30 if (!(__le32_to_cpu(desc->mpdu_start.info1) & 31 RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID)) 32 return HAL_ENCRYPT_TYPE_OPEN; 33 34 return FIELD_GET(RX_MPDU_START_INFO2_ENC_TYPE, 35 __le32_to_cpu(desc->mpdu_start.info2)); 36 } 37 38 static u8 ath11k_dp_rx_h_msdu_start_decap_type(struct hal_rx_desc *desc) 39 { 40 return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT, 41 __le32_to_cpu(desc->msdu_start.info2)); 42 } 43 44 static u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct hal_rx_desc *desc) 45 { 46 return FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT, 47 __le32_to_cpu(desc->msdu_start.info2)); 48 } 49 50 static bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct hal_rx_desc *desc) 51 { 52 return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID, 53 __le32_to_cpu(desc->mpdu_start.info1)); 54 } 55 56 static bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct hal_rx_desc *desc) 57 { 58 return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_FCTRL_VALID, 59 __le32_to_cpu(desc->mpdu_start.info1)); 60 } 61 62 static bool ath11k_dp_rx_h_mpdu_start_more_frags(struct sk_buff *skb) 63 { 64 struct ieee80211_hdr *hdr; 65 66 hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE); 67 return ieee80211_has_morefrags(hdr->frame_control); 68 } 69 70 static u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct sk_buff *skb) 71 { 72 struct ieee80211_hdr *hdr; 73 74 hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE); 75 return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; 76 } 77 78 static u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct hal_rx_desc *desc) 79 { 80 return FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_NUM, 81 __le32_to_cpu(desc->mpdu_start.info1)); 82 } 83 84 static bool ath11k_dp_rx_h_attn_msdu_done(struct hal_rx_desc *desc) 85 { 86 return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE, 87 __le32_to_cpu(desc->attention.info2)); 88 } 89 90 static bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct hal_rx_desc *desc) 91 { 92 return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL, 93 __le32_to_cpu(desc->attention.info1)); 94 } 95 96 static bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct hal_rx_desc *desc) 97 { 98 return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL, 99 __le32_to_cpu(desc->attention.info1)); 100 } 101 102 static bool ath11k_dp_rx_h_attn_is_decrypted(struct hal_rx_desc *desc) 103 { 104 return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE, 105 __le32_to_cpu(desc->attention.info2)) == 106 RX_DESC_DECRYPT_STATUS_CODE_OK); 107 } 108 109 static u32 ath11k_dp_rx_h_attn_mpdu_err(struct hal_rx_desc *desc) 110 { 111 u32 info = __le32_to_cpu(desc->attention.info1); 112 u32 errmap = 0; 113 114 if (info & RX_ATTENTION_INFO1_FCS_ERR) 115 errmap |= DP_RX_MPDU_ERR_FCS; 116 117 if (info & RX_ATTENTION_INFO1_DECRYPT_ERR) 118 errmap |= DP_RX_MPDU_ERR_DECRYPT; 119 120 if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR) 121 errmap |= DP_RX_MPDU_ERR_TKIP_MIC; 122 123 if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR) 124 errmap |= DP_RX_MPDU_ERR_AMSDU_ERR; 125 126 if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR) 127 errmap |= DP_RX_MPDU_ERR_OVERFLOW; 128 129 if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR) 130 errmap |= DP_RX_MPDU_ERR_MSDU_LEN; 131 132 if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR) 133 errmap |= DP_RX_MPDU_ERR_MPDU_LEN; 134 135 return errmap; 136 } 137 138 static u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct hal_rx_desc *desc) 139 { 140 return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH, 141 __le32_to_cpu(desc->msdu_start.info1)); 142 } 143 144 static u8 ath11k_dp_rx_h_msdu_start_sgi(struct hal_rx_desc *desc) 145 { 146 return FIELD_GET(RX_MSDU_START_INFO3_SGI, 147 __le32_to_cpu(desc->msdu_start.info3)); 148 } 149 150 static u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct hal_rx_desc *desc) 151 { 152 return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS, 153 __le32_to_cpu(desc->msdu_start.info3)); 154 } 155 156 static u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct hal_rx_desc *desc) 157 { 158 return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW, 159 __le32_to_cpu(desc->msdu_start.info3)); 160 } 161 162 static u32 ath11k_dp_rx_h_msdu_start_freq(struct hal_rx_desc *desc) 163 { 164 return __le32_to_cpu(desc->msdu_start.phy_meta_data); 165 } 166 167 static u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct hal_rx_desc *desc) 168 { 169 return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE, 170 __le32_to_cpu(desc->msdu_start.info3)); 171 } 172 173 static u8 ath11k_dp_rx_h_msdu_start_nss(struct hal_rx_desc *desc) 174 { 175 u8 mimo_ss_bitmap = FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP, 176 __le32_to_cpu(desc->msdu_start.info3)); 177 178 return hweight8(mimo_ss_bitmap); 179 } 180 181 static u8 ath11k_dp_rx_h_mpdu_start_tid(struct hal_rx_desc *desc) 182 { 183 return FIELD_GET(RX_MPDU_START_INFO2_TID, 184 __le32_to_cpu(desc->mpdu_start.info2)); 185 } 186 187 static u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct hal_rx_desc *desc) 188 { 189 return __le16_to_cpu(desc->mpdu_start.sw_peer_id); 190 } 191 192 static u8 ath11k_dp_rx_h_msdu_end_l3pad(struct hal_rx_desc *desc) 193 { 194 return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING, 195 __le32_to_cpu(desc->msdu_end.info2)); 196 } 197 198 static bool ath11k_dp_rx_h_msdu_end_first_msdu(struct hal_rx_desc *desc) 199 { 200 return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU, 201 __le32_to_cpu(desc->msdu_end.info2)); 202 } 203 204 static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct hal_rx_desc *desc) 205 { 206 return !!FIELD_GET(RX_MSDU_END_INFO2_LAST_MSDU, 207 __le32_to_cpu(desc->msdu_end.info2)); 208 } 209 210 static void ath11k_dp_rx_desc_end_tlv_copy(struct hal_rx_desc *fdesc, 211 struct hal_rx_desc *ldesc) 212 { 213 memcpy((u8 *)&fdesc->msdu_end, (u8 *)&ldesc->msdu_end, 214 sizeof(struct rx_msdu_end)); 215 memcpy((u8 *)&fdesc->attention, (u8 *)&ldesc->attention, 216 sizeof(struct rx_attention)); 217 memcpy((u8 *)&fdesc->mpdu_end, (u8 *)&ldesc->mpdu_end, 218 sizeof(struct rx_mpdu_end)); 219 } 220 221 static u32 ath11k_dp_rxdesc_get_mpdulen_err(struct hal_rx_desc *rx_desc) 222 { 223 struct rx_attention *rx_attn; 224 225 rx_attn = &rx_desc->attention; 226 227 return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR, 228 __le32_to_cpu(rx_attn->info1)); 229 } 230 231 static u32 ath11k_dp_rxdesc_get_decap_format(struct hal_rx_desc *rx_desc) 232 { 233 struct rx_msdu_start *rx_msdu_start; 234 235 rx_msdu_start = &rx_desc->msdu_start; 236 237 return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT, 238 __le32_to_cpu(rx_msdu_start->info2)); 239 } 240 241 static u8 *ath11k_dp_rxdesc_get_80211hdr(struct hal_rx_desc *rx_desc) 242 { 243 u8 *rx_pkt_hdr; 244 245 rx_pkt_hdr = &rx_desc->msdu_payload[0]; 246 247 return rx_pkt_hdr; 248 } 249 250 static bool ath11k_dp_rxdesc_mpdu_valid(struct hal_rx_desc *rx_desc) 251 { 252 u32 tlv_tag; 253 254 tlv_tag = FIELD_GET(HAL_TLV_HDR_TAG, 255 __le32_to_cpu(rx_desc->mpdu_start_tag)); 256 257 return tlv_tag == HAL_RX_MPDU_START; 258 } 259 260 static u32 ath11k_dp_rxdesc_get_ppduid(struct hal_rx_desc *rx_desc) 261 { 262 return __le16_to_cpu(rx_desc->mpdu_start.phy_ppdu_id); 263 } 264 265 static void ath11k_dp_service_mon_ring(struct timer_list *t) 266 { 267 struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer); 268 int i; 269 270 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) 271 ath11k_dp_rx_process_mon_rings(ab, i, NULL, DP_MON_SERVICE_BUDGET); 272 273 mod_timer(&ab->mon_reap_timer, jiffies + 274 msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL)); 275 } 276 277 static int ath11k_dp_purge_mon_ring(struct ath11k_base *ab) 278 { 279 int i, reaped = 0; 280 unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS); 281 282 do { 283 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) 284 reaped += ath11k_dp_rx_process_mon_rings(ab, i, 285 NULL, 286 DP_MON_SERVICE_BUDGET); 287 288 /* nothing more to reap */ 289 if (reaped < DP_MON_SERVICE_BUDGET) 290 return 0; 291 292 } while (time_before(jiffies, timeout)); 293 294 ath11k_warn(ab, "dp mon ring purge timeout"); 295 296 return -ETIMEDOUT; 297 } 298 299 /* Returns number of Rx buffers replenished */ 300 int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id, 301 struct dp_rxdma_ring *rx_ring, 302 int req_entries, 303 enum hal_rx_buf_return_buf_manager mgr) 304 { 305 struct hal_srng *srng; 306 u32 *desc; 307 struct sk_buff *skb; 308 int num_free; 309 int num_remain; 310 int buf_id; 311 u32 cookie; 312 dma_addr_t paddr; 313 314 req_entries = min(req_entries, rx_ring->bufs_max); 315 316 srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; 317 318 spin_lock_bh(&srng->lock); 319 320 ath11k_hal_srng_access_begin(ab, srng); 321 322 num_free = ath11k_hal_srng_src_num_free(ab, srng, true); 323 if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4)) 324 req_entries = num_free; 325 326 req_entries = min(num_free, req_entries); 327 num_remain = req_entries; 328 329 while (num_remain > 0) { 330 skb = dev_alloc_skb(DP_RX_BUFFER_SIZE + 331 DP_RX_BUFFER_ALIGN_SIZE); 332 if (!skb) 333 break; 334 335 if (!IS_ALIGNED((unsigned long)skb->data, 336 DP_RX_BUFFER_ALIGN_SIZE)) { 337 skb_pull(skb, 338 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) - 339 skb->data); 340 } 341 342 paddr = dma_map_single(ab->dev, skb->data, 343 skb->len + skb_tailroom(skb), 344 DMA_FROM_DEVICE); 345 if (dma_mapping_error(ab->dev, paddr)) 346 goto fail_free_skb; 347 348 spin_lock_bh(&rx_ring->idr_lock); 349 buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0, 350 rx_ring->bufs_max * 3, GFP_ATOMIC); 351 spin_unlock_bh(&rx_ring->idr_lock); 352 if (buf_id < 0) 353 goto fail_dma_unmap; 354 355 desc = ath11k_hal_srng_src_get_next_entry(ab, srng); 356 if (!desc) 357 goto fail_idr_remove; 358 359 ATH11K_SKB_RXCB(skb)->paddr = paddr; 360 361 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) | 362 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 363 364 num_remain--; 365 366 ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr); 367 } 368 369 ath11k_hal_srng_access_end(ab, srng); 370 371 spin_unlock_bh(&srng->lock); 372 373 return req_entries - num_remain; 374 375 fail_idr_remove: 376 spin_lock_bh(&rx_ring->idr_lock); 377 idr_remove(&rx_ring->bufs_idr, buf_id); 378 spin_unlock_bh(&rx_ring->idr_lock); 379 fail_dma_unmap: 380 dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), 381 DMA_FROM_DEVICE); 382 fail_free_skb: 383 dev_kfree_skb_any(skb); 384 385 ath11k_hal_srng_access_end(ab, srng); 386 387 spin_unlock_bh(&srng->lock); 388 389 return req_entries - num_remain; 390 } 391 392 static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar, 393 struct dp_rxdma_ring *rx_ring) 394 { 395 struct ath11k_pdev_dp *dp = &ar->dp; 396 struct sk_buff *skb; 397 int buf_id; 398 399 spin_lock_bh(&rx_ring->idr_lock); 400 idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) { 401 idr_remove(&rx_ring->bufs_idr, buf_id); 402 /* TODO: Understand where internal driver does this dma_unmap 403 * of rxdma_buffer. 404 */ 405 dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr, 406 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); 407 dev_kfree_skb_any(skb); 408 } 409 410 idr_destroy(&rx_ring->bufs_idr); 411 spin_unlock_bh(&rx_ring->idr_lock); 412 413 /* if rxdma1_enable is false, mon_status_refill_ring 414 * isn't setup, so don't clean. 415 */ 416 if (!ar->ab->hw_params.rxdma1_enable) 417 return 0; 418 419 rx_ring = &dp->rx_mon_status_refill_ring[0]; 420 421 spin_lock_bh(&rx_ring->idr_lock); 422 idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) { 423 idr_remove(&rx_ring->bufs_idr, buf_id); 424 /* XXX: Understand where internal driver does this dma_unmap 425 * of rxdma_buffer. 426 */ 427 dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr, 428 skb->len + skb_tailroom(skb), DMA_BIDIRECTIONAL); 429 dev_kfree_skb_any(skb); 430 } 431 432 idr_destroy(&rx_ring->bufs_idr); 433 spin_unlock_bh(&rx_ring->idr_lock); 434 435 return 0; 436 } 437 438 static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar) 439 { 440 struct ath11k_pdev_dp *dp = &ar->dp; 441 struct ath11k_base *ab = ar->ab; 442 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; 443 int i; 444 445 ath11k_dp_rxdma_buf_ring_free(ar, rx_ring); 446 447 rx_ring = &dp->rxdma_mon_buf_ring; 448 ath11k_dp_rxdma_buf_ring_free(ar, rx_ring); 449 450 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 451 rx_ring = &dp->rx_mon_status_refill_ring[i]; 452 ath11k_dp_rxdma_buf_ring_free(ar, rx_ring); 453 } 454 455 return 0; 456 } 457 458 static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar, 459 struct dp_rxdma_ring *rx_ring, 460 u32 ringtype) 461 { 462 struct ath11k_pdev_dp *dp = &ar->dp; 463 int num_entries; 464 465 num_entries = rx_ring->refill_buf_ring.size / 466 ath11k_hal_srng_get_entrysize(ar->ab, ringtype); 467 468 rx_ring->bufs_max = num_entries; 469 ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries, 470 HAL_RX_BUF_RBM_SW3_BM); 471 return 0; 472 } 473 474 static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar) 475 { 476 struct ath11k_pdev_dp *dp = &ar->dp; 477 struct ath11k_base *ab = ar->ab; 478 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; 479 int i; 480 481 ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF); 482 483 if (ar->ab->hw_params.rxdma1_enable) { 484 rx_ring = &dp->rxdma_mon_buf_ring; 485 ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF); 486 } 487 488 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 489 rx_ring = &dp->rx_mon_status_refill_ring[i]; 490 ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS); 491 } 492 493 return 0; 494 } 495 496 static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar) 497 { 498 struct ath11k_pdev_dp *dp = &ar->dp; 499 struct ath11k_base *ab = ar->ab; 500 int i; 501 502 ath11k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring); 503 504 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 505 if (ab->hw_params.rx_mac_buf_ring) 506 ath11k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]); 507 508 ath11k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]); 509 ath11k_dp_srng_cleanup(ab, 510 &dp->rx_mon_status_refill_ring[i].refill_buf_ring); 511 } 512 513 ath11k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring); 514 } 515 516 void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab) 517 { 518 struct ath11k_dp *dp = &ab->dp; 519 int i; 520 521 for (i = 0; i < DP_REO_DST_RING_MAX; i++) 522 ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]); 523 } 524 525 int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab) 526 { 527 struct ath11k_dp *dp = &ab->dp; 528 int ret; 529 int i; 530 531 for (i = 0; i < DP_REO_DST_RING_MAX; i++) { 532 ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring[i], 533 HAL_REO_DST, i, 0, 534 DP_REO_DST_RING_SIZE); 535 if (ret) { 536 ath11k_warn(ab, "failed to setup reo_dst_ring\n"); 537 goto err_reo_cleanup; 538 } 539 } 540 541 return 0; 542 543 err_reo_cleanup: 544 ath11k_dp_pdev_reo_cleanup(ab); 545 546 return ret; 547 } 548 549 static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar) 550 { 551 struct ath11k_pdev_dp *dp = &ar->dp; 552 struct ath11k_base *ab = ar->ab; 553 struct dp_srng *srng = NULL; 554 int i; 555 int ret; 556 557 ret = ath11k_dp_srng_setup(ar->ab, 558 &dp->rx_refill_buf_ring.refill_buf_ring, 559 HAL_RXDMA_BUF, 0, 560 dp->mac_id, DP_RXDMA_BUF_RING_SIZE); 561 if (ret) { 562 ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n"); 563 return ret; 564 } 565 566 if (ar->ab->hw_params.rx_mac_buf_ring) { 567 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 568 ret = ath11k_dp_srng_setup(ar->ab, 569 &dp->rx_mac_buf_ring[i], 570 HAL_RXDMA_BUF, 1, 571 dp->mac_id + i, 1024); 572 if (ret) { 573 ath11k_warn(ar->ab, "failed to setup rx_mac_buf_ring %d\n", 574 i); 575 return ret; 576 } 577 } 578 } 579 580 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 581 ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring[i], 582 HAL_RXDMA_DST, 0, dp->mac_id + i, 583 DP_RXDMA_ERR_DST_RING_SIZE); 584 if (ret) { 585 ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring %d\n", i); 586 return ret; 587 } 588 } 589 590 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 591 srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring; 592 ret = ath11k_dp_srng_setup(ar->ab, 593 srng, 594 HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id + i, 595 DP_RXDMA_MON_STATUS_RING_SIZE); 596 if (ret) { 597 ath11k_warn(ar->ab, 598 "failed to setup rx_mon_status_refill_ring %d\n", i); 599 return ret; 600 } 601 } 602 603 /* if rxdma1_enable is false, then it doesn't need 604 * to setup rxdam_mon_buf_ring, rxdma_mon_dst_ring 605 * and rxdma_mon_desc_ring. 606 * init reap timer for QCA6390. 607 */ 608 if (!ar->ab->hw_params.rxdma1_enable) { 609 //init mon status buffer reap timer 610 timer_setup(&ar->ab->mon_reap_timer, 611 ath11k_dp_service_mon_ring, 0); 612 return 0; 613 } 614 615 ret = ath11k_dp_srng_setup(ar->ab, 616 &dp->rxdma_mon_buf_ring.refill_buf_ring, 617 HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id, 618 DP_RXDMA_MONITOR_BUF_RING_SIZE); 619 if (ret) { 620 ath11k_warn(ar->ab, 621 "failed to setup HAL_RXDMA_MONITOR_BUF\n"); 622 return ret; 623 } 624 625 ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring, 626 HAL_RXDMA_MONITOR_DST, 0, dp->mac_id, 627 DP_RXDMA_MONITOR_DST_RING_SIZE); 628 if (ret) { 629 ath11k_warn(ar->ab, 630 "failed to setup HAL_RXDMA_MONITOR_DST\n"); 631 return ret; 632 } 633 634 ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring, 635 HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id, 636 DP_RXDMA_MONITOR_DESC_RING_SIZE); 637 if (ret) { 638 ath11k_warn(ar->ab, 639 "failed to setup HAL_RXDMA_MONITOR_DESC\n"); 640 return ret; 641 } 642 643 return 0; 644 } 645 646 void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab) 647 { 648 struct ath11k_dp *dp = &ab->dp; 649 struct dp_reo_cmd *cmd, *tmp; 650 struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache; 651 652 spin_lock_bh(&dp->reo_cmd_lock); 653 list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) { 654 list_del(&cmd->list); 655 dma_unmap_single(ab->dev, cmd->data.paddr, 656 cmd->data.size, DMA_BIDIRECTIONAL); 657 kfree(cmd->data.vaddr); 658 kfree(cmd); 659 } 660 661 list_for_each_entry_safe(cmd_cache, tmp_cache, 662 &dp->reo_cmd_cache_flush_list, list) { 663 list_del(&cmd_cache->list); 664 dp->reo_cmd_cache_flush_count--; 665 dma_unmap_single(ab->dev, cmd_cache->data.paddr, 666 cmd_cache->data.size, DMA_BIDIRECTIONAL); 667 kfree(cmd_cache->data.vaddr); 668 kfree(cmd_cache); 669 } 670 spin_unlock_bh(&dp->reo_cmd_lock); 671 } 672 673 static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx, 674 enum hal_reo_cmd_status status) 675 { 676 struct dp_rx_tid *rx_tid = ctx; 677 678 if (status != HAL_REO_CMD_SUCCESS) 679 ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n", 680 rx_tid->tid, status); 681 682 dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size, 683 DMA_BIDIRECTIONAL); 684 kfree(rx_tid->vaddr); 685 } 686 687 static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab, 688 struct dp_rx_tid *rx_tid) 689 { 690 struct ath11k_hal_reo_cmd cmd = {0}; 691 unsigned long tot_desc_sz, desc_sz; 692 int ret; 693 694 tot_desc_sz = rx_tid->size; 695 desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID); 696 697 while (tot_desc_sz > desc_sz) { 698 tot_desc_sz -= desc_sz; 699 cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz); 700 cmd.addr_hi = upper_32_bits(rx_tid->paddr); 701 ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid, 702 HAL_REO_CMD_FLUSH_CACHE, &cmd, 703 NULL); 704 if (ret) 705 ath11k_warn(ab, 706 "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n", 707 rx_tid->tid, ret); 708 } 709 710 memset(&cmd, 0, sizeof(cmd)); 711 cmd.addr_lo = lower_32_bits(rx_tid->paddr); 712 cmd.addr_hi = upper_32_bits(rx_tid->paddr); 713 cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS; 714 ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid, 715 HAL_REO_CMD_FLUSH_CACHE, 716 &cmd, ath11k_dp_reo_cmd_free); 717 if (ret) { 718 ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n", 719 rx_tid->tid, ret); 720 dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, 721 DMA_BIDIRECTIONAL); 722 kfree(rx_tid->vaddr); 723 } 724 } 725 726 static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx, 727 enum hal_reo_cmd_status status) 728 { 729 struct ath11k_base *ab = dp->ab; 730 struct dp_rx_tid *rx_tid = ctx; 731 struct dp_reo_cache_flush_elem *elem, *tmp; 732 733 if (status == HAL_REO_CMD_DRAIN) { 734 goto free_desc; 735 } else if (status != HAL_REO_CMD_SUCCESS) { 736 /* Shouldn't happen! Cleanup in case of other failure? */ 737 ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n", 738 rx_tid->tid, status); 739 return; 740 } 741 742 elem = kzalloc(sizeof(*elem), GFP_ATOMIC); 743 if (!elem) 744 goto free_desc; 745 746 elem->ts = jiffies; 747 memcpy(&elem->data, rx_tid, sizeof(*rx_tid)); 748 749 spin_lock_bh(&dp->reo_cmd_lock); 750 list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list); 751 dp->reo_cmd_cache_flush_count++; 752 753 /* Flush and invalidate aged REO desc from HW cache */ 754 list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list, 755 list) { 756 if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD || 757 time_after(jiffies, elem->ts + 758 msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) { 759 list_del(&elem->list); 760 dp->reo_cmd_cache_flush_count--; 761 spin_unlock_bh(&dp->reo_cmd_lock); 762 763 ath11k_dp_reo_cache_flush(ab, &elem->data); 764 kfree(elem); 765 spin_lock_bh(&dp->reo_cmd_lock); 766 } 767 } 768 spin_unlock_bh(&dp->reo_cmd_lock); 769 770 return; 771 free_desc: 772 dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, 773 DMA_BIDIRECTIONAL); 774 kfree(rx_tid->vaddr); 775 } 776 777 void ath11k_peer_rx_tid_delete(struct ath11k *ar, 778 struct ath11k_peer *peer, u8 tid) 779 { 780 struct ath11k_hal_reo_cmd cmd = {0}; 781 struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; 782 int ret; 783 784 if (!rx_tid->active) 785 return; 786 787 cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS; 788 cmd.addr_lo = lower_32_bits(rx_tid->paddr); 789 cmd.addr_hi = upper_32_bits(rx_tid->paddr); 790 cmd.upd0 |= HAL_REO_CMD_UPD0_VLD; 791 ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid, 792 HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd, 793 ath11k_dp_rx_tid_del_func); 794 if (ret) { 795 ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n", 796 tid, ret); 797 dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size, 798 DMA_BIDIRECTIONAL); 799 kfree(rx_tid->vaddr); 800 } 801 802 rx_tid->active = false; 803 } 804 805 static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab, 806 u32 *link_desc, 807 enum hal_wbm_rel_bm_act action) 808 { 809 struct ath11k_dp *dp = &ab->dp; 810 struct hal_srng *srng; 811 u32 *desc; 812 int ret = 0; 813 814 srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id]; 815 816 spin_lock_bh(&srng->lock); 817 818 ath11k_hal_srng_access_begin(ab, srng); 819 820 desc = ath11k_hal_srng_src_get_next_entry(ab, srng); 821 if (!desc) { 822 ret = -ENOBUFS; 823 goto exit; 824 } 825 826 ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc, 827 action); 828 829 exit: 830 ath11k_hal_srng_access_end(ab, srng); 831 832 spin_unlock_bh(&srng->lock); 833 834 return ret; 835 } 836 837 static void ath11k_dp_rx_frags_cleanup(struct dp_rx_tid *rx_tid, bool rel_link_desc) 838 { 839 struct ath11k_base *ab = rx_tid->ab; 840 841 lockdep_assert_held(&ab->base_lock); 842 843 if (rx_tid->dst_ring_desc) { 844 if (rel_link_desc) 845 ath11k_dp_rx_link_desc_return(ab, (u32 *)rx_tid->dst_ring_desc, 846 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 847 kfree(rx_tid->dst_ring_desc); 848 rx_tid->dst_ring_desc = NULL; 849 } 850 851 rx_tid->cur_sn = 0; 852 rx_tid->last_frag_no = 0; 853 rx_tid->rx_frag_bitmap = 0; 854 __skb_queue_purge(&rx_tid->rx_frags); 855 } 856 857 void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer) 858 { 859 struct dp_rx_tid *rx_tid; 860 int i; 861 862 lockdep_assert_held(&ar->ab->base_lock); 863 864 for (i = 0; i <= IEEE80211_NUM_TIDS; i++) { 865 rx_tid = &peer->rx_tid[i]; 866 867 ath11k_peer_rx_tid_delete(ar, peer, i); 868 ath11k_dp_rx_frags_cleanup(rx_tid, true); 869 870 spin_unlock_bh(&ar->ab->base_lock); 871 del_timer_sync(&rx_tid->frag_timer); 872 spin_lock_bh(&ar->ab->base_lock); 873 } 874 } 875 876 static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar, 877 struct ath11k_peer *peer, 878 struct dp_rx_tid *rx_tid, 879 u32 ba_win_sz, u16 ssn, 880 bool update_ssn) 881 { 882 struct ath11k_hal_reo_cmd cmd = {0}; 883 int ret; 884 885 cmd.addr_lo = lower_32_bits(rx_tid->paddr); 886 cmd.addr_hi = upper_32_bits(rx_tid->paddr); 887 cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS; 888 cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE; 889 cmd.ba_window_size = ba_win_sz; 890 891 if (update_ssn) { 892 cmd.upd0 |= HAL_REO_CMD_UPD0_SSN; 893 cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn); 894 } 895 896 ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid, 897 HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd, 898 NULL); 899 if (ret) { 900 ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n", 901 rx_tid->tid, ret); 902 return ret; 903 } 904 905 rx_tid->ba_win_sz = ba_win_sz; 906 907 return 0; 908 } 909 910 static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab, 911 const u8 *peer_mac, int vdev_id, u8 tid) 912 { 913 struct ath11k_peer *peer; 914 struct dp_rx_tid *rx_tid; 915 916 spin_lock_bh(&ab->base_lock); 917 918 peer = ath11k_peer_find(ab, vdev_id, peer_mac); 919 if (!peer) { 920 ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n"); 921 goto unlock_exit; 922 } 923 924 rx_tid = &peer->rx_tid[tid]; 925 if (!rx_tid->active) 926 goto unlock_exit; 927 928 dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, 929 DMA_BIDIRECTIONAL); 930 kfree(rx_tid->vaddr); 931 932 rx_tid->active = false; 933 934 unlock_exit: 935 spin_unlock_bh(&ab->base_lock); 936 } 937 938 int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id, 939 u8 tid, u32 ba_win_sz, u16 ssn, 940 enum hal_pn_type pn_type) 941 { 942 struct ath11k_base *ab = ar->ab; 943 struct ath11k_peer *peer; 944 struct dp_rx_tid *rx_tid; 945 u32 hw_desc_sz; 946 u32 *addr_aligned; 947 void *vaddr; 948 dma_addr_t paddr; 949 int ret; 950 951 spin_lock_bh(&ab->base_lock); 952 953 peer = ath11k_peer_find(ab, vdev_id, peer_mac); 954 if (!peer) { 955 ath11k_warn(ab, "failed to find the peer to set up rx tid\n"); 956 spin_unlock_bh(&ab->base_lock); 957 return -ENOENT; 958 } 959 960 rx_tid = &peer->rx_tid[tid]; 961 /* Update the tid queue if it is already setup */ 962 if (rx_tid->active) { 963 paddr = rx_tid->paddr; 964 ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid, 965 ba_win_sz, ssn, true); 966 spin_unlock_bh(&ab->base_lock); 967 if (ret) { 968 ath11k_warn(ab, "failed to update reo for rx tid %d\n", tid); 969 return ret; 970 } 971 972 ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, 973 peer_mac, paddr, 974 tid, 1, ba_win_sz); 975 if (ret) 976 ath11k_warn(ab, "failed to send wmi command to update rx reorder queue, tid :%d (%d)\n", 977 tid, ret); 978 return ret; 979 } 980 981 rx_tid->tid = tid; 982 983 rx_tid->ba_win_sz = ba_win_sz; 984 985 /* TODO: Optimize the memory allocation for qos tid based on 986 * the actual BA window size in REO tid update path. 987 */ 988 if (tid == HAL_DESC_REO_NON_QOS_TID) 989 hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid); 990 else 991 hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid); 992 993 vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC); 994 if (!vaddr) { 995 spin_unlock_bh(&ab->base_lock); 996 return -ENOMEM; 997 } 998 999 addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN); 1000 1001 ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz, 1002 ssn, pn_type); 1003 1004 paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz, 1005 DMA_BIDIRECTIONAL); 1006 1007 ret = dma_mapping_error(ab->dev, paddr); 1008 if (ret) { 1009 spin_unlock_bh(&ab->base_lock); 1010 goto err_mem_free; 1011 } 1012 1013 rx_tid->vaddr = vaddr; 1014 rx_tid->paddr = paddr; 1015 rx_tid->size = hw_desc_sz; 1016 rx_tid->active = true; 1017 1018 spin_unlock_bh(&ab->base_lock); 1019 1020 ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac, 1021 paddr, tid, 1, ba_win_sz); 1022 if (ret) { 1023 ath11k_warn(ar->ab, "failed to setup rx reorder queue, tid :%d (%d)\n", 1024 tid, ret); 1025 ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid); 1026 } 1027 1028 return ret; 1029 1030 err_mem_free: 1031 kfree(vaddr); 1032 1033 return ret; 1034 } 1035 1036 int ath11k_dp_rx_ampdu_start(struct ath11k *ar, 1037 struct ieee80211_ampdu_params *params) 1038 { 1039 struct ath11k_base *ab = ar->ab; 1040 struct ath11k_sta *arsta = (void *)params->sta->drv_priv; 1041 int vdev_id = arsta->arvif->vdev_id; 1042 int ret; 1043 1044 ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id, 1045 params->tid, params->buf_size, 1046 params->ssn, arsta->pn_type); 1047 if (ret) 1048 ath11k_warn(ab, "failed to setup rx tid %d\n", ret); 1049 1050 return ret; 1051 } 1052 1053 int ath11k_dp_rx_ampdu_stop(struct ath11k *ar, 1054 struct ieee80211_ampdu_params *params) 1055 { 1056 struct ath11k_base *ab = ar->ab; 1057 struct ath11k_peer *peer; 1058 struct ath11k_sta *arsta = (void *)params->sta->drv_priv; 1059 int vdev_id = arsta->arvif->vdev_id; 1060 dma_addr_t paddr; 1061 bool active; 1062 int ret; 1063 1064 spin_lock_bh(&ab->base_lock); 1065 1066 peer = ath11k_peer_find(ab, vdev_id, params->sta->addr); 1067 if (!peer) { 1068 ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n"); 1069 spin_unlock_bh(&ab->base_lock); 1070 return -ENOENT; 1071 } 1072 1073 paddr = peer->rx_tid[params->tid].paddr; 1074 active = peer->rx_tid[params->tid].active; 1075 1076 if (!active) { 1077 spin_unlock_bh(&ab->base_lock); 1078 return 0; 1079 } 1080 1081 ret = ath11k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false); 1082 spin_unlock_bh(&ab->base_lock); 1083 if (ret) { 1084 ath11k_warn(ab, "failed to update reo for rx tid %d: %d\n", 1085 params->tid, ret); 1086 return ret; 1087 } 1088 1089 ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, 1090 params->sta->addr, paddr, 1091 params->tid, 1, 1); 1092 if (ret) 1093 ath11k_warn(ab, "failed to send wmi to delete rx tid %d\n", 1094 ret); 1095 1096 return ret; 1097 } 1098 1099 int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif, 1100 const u8 *peer_addr, 1101 enum set_key_cmd key_cmd, 1102 struct ieee80211_key_conf *key) 1103 { 1104 struct ath11k *ar = arvif->ar; 1105 struct ath11k_base *ab = ar->ab; 1106 struct ath11k_hal_reo_cmd cmd = {0}; 1107 struct ath11k_peer *peer; 1108 struct dp_rx_tid *rx_tid; 1109 u8 tid; 1110 int ret = 0; 1111 1112 /* NOTE: Enable PN/TSC replay check offload only for unicast frames. 1113 * We use mac80211 PN/TSC replay check functionality for bcast/mcast 1114 * for now. 1115 */ 1116 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) 1117 return 0; 1118 1119 cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS; 1120 cmd.upd0 |= HAL_REO_CMD_UPD0_PN | 1121 HAL_REO_CMD_UPD0_PN_SIZE | 1122 HAL_REO_CMD_UPD0_PN_VALID | 1123 HAL_REO_CMD_UPD0_PN_CHECK | 1124 HAL_REO_CMD_UPD0_SVLD; 1125 1126 switch (key->cipher) { 1127 case WLAN_CIPHER_SUITE_TKIP: 1128 case WLAN_CIPHER_SUITE_CCMP: 1129 case WLAN_CIPHER_SUITE_CCMP_256: 1130 case WLAN_CIPHER_SUITE_GCMP: 1131 case WLAN_CIPHER_SUITE_GCMP_256: 1132 if (key_cmd == SET_KEY) { 1133 cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK; 1134 cmd.pn_size = 48; 1135 } 1136 break; 1137 default: 1138 break; 1139 } 1140 1141 spin_lock_bh(&ab->base_lock); 1142 1143 peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr); 1144 if (!peer) { 1145 ath11k_warn(ab, "failed to find the peer to configure pn replay detection\n"); 1146 spin_unlock_bh(&ab->base_lock); 1147 return -ENOENT; 1148 } 1149 1150 for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) { 1151 rx_tid = &peer->rx_tid[tid]; 1152 if (!rx_tid->active) 1153 continue; 1154 cmd.addr_lo = lower_32_bits(rx_tid->paddr); 1155 cmd.addr_hi = upper_32_bits(rx_tid->paddr); 1156 ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid, 1157 HAL_REO_CMD_UPDATE_RX_QUEUE, 1158 &cmd, NULL); 1159 if (ret) { 1160 ath11k_warn(ab, "failed to configure rx tid %d queue for pn replay detection %d\n", 1161 tid, ret); 1162 break; 1163 } 1164 } 1165 1166 spin_unlock_bh(&ar->ab->base_lock); 1167 1168 return ret; 1169 } 1170 1171 static inline int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats, 1172 u16 peer_id) 1173 { 1174 int i; 1175 1176 for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) { 1177 if (ppdu_stats->user_stats[i].is_valid_peer_id) { 1178 if (peer_id == ppdu_stats->user_stats[i].peer_id) 1179 return i; 1180 } else { 1181 return i; 1182 } 1183 } 1184 1185 return -EINVAL; 1186 } 1187 1188 static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab, 1189 u16 tag, u16 len, const void *ptr, 1190 void *data) 1191 { 1192 struct htt_ppdu_stats_info *ppdu_info; 1193 struct htt_ppdu_user_stats *user_stats; 1194 int cur_user; 1195 u16 peer_id; 1196 1197 ppdu_info = (struct htt_ppdu_stats_info *)data; 1198 1199 switch (tag) { 1200 case HTT_PPDU_STATS_TAG_COMMON: 1201 if (len < sizeof(struct htt_ppdu_stats_common)) { 1202 ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n", 1203 len, tag); 1204 return -EINVAL; 1205 } 1206 memcpy((void *)&ppdu_info->ppdu_stats.common, ptr, 1207 sizeof(struct htt_ppdu_stats_common)); 1208 break; 1209 case HTT_PPDU_STATS_TAG_USR_RATE: 1210 if (len < sizeof(struct htt_ppdu_stats_user_rate)) { 1211 ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n", 1212 len, tag); 1213 return -EINVAL; 1214 } 1215 1216 peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id; 1217 cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats, 1218 peer_id); 1219 if (cur_user < 0) 1220 return -EINVAL; 1221 user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; 1222 user_stats->peer_id = peer_id; 1223 user_stats->is_valid_peer_id = true; 1224 memcpy((void *)&user_stats->rate, ptr, 1225 sizeof(struct htt_ppdu_stats_user_rate)); 1226 user_stats->tlv_flags |= BIT(tag); 1227 break; 1228 case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON: 1229 if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) { 1230 ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n", 1231 len, tag); 1232 return -EINVAL; 1233 } 1234 1235 peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id; 1236 cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats, 1237 peer_id); 1238 if (cur_user < 0) 1239 return -EINVAL; 1240 user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; 1241 user_stats->peer_id = peer_id; 1242 user_stats->is_valid_peer_id = true; 1243 memcpy((void *)&user_stats->cmpltn_cmn, ptr, 1244 sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)); 1245 user_stats->tlv_flags |= BIT(tag); 1246 break; 1247 case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS: 1248 if (len < 1249 sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) { 1250 ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n", 1251 len, tag); 1252 return -EINVAL; 1253 } 1254 1255 peer_id = 1256 ((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id; 1257 cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats, 1258 peer_id); 1259 if (cur_user < 0) 1260 return -EINVAL; 1261 user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; 1262 user_stats->peer_id = peer_id; 1263 user_stats->is_valid_peer_id = true; 1264 memcpy((void *)&user_stats->ack_ba, ptr, 1265 sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)); 1266 user_stats->tlv_flags |= BIT(tag); 1267 break; 1268 } 1269 return 0; 1270 } 1271 1272 int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len, 1273 int (*iter)(struct ath11k_base *ar, u16 tag, u16 len, 1274 const void *ptr, void *data), 1275 void *data) 1276 { 1277 const struct htt_tlv *tlv; 1278 const void *begin = ptr; 1279 u16 tlv_tag, tlv_len; 1280 int ret = -EINVAL; 1281 1282 while (len > 0) { 1283 if (len < sizeof(*tlv)) { 1284 ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n", 1285 ptr - begin, len, sizeof(*tlv)); 1286 return -EINVAL; 1287 } 1288 tlv = (struct htt_tlv *)ptr; 1289 tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header); 1290 tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header); 1291 ptr += sizeof(*tlv); 1292 len -= sizeof(*tlv); 1293 1294 if (tlv_len > len) { 1295 ath11k_err(ab, "htt tlv parse failure of tag %hhu at byte %zd (%zu bytes left, %hhu expected)\n", 1296 tlv_tag, ptr - begin, len, tlv_len); 1297 return -EINVAL; 1298 } 1299 ret = iter(ab, tlv_tag, tlv_len, ptr, data); 1300 if (ret == -ENOMEM) 1301 return ret; 1302 1303 ptr += tlv_len; 1304 len -= tlv_len; 1305 } 1306 return 0; 1307 } 1308 1309 static inline u32 ath11k_he_gi_to_nl80211_he_gi(u8 sgi) 1310 { 1311 u32 ret = 0; 1312 1313 switch (sgi) { 1314 case RX_MSDU_START_SGI_0_8_US: 1315 ret = NL80211_RATE_INFO_HE_GI_0_8; 1316 break; 1317 case RX_MSDU_START_SGI_1_6_US: 1318 ret = NL80211_RATE_INFO_HE_GI_1_6; 1319 break; 1320 case RX_MSDU_START_SGI_3_2_US: 1321 ret = NL80211_RATE_INFO_HE_GI_3_2; 1322 break; 1323 } 1324 1325 return ret; 1326 } 1327 1328 static void 1329 ath11k_update_per_peer_tx_stats(struct ath11k *ar, 1330 struct htt_ppdu_stats *ppdu_stats, u8 user) 1331 { 1332 struct ath11k_base *ab = ar->ab; 1333 struct ath11k_peer *peer; 1334 struct ieee80211_sta *sta; 1335 struct ath11k_sta *arsta; 1336 struct htt_ppdu_stats_user_rate *user_rate; 1337 struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats; 1338 struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user]; 1339 struct htt_ppdu_stats_common *common = &ppdu_stats->common; 1340 int ret; 1341 u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0; 1342 u32 succ_bytes = 0; 1343 u16 rate = 0, succ_pkts = 0; 1344 u32 tx_duration = 0; 1345 u8 tid = HTT_PPDU_STATS_NON_QOS_TID; 1346 bool is_ampdu = false; 1347 1348 if (!usr_stats) 1349 return; 1350 1351 if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE))) 1352 return; 1353 1354 if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON)) 1355 is_ampdu = 1356 HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags); 1357 1358 if (usr_stats->tlv_flags & 1359 BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) { 1360 succ_bytes = usr_stats->ack_ba.success_bytes; 1361 succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M, 1362 usr_stats->ack_ba.info); 1363 tid = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM, 1364 usr_stats->ack_ba.info); 1365 } 1366 1367 if (common->fes_duration_us) 1368 tx_duration = common->fes_duration_us; 1369 1370 user_rate = &usr_stats->rate; 1371 flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags); 1372 bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2; 1373 nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1; 1374 mcs = HTT_USR_RATE_MCS(user_rate->rate_flags); 1375 sgi = HTT_USR_RATE_GI(user_rate->rate_flags); 1376 dcm = HTT_USR_RATE_DCM(user_rate->rate_flags); 1377 1378 /* Note: If host configured fixed rates and in some other special 1379 * cases, the broadcast/management frames are sent in different rates. 1380 * Firmware rate's control to be skipped for this? 1381 */ 1382 1383 if (flags == WMI_RATE_PREAMBLE_HE && mcs > 11) { 1384 ath11k_warn(ab, "Invalid HE mcs %hhd peer stats", mcs); 1385 return; 1386 } 1387 1388 if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH11K_HE_MCS_MAX) { 1389 ath11k_warn(ab, "Invalid HE mcs %hhd peer stats", mcs); 1390 return; 1391 } 1392 1393 if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH11K_VHT_MCS_MAX) { 1394 ath11k_warn(ab, "Invalid VHT mcs %hhd peer stats", mcs); 1395 return; 1396 } 1397 1398 if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH11K_HT_MCS_MAX || nss < 1)) { 1399 ath11k_warn(ab, "Invalid HT mcs %hhd nss %hhd peer stats", 1400 mcs, nss); 1401 return; 1402 } 1403 1404 if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) { 1405 ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs, 1406 flags, 1407 &rate_idx, 1408 &rate); 1409 if (ret < 0) 1410 return; 1411 } 1412 1413 rcu_read_lock(); 1414 spin_lock_bh(&ab->base_lock); 1415 peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id); 1416 1417 if (!peer || !peer->sta) { 1418 spin_unlock_bh(&ab->base_lock); 1419 rcu_read_unlock(); 1420 return; 1421 } 1422 1423 sta = peer->sta; 1424 arsta = (struct ath11k_sta *)sta->drv_priv; 1425 1426 memset(&arsta->txrate, 0, sizeof(arsta->txrate)); 1427 1428 switch (flags) { 1429 case WMI_RATE_PREAMBLE_OFDM: 1430 arsta->txrate.legacy = rate; 1431 break; 1432 case WMI_RATE_PREAMBLE_CCK: 1433 arsta->txrate.legacy = rate; 1434 break; 1435 case WMI_RATE_PREAMBLE_HT: 1436 arsta->txrate.mcs = mcs + 8 * (nss - 1); 1437 arsta->txrate.flags = RATE_INFO_FLAGS_MCS; 1438 if (sgi) 1439 arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; 1440 break; 1441 case WMI_RATE_PREAMBLE_VHT: 1442 arsta->txrate.mcs = mcs; 1443 arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS; 1444 if (sgi) 1445 arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; 1446 break; 1447 case WMI_RATE_PREAMBLE_HE: 1448 arsta->txrate.mcs = mcs; 1449 arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS; 1450 arsta->txrate.he_dcm = dcm; 1451 arsta->txrate.he_gi = ath11k_he_gi_to_nl80211_he_gi(sgi); 1452 arsta->txrate.he_ru_alloc = ath11k_he_ru_tones_to_nl80211_he_ru_alloc( 1453 (user_rate->ru_end - 1454 user_rate->ru_start) + 1); 1455 break; 1456 } 1457 1458 arsta->txrate.nss = nss; 1459 arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw); 1460 arsta->tx_duration += tx_duration; 1461 memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info)); 1462 1463 /* PPDU stats reported for mgmt packet doesn't have valid tx bytes. 1464 * So skip peer stats update for mgmt packets. 1465 */ 1466 if (tid < HTT_PPDU_STATS_NON_QOS_TID) { 1467 memset(peer_stats, 0, sizeof(*peer_stats)); 1468 peer_stats->succ_pkts = succ_pkts; 1469 peer_stats->succ_bytes = succ_bytes; 1470 peer_stats->is_ampdu = is_ampdu; 1471 peer_stats->duration = tx_duration; 1472 peer_stats->ba_fails = 1473 HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) + 1474 HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags); 1475 1476 if (ath11k_debugfs_is_extd_tx_stats_enabled(ar)) 1477 ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx); 1478 } 1479 1480 spin_unlock_bh(&ab->base_lock); 1481 rcu_read_unlock(); 1482 } 1483 1484 static void ath11k_htt_update_ppdu_stats(struct ath11k *ar, 1485 struct htt_ppdu_stats *ppdu_stats) 1486 { 1487 u8 user; 1488 1489 for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++) 1490 ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user); 1491 } 1492 1493 static 1494 struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar, 1495 u32 ppdu_id) 1496 { 1497 struct htt_ppdu_stats_info *ppdu_info; 1498 1499 spin_lock_bh(&ar->data_lock); 1500 if (!list_empty(&ar->ppdu_stats_info)) { 1501 list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) { 1502 if (ppdu_info->ppdu_id == ppdu_id) { 1503 spin_unlock_bh(&ar->data_lock); 1504 return ppdu_info; 1505 } 1506 } 1507 1508 if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) { 1509 ppdu_info = list_first_entry(&ar->ppdu_stats_info, 1510 typeof(*ppdu_info), list); 1511 list_del(&ppdu_info->list); 1512 ar->ppdu_stat_list_depth--; 1513 ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats); 1514 kfree(ppdu_info); 1515 } 1516 } 1517 spin_unlock_bh(&ar->data_lock); 1518 1519 ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC); 1520 if (!ppdu_info) 1521 return NULL; 1522 1523 spin_lock_bh(&ar->data_lock); 1524 list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info); 1525 ar->ppdu_stat_list_depth++; 1526 spin_unlock_bh(&ar->data_lock); 1527 1528 return ppdu_info; 1529 } 1530 1531 static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab, 1532 struct sk_buff *skb) 1533 { 1534 struct ath11k_htt_ppdu_stats_msg *msg; 1535 struct htt_ppdu_stats_info *ppdu_info; 1536 struct ath11k *ar; 1537 int ret; 1538 u8 pdev_id; 1539 u32 ppdu_id, len; 1540 1541 msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data; 1542 len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info); 1543 pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info); 1544 ppdu_id = msg->ppdu_id; 1545 1546 rcu_read_lock(); 1547 ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id); 1548 if (!ar) { 1549 ret = -EINVAL; 1550 goto exit; 1551 } 1552 1553 if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) 1554 trace_ath11k_htt_ppdu_stats(ar, skb->data, len); 1555 1556 ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id); 1557 if (!ppdu_info) { 1558 ret = -EINVAL; 1559 goto exit; 1560 } 1561 1562 ppdu_info->ppdu_id = ppdu_id; 1563 ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len, 1564 ath11k_htt_tlv_ppdu_stats_parse, 1565 (void *)ppdu_info); 1566 if (ret) { 1567 ath11k_warn(ab, "Failed to parse tlv %d\n", ret); 1568 goto exit; 1569 } 1570 1571 exit: 1572 rcu_read_unlock(); 1573 1574 return ret; 1575 } 1576 1577 static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb) 1578 { 1579 struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data; 1580 struct ath_pktlog_hdr *hdr = (struct ath_pktlog_hdr *)data; 1581 struct ath11k *ar; 1582 u8 pdev_id; 1583 1584 pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr); 1585 ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id); 1586 if (!ar) { 1587 ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id); 1588 return; 1589 } 1590 1591 trace_ath11k_htt_pktlog(ar, data->payload, hdr->size, 1592 ar->ab->pktlog_defs_checksum); 1593 } 1594 1595 static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab, 1596 struct sk_buff *skb) 1597 { 1598 u32 *data = (u32 *)skb->data; 1599 u8 pdev_id, ring_type, ring_id, pdev_idx; 1600 u16 hp, tp; 1601 u32 backpressure_time; 1602 struct ath11k_bp_stats *bp_stats; 1603 1604 pdev_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_PDEV_ID_M, *data); 1605 ring_type = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_TYPE_M, *data); 1606 ring_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_ID_M, *data); 1607 ++data; 1608 1609 hp = FIELD_GET(HTT_BACKPRESSURE_EVENT_HP_M, *data); 1610 tp = FIELD_GET(HTT_BACKPRESSURE_EVENT_TP_M, *data); 1611 ++data; 1612 1613 backpressure_time = *data; 1614 1615 ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "htt backpressure event, pdev %d, ring type %d,ring id %d, hp %d tp %d, backpressure time %d\n", 1616 pdev_id, ring_type, ring_id, hp, tp, backpressure_time); 1617 1618 if (ring_type == HTT_BACKPRESSURE_UMAC_RING_TYPE) { 1619 if (ring_id >= HTT_SW_UMAC_RING_IDX_MAX) 1620 return; 1621 1622 bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[ring_id]; 1623 } else if (ring_type == HTT_BACKPRESSURE_LMAC_RING_TYPE) { 1624 pdev_idx = DP_HW2SW_MACID(pdev_id); 1625 1626 if (ring_id >= HTT_SW_LMAC_RING_IDX_MAX || pdev_idx >= MAX_RADIOS) 1627 return; 1628 1629 bp_stats = &ab->soc_stats.bp_stats.lmac_ring_bp_stats[ring_id][pdev_idx]; 1630 } else { 1631 ath11k_warn(ab, "unknown ring type received in htt bp event %d\n", 1632 ring_type); 1633 return; 1634 } 1635 1636 spin_lock_bh(&ab->base_lock); 1637 bp_stats->hp = hp; 1638 bp_stats->tp = tp; 1639 bp_stats->count++; 1640 bp_stats->jiffies = jiffies; 1641 spin_unlock_bh(&ab->base_lock); 1642 } 1643 1644 void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab, 1645 struct sk_buff *skb) 1646 { 1647 struct ath11k_dp *dp = &ab->dp; 1648 struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data; 1649 enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp); 1650 u16 peer_id; 1651 u8 vdev_id; 1652 u8 mac_addr[ETH_ALEN]; 1653 u16 peer_mac_h16; 1654 u16 ast_hash; 1655 1656 ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type); 1657 1658 switch (type) { 1659 case HTT_T2H_MSG_TYPE_VERSION_CONF: 1660 dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR, 1661 resp->version_msg.version); 1662 dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR, 1663 resp->version_msg.version); 1664 complete(&dp->htt_tgt_version_received); 1665 break; 1666 case HTT_T2H_MSG_TYPE_PEER_MAP: 1667 vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID, 1668 resp->peer_map_ev.info); 1669 peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID, 1670 resp->peer_map_ev.info); 1671 peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16, 1672 resp->peer_map_ev.info1); 1673 ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32, 1674 peer_mac_h16, mac_addr); 1675 ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0); 1676 break; 1677 case HTT_T2H_MSG_TYPE_PEER_MAP2: 1678 vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID, 1679 resp->peer_map_ev.info); 1680 peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID, 1681 resp->peer_map_ev.info); 1682 peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16, 1683 resp->peer_map_ev.info1); 1684 ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32, 1685 peer_mac_h16, mac_addr); 1686 ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL, 1687 resp->peer_map_ev.info2); 1688 ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash); 1689 break; 1690 case HTT_T2H_MSG_TYPE_PEER_UNMAP: 1691 case HTT_T2H_MSG_TYPE_PEER_UNMAP2: 1692 peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID, 1693 resp->peer_unmap_ev.info); 1694 ath11k_peer_unmap_event(ab, peer_id); 1695 break; 1696 case HTT_T2H_MSG_TYPE_PPDU_STATS_IND: 1697 ath11k_htt_pull_ppdu_stats(ab, skb); 1698 break; 1699 case HTT_T2H_MSG_TYPE_EXT_STATS_CONF: 1700 ath11k_debugfs_htt_ext_stats_handler(ab, skb); 1701 break; 1702 case HTT_T2H_MSG_TYPE_PKTLOG: 1703 ath11k_htt_pktlog(ab, skb); 1704 break; 1705 case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND: 1706 ath11k_htt_backpressure_event_handler(ab, skb); 1707 break; 1708 default: 1709 ath11k_warn(ab, "htt event %d not handled\n", type); 1710 break; 1711 } 1712 1713 dev_kfree_skb_any(skb); 1714 } 1715 1716 static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar, 1717 struct sk_buff_head *msdu_list, 1718 struct sk_buff *first, struct sk_buff *last, 1719 u8 l3pad_bytes, int msdu_len) 1720 { 1721 struct sk_buff *skb; 1722 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first); 1723 int buf_first_hdr_len, buf_first_len; 1724 struct hal_rx_desc *ldesc; 1725 int space_extra; 1726 int rem_len; 1727 int buf_len; 1728 1729 /* As the msdu is spread across multiple rx buffers, 1730 * find the offset to the start of msdu for computing 1731 * the length of the msdu in the first buffer. 1732 */ 1733 buf_first_hdr_len = HAL_RX_DESC_SIZE + l3pad_bytes; 1734 buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len; 1735 1736 if (WARN_ON_ONCE(msdu_len <= buf_first_len)) { 1737 skb_put(first, buf_first_hdr_len + msdu_len); 1738 skb_pull(first, buf_first_hdr_len); 1739 return 0; 1740 } 1741 1742 ldesc = (struct hal_rx_desc *)last->data; 1743 rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ldesc); 1744 rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ldesc); 1745 1746 /* MSDU spans over multiple buffers because the length of the MSDU 1747 * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data 1748 * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. 1749 */ 1750 skb_put(first, DP_RX_BUFFER_SIZE); 1751 skb_pull(first, buf_first_hdr_len); 1752 1753 /* When an MSDU spread over multiple buffers attention, MSDU_END and 1754 * MPDU_END tlvs are valid only in the last buffer. Copy those tlvs. 1755 */ 1756 ath11k_dp_rx_desc_end_tlv_copy(rxcb->rx_desc, ldesc); 1757 1758 space_extra = msdu_len - (buf_first_len + skb_tailroom(first)); 1759 if (space_extra > 0 && 1760 (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) { 1761 /* Free up all buffers of the MSDU */ 1762 while ((skb = __skb_dequeue(msdu_list)) != NULL) { 1763 rxcb = ATH11K_SKB_RXCB(skb); 1764 if (!rxcb->is_continuation) { 1765 dev_kfree_skb_any(skb); 1766 break; 1767 } 1768 dev_kfree_skb_any(skb); 1769 } 1770 return -ENOMEM; 1771 } 1772 1773 rem_len = msdu_len - buf_first_len; 1774 while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) { 1775 rxcb = ATH11K_SKB_RXCB(skb); 1776 if (rxcb->is_continuation) 1777 buf_len = DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE; 1778 else 1779 buf_len = rem_len; 1780 1781 if (buf_len > (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE)) { 1782 WARN_ON_ONCE(1); 1783 dev_kfree_skb_any(skb); 1784 return -EINVAL; 1785 } 1786 1787 skb_put(skb, buf_len + HAL_RX_DESC_SIZE); 1788 skb_pull(skb, HAL_RX_DESC_SIZE); 1789 skb_copy_from_linear_data(skb, skb_put(first, buf_len), 1790 buf_len); 1791 dev_kfree_skb_any(skb); 1792 1793 rem_len -= buf_len; 1794 if (!rxcb->is_continuation) 1795 break; 1796 } 1797 1798 return 0; 1799 } 1800 1801 static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list, 1802 struct sk_buff *first) 1803 { 1804 struct sk_buff *skb; 1805 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first); 1806 1807 if (!rxcb->is_continuation) 1808 return first; 1809 1810 skb_queue_walk(msdu_list, skb) { 1811 rxcb = ATH11K_SKB_RXCB(skb); 1812 if (!rxcb->is_continuation) 1813 return skb; 1814 } 1815 1816 return NULL; 1817 } 1818 1819 static void ath11k_dp_rx_h_csum_offload(struct sk_buff *msdu) 1820 { 1821 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 1822 bool ip_csum_fail, l4_csum_fail; 1823 1824 ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rxcb->rx_desc); 1825 l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rxcb->rx_desc); 1826 1827 msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ? 1828 CHECKSUM_NONE : CHECKSUM_UNNECESSARY; 1829 } 1830 1831 static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar, 1832 enum hal_encrypt_type enctype) 1833 { 1834 switch (enctype) { 1835 case HAL_ENCRYPT_TYPE_OPEN: 1836 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 1837 case HAL_ENCRYPT_TYPE_TKIP_MIC: 1838 return 0; 1839 case HAL_ENCRYPT_TYPE_CCMP_128: 1840 return IEEE80211_CCMP_MIC_LEN; 1841 case HAL_ENCRYPT_TYPE_CCMP_256: 1842 return IEEE80211_CCMP_256_MIC_LEN; 1843 case HAL_ENCRYPT_TYPE_GCMP_128: 1844 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 1845 return IEEE80211_GCMP_MIC_LEN; 1846 case HAL_ENCRYPT_TYPE_WEP_40: 1847 case HAL_ENCRYPT_TYPE_WEP_104: 1848 case HAL_ENCRYPT_TYPE_WEP_128: 1849 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 1850 case HAL_ENCRYPT_TYPE_WAPI: 1851 break; 1852 } 1853 1854 ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype); 1855 return 0; 1856 } 1857 1858 static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar, 1859 enum hal_encrypt_type enctype) 1860 { 1861 switch (enctype) { 1862 case HAL_ENCRYPT_TYPE_OPEN: 1863 return 0; 1864 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 1865 case HAL_ENCRYPT_TYPE_TKIP_MIC: 1866 return IEEE80211_TKIP_IV_LEN; 1867 case HAL_ENCRYPT_TYPE_CCMP_128: 1868 return IEEE80211_CCMP_HDR_LEN; 1869 case HAL_ENCRYPT_TYPE_CCMP_256: 1870 return IEEE80211_CCMP_256_HDR_LEN; 1871 case HAL_ENCRYPT_TYPE_GCMP_128: 1872 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 1873 return IEEE80211_GCMP_HDR_LEN; 1874 case HAL_ENCRYPT_TYPE_WEP_40: 1875 case HAL_ENCRYPT_TYPE_WEP_104: 1876 case HAL_ENCRYPT_TYPE_WEP_128: 1877 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 1878 case HAL_ENCRYPT_TYPE_WAPI: 1879 break; 1880 } 1881 1882 ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype); 1883 return 0; 1884 } 1885 1886 static int ath11k_dp_rx_crypto_icv_len(struct ath11k *ar, 1887 enum hal_encrypt_type enctype) 1888 { 1889 switch (enctype) { 1890 case HAL_ENCRYPT_TYPE_OPEN: 1891 case HAL_ENCRYPT_TYPE_CCMP_128: 1892 case HAL_ENCRYPT_TYPE_CCMP_256: 1893 case HAL_ENCRYPT_TYPE_GCMP_128: 1894 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 1895 return 0; 1896 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 1897 case HAL_ENCRYPT_TYPE_TKIP_MIC: 1898 return IEEE80211_TKIP_ICV_LEN; 1899 case HAL_ENCRYPT_TYPE_WEP_40: 1900 case HAL_ENCRYPT_TYPE_WEP_104: 1901 case HAL_ENCRYPT_TYPE_WEP_128: 1902 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 1903 case HAL_ENCRYPT_TYPE_WAPI: 1904 break; 1905 } 1906 1907 ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype); 1908 return 0; 1909 } 1910 1911 static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar, 1912 struct sk_buff *msdu, 1913 u8 *first_hdr, 1914 enum hal_encrypt_type enctype, 1915 struct ieee80211_rx_status *status) 1916 { 1917 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 1918 u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN]; 1919 struct ieee80211_hdr *hdr; 1920 size_t hdr_len; 1921 u8 da[ETH_ALEN]; 1922 u8 sa[ETH_ALEN]; 1923 u16 qos_ctl = 0; 1924 u8 *qos; 1925 1926 /* copy SA & DA and pull decapped header */ 1927 hdr = (struct ieee80211_hdr *)msdu->data; 1928 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1929 ether_addr_copy(da, ieee80211_get_DA(hdr)); 1930 ether_addr_copy(sa, ieee80211_get_SA(hdr)); 1931 skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control)); 1932 1933 if (rxcb->is_first_msdu) { 1934 /* original 802.11 header is valid for the first msdu 1935 * hence we can reuse the same header 1936 */ 1937 hdr = (struct ieee80211_hdr *)first_hdr; 1938 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1939 1940 /* Each A-MSDU subframe will be reported as a separate MSDU, 1941 * so strip the A-MSDU bit from QoS Ctl. 1942 */ 1943 if (ieee80211_is_data_qos(hdr->frame_control)) { 1944 qos = ieee80211_get_qos_ctl(hdr); 1945 qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; 1946 } 1947 } else { 1948 /* Rebuild qos header if this is a middle/last msdu */ 1949 hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA); 1950 1951 /* Reset the order bit as the HT_Control header is stripped */ 1952 hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER)); 1953 1954 qos_ctl = rxcb->tid; 1955 1956 if (ath11k_dp_rx_h_msdu_start_mesh_ctl_present(rxcb->rx_desc)) 1957 qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT; 1958 1959 /* TODO Add other QoS ctl fields when required */ 1960 1961 /* copy decap header before overwriting for reuse below */ 1962 memcpy(decap_hdr, (uint8_t *)hdr, hdr_len); 1963 } 1964 1965 if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 1966 memcpy(skb_push(msdu, 1967 ath11k_dp_rx_crypto_param_len(ar, enctype)), 1968 (void *)hdr + hdr_len, 1969 ath11k_dp_rx_crypto_param_len(ar, enctype)); 1970 } 1971 1972 if (!rxcb->is_first_msdu) { 1973 memcpy(skb_push(msdu, 1974 IEEE80211_QOS_CTL_LEN), &qos_ctl, 1975 IEEE80211_QOS_CTL_LEN); 1976 memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len); 1977 return; 1978 } 1979 1980 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 1981 1982 /* original 802.11 header has a different DA and in 1983 * case of 4addr it may also have different SA 1984 */ 1985 hdr = (struct ieee80211_hdr *)msdu->data; 1986 ether_addr_copy(ieee80211_get_DA(hdr), da); 1987 ether_addr_copy(ieee80211_get_SA(hdr), sa); 1988 } 1989 1990 static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu, 1991 enum hal_encrypt_type enctype, 1992 struct ieee80211_rx_status *status, 1993 bool decrypted) 1994 { 1995 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 1996 struct ieee80211_hdr *hdr; 1997 size_t hdr_len; 1998 size_t crypto_len; 1999 2000 if (!rxcb->is_first_msdu || 2001 !(rxcb->is_first_msdu && rxcb->is_last_msdu)) { 2002 WARN_ON_ONCE(1); 2003 return; 2004 } 2005 2006 skb_trim(msdu, msdu->len - FCS_LEN); 2007 2008 if (!decrypted) 2009 return; 2010 2011 hdr = (void *)msdu->data; 2012 2013 /* Tail */ 2014 if (status->flag & RX_FLAG_IV_STRIPPED) { 2015 skb_trim(msdu, msdu->len - 2016 ath11k_dp_rx_crypto_mic_len(ar, enctype)); 2017 2018 skb_trim(msdu, msdu->len - 2019 ath11k_dp_rx_crypto_icv_len(ar, enctype)); 2020 } else { 2021 /* MIC */ 2022 if (status->flag & RX_FLAG_MIC_STRIPPED) 2023 skb_trim(msdu, msdu->len - 2024 ath11k_dp_rx_crypto_mic_len(ar, enctype)); 2025 2026 /* ICV */ 2027 if (status->flag & RX_FLAG_ICV_STRIPPED) 2028 skb_trim(msdu, msdu->len - 2029 ath11k_dp_rx_crypto_icv_len(ar, enctype)); 2030 } 2031 2032 /* MMIC */ 2033 if ((status->flag & RX_FLAG_MMIC_STRIPPED) && 2034 !ieee80211_has_morefrags(hdr->frame_control) && 2035 enctype == HAL_ENCRYPT_TYPE_TKIP_MIC) 2036 skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN); 2037 2038 /* Head */ 2039 if (status->flag & RX_FLAG_IV_STRIPPED) { 2040 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2041 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); 2042 2043 memmove((void *)msdu->data + crypto_len, 2044 (void *)msdu->data, hdr_len); 2045 skb_pull(msdu, crypto_len); 2046 } 2047 } 2048 2049 static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar, 2050 struct sk_buff *msdu, 2051 enum hal_encrypt_type enctype) 2052 { 2053 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 2054 struct ieee80211_hdr *hdr; 2055 size_t hdr_len, crypto_len; 2056 void *rfc1042; 2057 bool is_amsdu; 2058 2059 is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu); 2060 hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(rxcb->rx_desc); 2061 rfc1042 = hdr; 2062 2063 if (rxcb->is_first_msdu) { 2064 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2065 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); 2066 2067 rfc1042 += hdr_len + crypto_len; 2068 } 2069 2070 if (is_amsdu) 2071 rfc1042 += sizeof(struct ath11k_dp_amsdu_subframe_hdr); 2072 2073 return rfc1042; 2074 } 2075 2076 static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar, 2077 struct sk_buff *msdu, 2078 u8 *first_hdr, 2079 enum hal_encrypt_type enctype, 2080 struct ieee80211_rx_status *status) 2081 { 2082 struct ieee80211_hdr *hdr; 2083 struct ethhdr *eth; 2084 size_t hdr_len; 2085 u8 da[ETH_ALEN]; 2086 u8 sa[ETH_ALEN]; 2087 void *rfc1042; 2088 2089 rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype); 2090 if (WARN_ON_ONCE(!rfc1042)) 2091 return; 2092 2093 /* pull decapped header and copy SA & DA */ 2094 eth = (struct ethhdr *)msdu->data; 2095 ether_addr_copy(da, eth->h_dest); 2096 ether_addr_copy(sa, eth->h_source); 2097 skb_pull(msdu, sizeof(struct ethhdr)); 2098 2099 /* push rfc1042/llc/snap */ 2100 memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042, 2101 sizeof(struct ath11k_dp_rfc1042_hdr)); 2102 2103 /* push original 802.11 header */ 2104 hdr = (struct ieee80211_hdr *)first_hdr; 2105 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2106 2107 if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 2108 memcpy(skb_push(msdu, 2109 ath11k_dp_rx_crypto_param_len(ar, enctype)), 2110 (void *)hdr + hdr_len, 2111 ath11k_dp_rx_crypto_param_len(ar, enctype)); 2112 } 2113 2114 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 2115 2116 /* original 802.11 header has a different DA and in 2117 * case of 4addr it may also have different SA 2118 */ 2119 hdr = (struct ieee80211_hdr *)msdu->data; 2120 ether_addr_copy(ieee80211_get_DA(hdr), da); 2121 ether_addr_copy(ieee80211_get_SA(hdr), sa); 2122 } 2123 2124 static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu, 2125 struct hal_rx_desc *rx_desc, 2126 enum hal_encrypt_type enctype, 2127 struct ieee80211_rx_status *status, 2128 bool decrypted) 2129 { 2130 u8 *first_hdr; 2131 u8 decap; 2132 2133 first_hdr = ath11k_dp_rx_h_80211_hdr(rx_desc); 2134 decap = ath11k_dp_rx_h_msdu_start_decap_type(rx_desc); 2135 2136 switch (decap) { 2137 case DP_RX_DECAP_TYPE_NATIVE_WIFI: 2138 ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr, 2139 enctype, status); 2140 break; 2141 case DP_RX_DECAP_TYPE_RAW: 2142 ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status, 2143 decrypted); 2144 break; 2145 case DP_RX_DECAP_TYPE_ETHERNET2_DIX: 2146 /* TODO undecap support for middle/last msdu's of amsdu */ 2147 ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr, 2148 enctype, status); 2149 break; 2150 case DP_RX_DECAP_TYPE_8023: 2151 /* TODO: Handle undecap for these formats */ 2152 break; 2153 } 2154 } 2155 2156 static void ath11k_dp_rx_h_mpdu(struct ath11k *ar, 2157 struct sk_buff *msdu, 2158 struct hal_rx_desc *rx_desc, 2159 struct ieee80211_rx_status *rx_status) 2160 { 2161 bool fill_crypto_hdr, mcast; 2162 enum hal_encrypt_type enctype; 2163 bool is_decrypted = false; 2164 struct ieee80211_hdr *hdr; 2165 struct ath11k_peer *peer; 2166 u32 err_bitmap; 2167 2168 hdr = (struct ieee80211_hdr *)msdu->data; 2169 2170 /* PN for multicast packets will be checked in mac80211 */ 2171 2172 mcast = is_multicast_ether_addr(hdr->addr1); 2173 fill_crypto_hdr = mcast; 2174 2175 spin_lock_bh(&ar->ab->base_lock); 2176 peer = ath11k_peer_find_by_addr(ar->ab, hdr->addr2); 2177 if (peer) { 2178 if (mcast) 2179 enctype = peer->sec_type_grp; 2180 else 2181 enctype = peer->sec_type; 2182 } else { 2183 enctype = HAL_ENCRYPT_TYPE_OPEN; 2184 } 2185 spin_unlock_bh(&ar->ab->base_lock); 2186 2187 err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_desc); 2188 if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap) 2189 is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc); 2190 2191 /* Clear per-MPDU flags while leaving per-PPDU flags intact */ 2192 rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC | 2193 RX_FLAG_MMIC_ERROR | 2194 RX_FLAG_DECRYPTED | 2195 RX_FLAG_IV_STRIPPED | 2196 RX_FLAG_MMIC_STRIPPED); 2197 2198 if (err_bitmap & DP_RX_MPDU_ERR_FCS) 2199 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; 2200 if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC) 2201 rx_status->flag |= RX_FLAG_MMIC_ERROR; 2202 2203 if (is_decrypted) { 2204 rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED; 2205 2206 if (fill_crypto_hdr) 2207 rx_status->flag |= RX_FLAG_MIC_STRIPPED | 2208 RX_FLAG_ICV_STRIPPED; 2209 else 2210 rx_status->flag |= RX_FLAG_IV_STRIPPED | 2211 RX_FLAG_PN_VALIDATED; 2212 } 2213 2214 ath11k_dp_rx_h_csum_offload(msdu); 2215 ath11k_dp_rx_h_undecap(ar, msdu, rx_desc, 2216 enctype, rx_status, is_decrypted); 2217 2218 if (!is_decrypted || fill_crypto_hdr) 2219 return; 2220 2221 hdr = (void *)msdu->data; 2222 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); 2223 } 2224 2225 static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc, 2226 struct ieee80211_rx_status *rx_status) 2227 { 2228 struct ieee80211_supported_band *sband; 2229 enum rx_msdu_start_pkt_type pkt_type; 2230 u8 bw; 2231 u8 rate_mcs, nss; 2232 u8 sgi; 2233 bool is_cck; 2234 2235 pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(rx_desc); 2236 bw = ath11k_dp_rx_h_msdu_start_rx_bw(rx_desc); 2237 rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(rx_desc); 2238 nss = ath11k_dp_rx_h_msdu_start_nss(rx_desc); 2239 sgi = ath11k_dp_rx_h_msdu_start_sgi(rx_desc); 2240 2241 switch (pkt_type) { 2242 case RX_MSDU_START_PKT_TYPE_11A: 2243 case RX_MSDU_START_PKT_TYPE_11B: 2244 is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B); 2245 sband = &ar->mac.sbands[rx_status->band]; 2246 rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs, 2247 is_cck); 2248 break; 2249 case RX_MSDU_START_PKT_TYPE_11N: 2250 rx_status->encoding = RX_ENC_HT; 2251 if (rate_mcs > ATH11K_HT_MCS_MAX) { 2252 ath11k_warn(ar->ab, 2253 "Received with invalid mcs in HT mode %d\n", 2254 rate_mcs); 2255 break; 2256 } 2257 rx_status->rate_idx = rate_mcs + (8 * (nss - 1)); 2258 if (sgi) 2259 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 2260 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); 2261 break; 2262 case RX_MSDU_START_PKT_TYPE_11AC: 2263 rx_status->encoding = RX_ENC_VHT; 2264 rx_status->rate_idx = rate_mcs; 2265 if (rate_mcs > ATH11K_VHT_MCS_MAX) { 2266 ath11k_warn(ar->ab, 2267 "Received with invalid mcs in VHT mode %d\n", 2268 rate_mcs); 2269 break; 2270 } 2271 rx_status->nss = nss; 2272 if (sgi) 2273 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 2274 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); 2275 break; 2276 case RX_MSDU_START_PKT_TYPE_11AX: 2277 rx_status->rate_idx = rate_mcs; 2278 if (rate_mcs > ATH11K_HE_MCS_MAX) { 2279 ath11k_warn(ar->ab, 2280 "Received with invalid mcs in HE mode %d\n", 2281 rate_mcs); 2282 break; 2283 } 2284 rx_status->encoding = RX_ENC_HE; 2285 rx_status->nss = nss; 2286 rx_status->he_gi = ath11k_he_gi_to_nl80211_he_gi(sgi); 2287 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); 2288 break; 2289 } 2290 } 2291 2292 static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc, 2293 struct ieee80211_rx_status *rx_status) 2294 { 2295 u8 channel_num; 2296 u32 center_freq; 2297 2298 rx_status->freq = 0; 2299 rx_status->rate_idx = 0; 2300 rx_status->nss = 0; 2301 rx_status->encoding = RX_ENC_LEGACY; 2302 rx_status->bw = RATE_INFO_BW_20; 2303 2304 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; 2305 2306 channel_num = ath11k_dp_rx_h_msdu_start_freq(rx_desc); 2307 center_freq = ath11k_dp_rx_h_msdu_start_freq(rx_desc) >> 16; 2308 2309 if (center_freq >= 5935 && center_freq <= 7105) { 2310 rx_status->band = NL80211_BAND_6GHZ; 2311 } else if (channel_num >= 1 && channel_num <= 14) { 2312 rx_status->band = NL80211_BAND_2GHZ; 2313 } else if (channel_num >= 36 && channel_num <= 173) { 2314 rx_status->band = NL80211_BAND_5GHZ; 2315 } else { 2316 spin_lock_bh(&ar->data_lock); 2317 rx_status->band = ar->rx_channel->band; 2318 channel_num = 2319 ieee80211_frequency_to_channel(ar->rx_channel->center_freq); 2320 spin_unlock_bh(&ar->data_lock); 2321 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ", 2322 rx_desc, sizeof(struct hal_rx_desc)); 2323 } 2324 2325 rx_status->freq = ieee80211_channel_to_frequency(channel_num, 2326 rx_status->band); 2327 2328 ath11k_dp_rx_h_rate(ar, rx_desc, rx_status); 2329 } 2330 2331 static char *ath11k_print_get_tid(struct ieee80211_hdr *hdr, char *out, 2332 size_t size) 2333 { 2334 u8 *qc; 2335 int tid; 2336 2337 if (!ieee80211_is_data_qos(hdr->frame_control)) 2338 return ""; 2339 2340 qc = ieee80211_get_qos_ctl(hdr); 2341 tid = *qc & IEEE80211_QOS_CTL_TID_MASK; 2342 snprintf(out, size, "tid %d", tid); 2343 2344 return out; 2345 } 2346 2347 static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi, 2348 struct sk_buff *msdu) 2349 { 2350 static const struct ieee80211_radiotap_he known = { 2351 .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN | 2352 IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN), 2353 .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN), 2354 }; 2355 struct ieee80211_rx_status *status; 2356 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; 2357 struct ieee80211_radiotap_he *he = NULL; 2358 char tid[32]; 2359 2360 status = IEEE80211_SKB_RXCB(msdu); 2361 if (status->encoding == RX_ENC_HE) { 2362 he = skb_push(msdu, sizeof(known)); 2363 memcpy(he, &known, sizeof(known)); 2364 status->flag |= RX_FLAG_RADIOTAP_HE; 2365 } 2366 2367 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 2368 "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", 2369 msdu, 2370 msdu->len, 2371 ieee80211_get_SA(hdr), 2372 ath11k_print_get_tid(hdr, tid, sizeof(tid)), 2373 is_multicast_ether_addr(ieee80211_get_DA(hdr)) ? 2374 "mcast" : "ucast", 2375 (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4, 2376 (status->encoding == RX_ENC_LEGACY) ? "legacy" : "", 2377 (status->encoding == RX_ENC_HT) ? "ht" : "", 2378 (status->encoding == RX_ENC_VHT) ? "vht" : "", 2379 (status->encoding == RX_ENC_HE) ? "he" : "", 2380 (status->bw == RATE_INFO_BW_40) ? "40" : "", 2381 (status->bw == RATE_INFO_BW_80) ? "80" : "", 2382 (status->bw == RATE_INFO_BW_160) ? "160" : "", 2383 status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "", 2384 status->rate_idx, 2385 status->nss, 2386 status->freq, 2387 status->band, status->flag, 2388 !!(status->flag & RX_FLAG_FAILED_FCS_CRC), 2389 !!(status->flag & RX_FLAG_MMIC_ERROR), 2390 !!(status->flag & RX_FLAG_AMSDU_MORE)); 2391 2392 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DP_RX, NULL, "dp rx msdu: ", 2393 msdu->data, msdu->len); 2394 2395 /* TODO: trace rx packet */ 2396 2397 ieee80211_rx_napi(ar->hw, NULL, msdu, napi); 2398 } 2399 2400 static int ath11k_dp_rx_process_msdu(struct ath11k *ar, 2401 struct sk_buff *msdu, 2402 struct sk_buff_head *msdu_list) 2403 { 2404 struct hal_rx_desc *rx_desc, *lrx_desc; 2405 struct ieee80211_rx_status rx_status = {0}; 2406 struct ieee80211_rx_status *status; 2407 struct ath11k_skb_rxcb *rxcb; 2408 struct ieee80211_hdr *hdr; 2409 struct sk_buff *last_buf; 2410 u8 l3_pad_bytes; 2411 u8 *hdr_status; 2412 u16 msdu_len; 2413 int ret; 2414 2415 last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu); 2416 if (!last_buf) { 2417 ath11k_warn(ar->ab, 2418 "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n"); 2419 ret = -EIO; 2420 goto free_out; 2421 } 2422 2423 rx_desc = (struct hal_rx_desc *)msdu->data; 2424 lrx_desc = (struct hal_rx_desc *)last_buf->data; 2425 if (!ath11k_dp_rx_h_attn_msdu_done(lrx_desc)) { 2426 ath11k_warn(ar->ab, "msdu_done bit in attention is not set\n"); 2427 ret = -EIO; 2428 goto free_out; 2429 } 2430 2431 rxcb = ATH11K_SKB_RXCB(msdu); 2432 rxcb->rx_desc = rx_desc; 2433 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc); 2434 l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(lrx_desc); 2435 2436 if (rxcb->is_frag) { 2437 skb_pull(msdu, HAL_RX_DESC_SIZE); 2438 } else if (!rxcb->is_continuation) { 2439 if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) { 2440 hdr_status = ath11k_dp_rx_h_80211_hdr(rx_desc); 2441 ret = -EINVAL; 2442 ath11k_warn(ar->ab, "invalid msdu len %u\n", msdu_len); 2443 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status, 2444 sizeof(struct ieee80211_hdr)); 2445 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc, 2446 sizeof(struct hal_rx_desc)); 2447 goto free_out; 2448 } 2449 skb_put(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes + msdu_len); 2450 skb_pull(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes); 2451 } else { 2452 ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list, 2453 msdu, last_buf, 2454 l3_pad_bytes, msdu_len); 2455 if (ret) { 2456 ath11k_warn(ar->ab, 2457 "failed to coalesce msdu rx buffer%d\n", ret); 2458 goto free_out; 2459 } 2460 } 2461 2462 hdr = (struct ieee80211_hdr *)msdu->data; 2463 2464 /* Process only data frames */ 2465 if (!ieee80211_is_data(hdr->frame_control)) 2466 return -EINVAL; 2467 2468 ath11k_dp_rx_h_ppdu(ar, rx_desc, &rx_status); 2469 ath11k_dp_rx_h_mpdu(ar, msdu, rx_desc, &rx_status); 2470 2471 rx_status.flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED; 2472 2473 status = IEEE80211_SKB_RXCB(msdu); 2474 *status = rx_status; 2475 return 0; 2476 2477 free_out: 2478 return ret; 2479 } 2480 2481 static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab, 2482 struct napi_struct *napi, 2483 struct sk_buff_head *msdu_list, 2484 int *quota, int ring_id) 2485 { 2486 struct ath11k_skb_rxcb *rxcb; 2487 struct sk_buff *msdu; 2488 struct ath11k *ar; 2489 u8 mac_id; 2490 int ret; 2491 2492 if (skb_queue_empty(msdu_list)) 2493 return; 2494 2495 rcu_read_lock(); 2496 2497 while (*quota && (msdu = __skb_dequeue(msdu_list))) { 2498 rxcb = ATH11K_SKB_RXCB(msdu); 2499 mac_id = rxcb->mac_id; 2500 ar = ab->pdevs[mac_id].ar; 2501 if (!rcu_dereference(ab->pdevs_active[mac_id])) { 2502 dev_kfree_skb_any(msdu); 2503 continue; 2504 } 2505 2506 if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { 2507 dev_kfree_skb_any(msdu); 2508 continue; 2509 } 2510 2511 ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list); 2512 if (ret) { 2513 ath11k_dbg(ab, ATH11K_DBG_DATA, 2514 "Unable to process msdu %d", ret); 2515 dev_kfree_skb_any(msdu); 2516 continue; 2517 } 2518 2519 ath11k_dp_rx_deliver_msdu(ar, napi, msdu); 2520 (*quota)--; 2521 } 2522 2523 rcu_read_unlock(); 2524 } 2525 2526 int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id, 2527 struct napi_struct *napi, int budget) 2528 { 2529 struct ath11k_dp *dp = &ab->dp; 2530 struct dp_rxdma_ring *rx_ring; 2531 int num_buffs_reaped[MAX_RADIOS] = {0}; 2532 struct sk_buff_head msdu_list; 2533 struct ath11k_skb_rxcb *rxcb; 2534 int total_msdu_reaped = 0; 2535 struct hal_srng *srng; 2536 struct sk_buff *msdu; 2537 int quota = budget; 2538 bool done = false; 2539 int buf_id, mac_id; 2540 struct ath11k *ar; 2541 u32 *rx_desc; 2542 int i; 2543 2544 __skb_queue_head_init(&msdu_list); 2545 2546 srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id]; 2547 2548 spin_lock_bh(&srng->lock); 2549 2550 ath11k_hal_srng_access_begin(ab, srng); 2551 2552 try_again: 2553 while ((rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 2554 struct hal_reo_dest_ring desc = *(struct hal_reo_dest_ring *)rx_desc; 2555 enum hal_reo_dest_ring_push_reason push_reason; 2556 u32 cookie; 2557 2558 cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, 2559 desc.buf_addr_info.info1); 2560 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 2561 cookie); 2562 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie); 2563 2564 ar = ab->pdevs[mac_id].ar; 2565 rx_ring = &ar->dp.rx_refill_buf_ring; 2566 spin_lock_bh(&rx_ring->idr_lock); 2567 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 2568 if (!msdu) { 2569 ath11k_warn(ab, "frame rx with invalid buf_id %d\n", 2570 buf_id); 2571 spin_unlock_bh(&rx_ring->idr_lock); 2572 continue; 2573 } 2574 2575 idr_remove(&rx_ring->bufs_idr, buf_id); 2576 spin_unlock_bh(&rx_ring->idr_lock); 2577 2578 rxcb = ATH11K_SKB_RXCB(msdu); 2579 dma_unmap_single(ab->dev, rxcb->paddr, 2580 msdu->len + skb_tailroom(msdu), 2581 DMA_FROM_DEVICE); 2582 2583 num_buffs_reaped[mac_id]++; 2584 total_msdu_reaped++; 2585 2586 push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON, 2587 desc.info0); 2588 if (push_reason != 2589 HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) { 2590 dev_kfree_skb_any(msdu); 2591 ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++; 2592 continue; 2593 } 2594 2595 rxcb->is_first_msdu = !!(desc.rx_msdu_info.info0 & 2596 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU); 2597 rxcb->is_last_msdu = !!(desc.rx_msdu_info.info0 & 2598 RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU); 2599 rxcb->is_continuation = !!(desc.rx_msdu_info.info0 & 2600 RX_MSDU_DESC_INFO0_MSDU_CONTINUATION); 2601 rxcb->mac_id = mac_id; 2602 rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM, 2603 desc.info0); 2604 2605 __skb_queue_tail(&msdu_list, msdu); 2606 2607 if (total_msdu_reaped >= quota && !rxcb->is_continuation) { 2608 done = true; 2609 break; 2610 } 2611 } 2612 2613 /* Hw might have updated the head pointer after we cached it. 2614 * In this case, even though there are entries in the ring we'll 2615 * get rx_desc NULL. Give the read another try with updated cached 2616 * head pointer so that we can reap complete MPDU in the current 2617 * rx processing. 2618 */ 2619 if (!done && ath11k_hal_srng_dst_num_free(ab, srng, true)) { 2620 ath11k_hal_srng_access_end(ab, srng); 2621 goto try_again; 2622 } 2623 2624 ath11k_hal_srng_access_end(ab, srng); 2625 2626 spin_unlock_bh(&srng->lock); 2627 2628 if (!total_msdu_reaped) 2629 goto exit; 2630 2631 for (i = 0; i < ab->num_radios; i++) { 2632 if (!num_buffs_reaped[i]) 2633 continue; 2634 2635 ar = ab->pdevs[i].ar; 2636 rx_ring = &ar->dp.rx_refill_buf_ring; 2637 2638 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i], 2639 HAL_RX_BUF_RBM_SW3_BM); 2640 } 2641 2642 ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list, 2643 "a, ring_id); 2644 2645 exit: 2646 return budget - quota; 2647 } 2648 2649 static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta, 2650 struct hal_rx_mon_ppdu_info *ppdu_info) 2651 { 2652 struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats; 2653 u32 num_msdu; 2654 2655 if (!rx_stats) 2656 return; 2657 2658 num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count + 2659 ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count; 2660 2661 rx_stats->num_msdu += num_msdu; 2662 rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count + 2663 ppdu_info->tcp_ack_msdu_count; 2664 rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count; 2665 rx_stats->other_msdu_count += ppdu_info->other_msdu_count; 2666 2667 if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A || 2668 ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) { 2669 ppdu_info->nss = 1; 2670 ppdu_info->mcs = HAL_RX_MAX_MCS; 2671 ppdu_info->tid = IEEE80211_NUM_TIDS; 2672 } 2673 2674 if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS) 2675 rx_stats->nss_count[ppdu_info->nss - 1] += num_msdu; 2676 2677 if (ppdu_info->mcs <= HAL_RX_MAX_MCS) 2678 rx_stats->mcs_count[ppdu_info->mcs] += num_msdu; 2679 2680 if (ppdu_info->gi < HAL_RX_GI_MAX) 2681 rx_stats->gi_count[ppdu_info->gi] += num_msdu; 2682 2683 if (ppdu_info->bw < HAL_RX_BW_MAX) 2684 rx_stats->bw_count[ppdu_info->bw] += num_msdu; 2685 2686 if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX) 2687 rx_stats->coding_count[ppdu_info->ldpc] += num_msdu; 2688 2689 if (ppdu_info->tid <= IEEE80211_NUM_TIDS) 2690 rx_stats->tid_count[ppdu_info->tid] += num_msdu; 2691 2692 if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX) 2693 rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu; 2694 2695 if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX) 2696 rx_stats->reception_type[ppdu_info->reception_type] += num_msdu; 2697 2698 if (ppdu_info->is_stbc) 2699 rx_stats->stbc_count += num_msdu; 2700 2701 if (ppdu_info->beamformed) 2702 rx_stats->beamformed_count += num_msdu; 2703 2704 if (ppdu_info->num_mpdu_fcs_ok > 1) 2705 rx_stats->ampdu_msdu_count += num_msdu; 2706 else 2707 rx_stats->non_ampdu_msdu_count += num_msdu; 2708 2709 rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok; 2710 rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err; 2711 rx_stats->dcm_count += ppdu_info->dcm; 2712 rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu; 2713 2714 arsta->rssi_comb = ppdu_info->rssi_comb; 2715 rx_stats->rx_duration += ppdu_info->rx_duration; 2716 arsta->rx_duration = rx_stats->rx_duration; 2717 } 2718 2719 static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab, 2720 struct dp_rxdma_ring *rx_ring, 2721 int *buf_id) 2722 { 2723 struct sk_buff *skb; 2724 dma_addr_t paddr; 2725 2726 skb = dev_alloc_skb(DP_RX_BUFFER_SIZE + 2727 DP_RX_BUFFER_ALIGN_SIZE); 2728 2729 if (!skb) 2730 goto fail_alloc_skb; 2731 2732 if (!IS_ALIGNED((unsigned long)skb->data, 2733 DP_RX_BUFFER_ALIGN_SIZE)) { 2734 skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) - 2735 skb->data); 2736 } 2737 2738 paddr = dma_map_single(ab->dev, skb->data, 2739 skb->len + skb_tailroom(skb), 2740 DMA_FROM_DEVICE); 2741 if (unlikely(dma_mapping_error(ab->dev, paddr))) 2742 goto fail_free_skb; 2743 2744 spin_lock_bh(&rx_ring->idr_lock); 2745 *buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0, 2746 rx_ring->bufs_max, GFP_ATOMIC); 2747 spin_unlock_bh(&rx_ring->idr_lock); 2748 if (*buf_id < 0) 2749 goto fail_dma_unmap; 2750 2751 ATH11K_SKB_RXCB(skb)->paddr = paddr; 2752 return skb; 2753 2754 fail_dma_unmap: 2755 dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), 2756 DMA_FROM_DEVICE); 2757 fail_free_skb: 2758 dev_kfree_skb_any(skb); 2759 fail_alloc_skb: 2760 return NULL; 2761 } 2762 2763 int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id, 2764 struct dp_rxdma_ring *rx_ring, 2765 int req_entries, 2766 enum hal_rx_buf_return_buf_manager mgr) 2767 { 2768 struct hal_srng *srng; 2769 u32 *desc; 2770 struct sk_buff *skb; 2771 int num_free; 2772 int num_remain; 2773 int buf_id; 2774 u32 cookie; 2775 dma_addr_t paddr; 2776 2777 req_entries = min(req_entries, rx_ring->bufs_max); 2778 2779 srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; 2780 2781 spin_lock_bh(&srng->lock); 2782 2783 ath11k_hal_srng_access_begin(ab, srng); 2784 2785 num_free = ath11k_hal_srng_src_num_free(ab, srng, true); 2786 2787 req_entries = min(num_free, req_entries); 2788 num_remain = req_entries; 2789 2790 while (num_remain > 0) { 2791 skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring, 2792 &buf_id); 2793 if (!skb) 2794 break; 2795 paddr = ATH11K_SKB_RXCB(skb)->paddr; 2796 2797 desc = ath11k_hal_srng_src_get_next_entry(ab, srng); 2798 if (!desc) 2799 goto fail_desc_get; 2800 2801 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) | 2802 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 2803 2804 num_remain--; 2805 2806 ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr); 2807 } 2808 2809 ath11k_hal_srng_access_end(ab, srng); 2810 2811 spin_unlock_bh(&srng->lock); 2812 2813 return req_entries - num_remain; 2814 2815 fail_desc_get: 2816 spin_lock_bh(&rx_ring->idr_lock); 2817 idr_remove(&rx_ring->bufs_idr, buf_id); 2818 spin_unlock_bh(&rx_ring->idr_lock); 2819 dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), 2820 DMA_FROM_DEVICE); 2821 dev_kfree_skb_any(skb); 2822 ath11k_hal_srng_access_end(ab, srng); 2823 spin_unlock_bh(&srng->lock); 2824 2825 return req_entries - num_remain; 2826 } 2827 2828 static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id, 2829 int *budget, struct sk_buff_head *skb_list) 2830 { 2831 struct ath11k *ar; 2832 struct ath11k_pdev_dp *dp; 2833 struct dp_rxdma_ring *rx_ring; 2834 struct hal_srng *srng; 2835 void *rx_mon_status_desc; 2836 struct sk_buff *skb; 2837 struct ath11k_skb_rxcb *rxcb; 2838 struct hal_tlv_hdr *tlv; 2839 u32 cookie; 2840 int buf_id, srng_id; 2841 dma_addr_t paddr; 2842 u8 rbm; 2843 int num_buffs_reaped = 0; 2844 2845 ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar; 2846 dp = &ar->dp; 2847 srng_id = ath11k_hw_mac_id_to_srng_id(&ab->hw_params, mac_id); 2848 rx_ring = &dp->rx_mon_status_refill_ring[srng_id]; 2849 2850 srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; 2851 2852 spin_lock_bh(&srng->lock); 2853 2854 ath11k_hal_srng_access_begin(ab, srng); 2855 while (*budget) { 2856 *budget -= 1; 2857 rx_mon_status_desc = 2858 ath11k_hal_srng_src_peek(ab, srng); 2859 if (!rx_mon_status_desc) 2860 break; 2861 2862 ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr, 2863 &cookie, &rbm); 2864 if (paddr) { 2865 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie); 2866 2867 spin_lock_bh(&rx_ring->idr_lock); 2868 skb = idr_find(&rx_ring->bufs_idr, buf_id); 2869 if (!skb) { 2870 ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n", 2871 buf_id); 2872 spin_unlock_bh(&rx_ring->idr_lock); 2873 goto move_next; 2874 } 2875 2876 idr_remove(&rx_ring->bufs_idr, buf_id); 2877 spin_unlock_bh(&rx_ring->idr_lock); 2878 2879 rxcb = ATH11K_SKB_RXCB(skb); 2880 2881 dma_unmap_single(ab->dev, rxcb->paddr, 2882 skb->len + skb_tailroom(skb), 2883 DMA_FROM_DEVICE); 2884 2885 tlv = (struct hal_tlv_hdr *)skb->data; 2886 if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != 2887 HAL_RX_STATUS_BUFFER_DONE) { 2888 ath11k_warn(ab, "mon status DONE not set %lx\n", 2889 FIELD_GET(HAL_TLV_HDR_TAG, 2890 tlv->tl)); 2891 dev_kfree_skb_any(skb); 2892 goto move_next; 2893 } 2894 2895 __skb_queue_tail(skb_list, skb); 2896 } 2897 move_next: 2898 skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring, 2899 &buf_id); 2900 2901 if (!skb) { 2902 ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0, 2903 HAL_RX_BUF_RBM_SW3_BM); 2904 num_buffs_reaped++; 2905 break; 2906 } 2907 rxcb = ATH11K_SKB_RXCB(skb); 2908 2909 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) | 2910 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 2911 2912 ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr, 2913 cookie, HAL_RX_BUF_RBM_SW3_BM); 2914 ath11k_hal_srng_src_get_next_entry(ab, srng); 2915 num_buffs_reaped++; 2916 } 2917 ath11k_hal_srng_access_end(ab, srng); 2918 spin_unlock_bh(&srng->lock); 2919 2920 return num_buffs_reaped; 2921 } 2922 2923 int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id, 2924 struct napi_struct *napi, int budget) 2925 { 2926 struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id); 2927 enum hal_rx_mon_status hal_status; 2928 struct sk_buff *skb; 2929 struct sk_buff_head skb_list; 2930 struct hal_rx_mon_ppdu_info ppdu_info; 2931 struct ath11k_peer *peer; 2932 struct ath11k_sta *arsta; 2933 int num_buffs_reaped = 0; 2934 2935 __skb_queue_head_init(&skb_list); 2936 2937 num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget, 2938 &skb_list); 2939 if (!num_buffs_reaped) 2940 goto exit; 2941 2942 while ((skb = __skb_dequeue(&skb_list))) { 2943 memset(&ppdu_info, 0, sizeof(ppdu_info)); 2944 ppdu_info.peer_id = HAL_INVALID_PEERID; 2945 2946 if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) 2947 trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE); 2948 2949 hal_status = ath11k_hal_rx_parse_mon_status(ab, &ppdu_info, skb); 2950 2951 if (ppdu_info.peer_id == HAL_INVALID_PEERID || 2952 hal_status != HAL_RX_MON_STATUS_PPDU_DONE) { 2953 dev_kfree_skb_any(skb); 2954 continue; 2955 } 2956 2957 rcu_read_lock(); 2958 spin_lock_bh(&ab->base_lock); 2959 peer = ath11k_peer_find_by_id(ab, ppdu_info.peer_id); 2960 2961 if (!peer || !peer->sta) { 2962 ath11k_dbg(ab, ATH11K_DBG_DATA, 2963 "failed to find the peer with peer_id %d\n", 2964 ppdu_info.peer_id); 2965 spin_unlock_bh(&ab->base_lock); 2966 rcu_read_unlock(); 2967 dev_kfree_skb_any(skb); 2968 continue; 2969 } 2970 2971 arsta = (struct ath11k_sta *)peer->sta->drv_priv; 2972 ath11k_dp_rx_update_peer_stats(arsta, &ppdu_info); 2973 2974 if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr)) 2975 trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE); 2976 2977 spin_unlock_bh(&ab->base_lock); 2978 rcu_read_unlock(); 2979 2980 dev_kfree_skb_any(skb); 2981 } 2982 exit: 2983 return num_buffs_reaped; 2984 } 2985 2986 static void ath11k_dp_rx_frag_timer(struct timer_list *timer) 2987 { 2988 struct dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer); 2989 2990 spin_lock_bh(&rx_tid->ab->base_lock); 2991 if (rx_tid->last_frag_no && 2992 rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) { 2993 spin_unlock_bh(&rx_tid->ab->base_lock); 2994 return; 2995 } 2996 ath11k_dp_rx_frags_cleanup(rx_tid, true); 2997 spin_unlock_bh(&rx_tid->ab->base_lock); 2998 } 2999 3000 int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id) 3001 { 3002 struct ath11k_base *ab = ar->ab; 3003 struct crypto_shash *tfm; 3004 struct ath11k_peer *peer; 3005 struct dp_rx_tid *rx_tid; 3006 int i; 3007 3008 tfm = crypto_alloc_shash("michael_mic", 0, 0); 3009 if (IS_ERR(tfm)) 3010 return PTR_ERR(tfm); 3011 3012 spin_lock_bh(&ab->base_lock); 3013 3014 peer = ath11k_peer_find(ab, vdev_id, peer_mac); 3015 if (!peer) { 3016 ath11k_warn(ab, "failed to find the peer to set up fragment info\n"); 3017 spin_unlock_bh(&ab->base_lock); 3018 return -ENOENT; 3019 } 3020 3021 for (i = 0; i <= IEEE80211_NUM_TIDS; i++) { 3022 rx_tid = &peer->rx_tid[i]; 3023 rx_tid->ab = ab; 3024 timer_setup(&rx_tid->frag_timer, ath11k_dp_rx_frag_timer, 0); 3025 skb_queue_head_init(&rx_tid->rx_frags); 3026 } 3027 3028 peer->tfm_mmic = tfm; 3029 spin_unlock_bh(&ab->base_lock); 3030 3031 return 0; 3032 } 3033 3034 static int ath11k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key, 3035 struct ieee80211_hdr *hdr, u8 *data, 3036 size_t data_len, u8 *mic) 3037 { 3038 SHASH_DESC_ON_STACK(desc, tfm); 3039 u8 mic_hdr[16] = {0}; 3040 u8 tid = 0; 3041 int ret; 3042 3043 if (!tfm) 3044 return -EINVAL; 3045 3046 desc->tfm = tfm; 3047 3048 ret = crypto_shash_setkey(tfm, key, 8); 3049 if (ret) 3050 goto out; 3051 3052 ret = crypto_shash_init(desc); 3053 if (ret) 3054 goto out; 3055 3056 /* TKIP MIC header */ 3057 memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN); 3058 memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN); 3059 if (ieee80211_is_data_qos(hdr->frame_control)) 3060 tid = ieee80211_get_tid(hdr); 3061 mic_hdr[12] = tid; 3062 3063 ret = crypto_shash_update(desc, mic_hdr, 16); 3064 if (ret) 3065 goto out; 3066 ret = crypto_shash_update(desc, data, data_len); 3067 if (ret) 3068 goto out; 3069 ret = crypto_shash_final(desc, mic); 3070 out: 3071 shash_desc_zero(desc); 3072 return ret; 3073 } 3074 3075 static int ath11k_dp_rx_h_verify_tkip_mic(struct ath11k *ar, struct ath11k_peer *peer, 3076 struct sk_buff *msdu) 3077 { 3078 struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data; 3079 struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu); 3080 struct ieee80211_key_conf *key_conf; 3081 struct ieee80211_hdr *hdr; 3082 u8 mic[IEEE80211_CCMP_MIC_LEN]; 3083 int head_len, tail_len, ret; 3084 size_t data_len; 3085 u32 hdr_len; 3086 u8 *key, *data; 3087 u8 key_idx; 3088 3089 if (ath11k_dp_rx_h_mpdu_start_enctype(rx_desc) != HAL_ENCRYPT_TYPE_TKIP_MIC) 3090 return 0; 3091 3092 hdr = (struct ieee80211_hdr *)(msdu->data + HAL_RX_DESC_SIZE); 3093 hdr_len = ieee80211_hdrlen(hdr->frame_control); 3094 head_len = hdr_len + HAL_RX_DESC_SIZE + IEEE80211_TKIP_IV_LEN; 3095 tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN; 3096 3097 if (!is_multicast_ether_addr(hdr->addr1)) 3098 key_idx = peer->ucast_keyidx; 3099 else 3100 key_idx = peer->mcast_keyidx; 3101 3102 key_conf = peer->keys[key_idx]; 3103 3104 data = msdu->data + head_len; 3105 data_len = msdu->len - head_len - tail_len; 3106 key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY]; 3107 3108 ret = ath11k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic); 3109 if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN)) 3110 goto mic_fail; 3111 3112 return 0; 3113 3114 mic_fail: 3115 (ATH11K_SKB_RXCB(msdu))->is_first_msdu = true; 3116 (ATH11K_SKB_RXCB(msdu))->is_last_msdu = true; 3117 3118 rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED | 3119 RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED; 3120 skb_pull(msdu, HAL_RX_DESC_SIZE); 3121 3122 ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs); 3123 ath11k_dp_rx_h_undecap(ar, msdu, rx_desc, 3124 HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true); 3125 ieee80211_rx(ar->hw, msdu); 3126 return -EINVAL; 3127 } 3128 3129 static void ath11k_dp_rx_h_undecap_frag(struct ath11k *ar, struct sk_buff *msdu, 3130 enum hal_encrypt_type enctype, u32 flags) 3131 { 3132 struct ieee80211_hdr *hdr; 3133 size_t hdr_len; 3134 size_t crypto_len; 3135 3136 if (!flags) 3137 return; 3138 3139 hdr = (struct ieee80211_hdr *)(msdu->data + HAL_RX_DESC_SIZE); 3140 3141 if (flags & RX_FLAG_MIC_STRIPPED) 3142 skb_trim(msdu, msdu->len - 3143 ath11k_dp_rx_crypto_mic_len(ar, enctype)); 3144 3145 if (flags & RX_FLAG_ICV_STRIPPED) 3146 skb_trim(msdu, msdu->len - 3147 ath11k_dp_rx_crypto_icv_len(ar, enctype)); 3148 3149 if (flags & RX_FLAG_IV_STRIPPED) { 3150 hdr_len = ieee80211_hdrlen(hdr->frame_control); 3151 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); 3152 3153 memmove((void *)msdu->data + HAL_RX_DESC_SIZE + crypto_len, 3154 (void *)msdu->data + HAL_RX_DESC_SIZE, hdr_len); 3155 skb_pull(msdu, crypto_len); 3156 } 3157 } 3158 3159 static int ath11k_dp_rx_h_defrag(struct ath11k *ar, 3160 struct ath11k_peer *peer, 3161 struct dp_rx_tid *rx_tid, 3162 struct sk_buff **defrag_skb) 3163 { 3164 struct hal_rx_desc *rx_desc; 3165 struct sk_buff *skb, *first_frag, *last_frag; 3166 struct ieee80211_hdr *hdr; 3167 enum hal_encrypt_type enctype; 3168 bool is_decrypted = false; 3169 int msdu_len = 0; 3170 int extra_space; 3171 u32 flags; 3172 3173 first_frag = skb_peek(&rx_tid->rx_frags); 3174 last_frag = skb_peek_tail(&rx_tid->rx_frags); 3175 3176 skb_queue_walk(&rx_tid->rx_frags, skb) { 3177 flags = 0; 3178 rx_desc = (struct hal_rx_desc *)skb->data; 3179 hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE); 3180 3181 enctype = ath11k_dp_rx_h_mpdu_start_enctype(rx_desc); 3182 if (enctype != HAL_ENCRYPT_TYPE_OPEN) 3183 is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc); 3184 3185 if (is_decrypted) { 3186 if (skb != first_frag) 3187 flags |= RX_FLAG_IV_STRIPPED; 3188 if (skb != last_frag) 3189 flags |= RX_FLAG_ICV_STRIPPED | 3190 RX_FLAG_MIC_STRIPPED; 3191 } 3192 3193 /* RX fragments are always raw packets */ 3194 if (skb != last_frag) 3195 skb_trim(skb, skb->len - FCS_LEN); 3196 ath11k_dp_rx_h_undecap_frag(ar, skb, enctype, flags); 3197 3198 if (skb != first_frag) 3199 skb_pull(skb, HAL_RX_DESC_SIZE + 3200 ieee80211_hdrlen(hdr->frame_control)); 3201 msdu_len += skb->len; 3202 } 3203 3204 extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag)); 3205 if (extra_space > 0 && 3206 (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0)) 3207 return -ENOMEM; 3208 3209 __skb_unlink(first_frag, &rx_tid->rx_frags); 3210 while ((skb = __skb_dequeue(&rx_tid->rx_frags))) { 3211 skb_put_data(first_frag, skb->data, skb->len); 3212 dev_kfree_skb_any(skb); 3213 } 3214 3215 hdr = (struct ieee80211_hdr *)(first_frag->data + HAL_RX_DESC_SIZE); 3216 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS); 3217 ATH11K_SKB_RXCB(first_frag)->is_frag = 1; 3218 3219 if (ath11k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag)) 3220 first_frag = NULL; 3221 3222 *defrag_skb = first_frag; 3223 return 0; 3224 } 3225 3226 static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_tid *rx_tid, 3227 struct sk_buff *defrag_skb) 3228 { 3229 struct ath11k_base *ab = ar->ab; 3230 struct ath11k_pdev_dp *dp = &ar->dp; 3231 struct dp_rxdma_ring *rx_refill_ring = &dp->rx_refill_buf_ring; 3232 struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data; 3233 struct hal_reo_entrance_ring *reo_ent_ring; 3234 struct hal_reo_dest_ring *reo_dest_ring; 3235 struct dp_link_desc_bank *link_desc_banks; 3236 struct hal_rx_msdu_link *msdu_link; 3237 struct hal_rx_msdu_details *msdu0; 3238 struct hal_srng *srng; 3239 dma_addr_t paddr; 3240 u32 desc_bank, msdu_info, mpdu_info; 3241 u32 dst_idx, cookie; 3242 u32 *msdu_len_offset; 3243 int ret, buf_id; 3244 3245 link_desc_banks = ab->dp.link_desc_banks; 3246 reo_dest_ring = rx_tid->dst_ring_desc; 3247 3248 ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank); 3249 msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr + 3250 (paddr - link_desc_banks[desc_bank].paddr)); 3251 msdu0 = &msdu_link->msdu_link[0]; 3252 dst_idx = FIELD_GET(RX_MSDU_DESC_INFO0_REO_DEST_IND, msdu0->rx_msdu_info.info0); 3253 memset(msdu0, 0, sizeof(*msdu0)); 3254 3255 msdu_info = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1) | 3256 FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1) | 3257 FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_CONTINUATION, 0) | 3258 FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_LENGTH, 3259 defrag_skb->len - HAL_RX_DESC_SIZE) | 3260 FIELD_PREP(RX_MSDU_DESC_INFO0_REO_DEST_IND, dst_idx) | 3261 FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_SA, 1) | 3262 FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_DA, 1); 3263 msdu0->rx_msdu_info.info0 = msdu_info; 3264 3265 /* change msdu len in hal rx desc */ 3266 msdu_len_offset = (u32 *)&rx_desc->msdu_start; 3267 *msdu_len_offset &= ~(RX_MSDU_START_INFO1_MSDU_LENGTH); 3268 *msdu_len_offset |= defrag_skb->len - HAL_RX_DESC_SIZE; 3269 3270 paddr = dma_map_single(ab->dev, defrag_skb->data, 3271 defrag_skb->len + skb_tailroom(defrag_skb), 3272 DMA_FROM_DEVICE); 3273 if (dma_mapping_error(ab->dev, paddr)) 3274 return -ENOMEM; 3275 3276 spin_lock_bh(&rx_refill_ring->idr_lock); 3277 buf_id = idr_alloc(&rx_refill_ring->bufs_idr, defrag_skb, 0, 3278 rx_refill_ring->bufs_max * 3, GFP_ATOMIC); 3279 spin_unlock_bh(&rx_refill_ring->idr_lock); 3280 if (buf_id < 0) { 3281 ret = -ENOMEM; 3282 goto err_unmap_dma; 3283 } 3284 3285 ATH11K_SKB_RXCB(defrag_skb)->paddr = paddr; 3286 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, dp->mac_id) | 3287 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 3288 3289 ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie, HAL_RX_BUF_RBM_SW3_BM); 3290 3291 /* Fill mpdu details into reo entrace ring */ 3292 srng = &ab->hal.srng_list[ab->dp.reo_reinject_ring.ring_id]; 3293 3294 spin_lock_bh(&srng->lock); 3295 ath11k_hal_srng_access_begin(ab, srng); 3296 3297 reo_ent_ring = (struct hal_reo_entrance_ring *) 3298 ath11k_hal_srng_src_get_next_entry(ab, srng); 3299 if (!reo_ent_ring) { 3300 ath11k_hal_srng_access_end(ab, srng); 3301 spin_unlock_bh(&srng->lock); 3302 ret = -ENOSPC; 3303 goto err_free_idr; 3304 } 3305 memset(reo_ent_ring, 0, sizeof(*reo_ent_ring)); 3306 3307 ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank); 3308 ath11k_hal_rx_buf_addr_info_set(reo_ent_ring, paddr, desc_bank, 3309 HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST); 3310 3311 mpdu_info = FIELD_PREP(RX_MPDU_DESC_INFO0_MSDU_COUNT, 1) | 3312 FIELD_PREP(RX_MPDU_DESC_INFO0_SEQ_NUM, rx_tid->cur_sn) | 3313 FIELD_PREP(RX_MPDU_DESC_INFO0_FRAG_FLAG, 0) | 3314 FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_SA, 1) | 3315 FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_DA, 1) | 3316 FIELD_PREP(RX_MPDU_DESC_INFO0_RAW_MPDU, 1) | 3317 FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_PN, 1); 3318 3319 reo_ent_ring->rx_mpdu_info.info0 = mpdu_info; 3320 reo_ent_ring->rx_mpdu_info.meta_data = reo_dest_ring->rx_mpdu_info.meta_data; 3321 reo_ent_ring->queue_addr_lo = reo_dest_ring->queue_addr_lo; 3322 reo_ent_ring->info0 = FIELD_PREP(HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI, 3323 FIELD_GET(HAL_REO_DEST_RING_INFO0_QUEUE_ADDR_HI, 3324 reo_dest_ring->info0)) | 3325 FIELD_PREP(HAL_REO_ENTR_RING_INFO0_DEST_IND, dst_idx); 3326 ath11k_hal_srng_access_end(ab, srng); 3327 spin_unlock_bh(&srng->lock); 3328 3329 return 0; 3330 3331 err_free_idr: 3332 spin_lock_bh(&rx_refill_ring->idr_lock); 3333 idr_remove(&rx_refill_ring->bufs_idr, buf_id); 3334 spin_unlock_bh(&rx_refill_ring->idr_lock); 3335 err_unmap_dma: 3336 dma_unmap_single(ab->dev, paddr, defrag_skb->len + skb_tailroom(defrag_skb), 3337 DMA_FROM_DEVICE); 3338 return ret; 3339 } 3340 3341 static int ath11k_dp_rx_h_cmp_frags(struct sk_buff *a, struct sk_buff *b) 3342 { 3343 int frag1, frag2; 3344 3345 frag1 = ath11k_dp_rx_h_mpdu_start_frag_no(a); 3346 frag2 = ath11k_dp_rx_h_mpdu_start_frag_no(b); 3347 3348 return frag1 - frag2; 3349 } 3350 3351 static void ath11k_dp_rx_h_sort_frags(struct sk_buff_head *frag_list, 3352 struct sk_buff *cur_frag) 3353 { 3354 struct sk_buff *skb; 3355 int cmp; 3356 3357 skb_queue_walk(frag_list, skb) { 3358 cmp = ath11k_dp_rx_h_cmp_frags(skb, cur_frag); 3359 if (cmp < 0) 3360 continue; 3361 __skb_queue_before(frag_list, skb, cur_frag); 3362 return; 3363 } 3364 __skb_queue_tail(frag_list, cur_frag); 3365 } 3366 3367 static u64 ath11k_dp_rx_h_get_pn(struct sk_buff *skb) 3368 { 3369 struct ieee80211_hdr *hdr; 3370 u64 pn = 0; 3371 u8 *ehdr; 3372 3373 hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE); 3374 ehdr = skb->data + HAL_RX_DESC_SIZE + ieee80211_hdrlen(hdr->frame_control); 3375 3376 pn = ehdr[0]; 3377 pn |= (u64)ehdr[1] << 8; 3378 pn |= (u64)ehdr[4] << 16; 3379 pn |= (u64)ehdr[5] << 24; 3380 pn |= (u64)ehdr[6] << 32; 3381 pn |= (u64)ehdr[7] << 40; 3382 3383 return pn; 3384 } 3385 3386 static bool 3387 ath11k_dp_rx_h_defrag_validate_incr_pn(struct ath11k *ar, struct dp_rx_tid *rx_tid) 3388 { 3389 enum hal_encrypt_type encrypt_type; 3390 struct sk_buff *first_frag, *skb; 3391 struct hal_rx_desc *desc; 3392 u64 last_pn; 3393 u64 cur_pn; 3394 3395 first_frag = skb_peek(&rx_tid->rx_frags); 3396 desc = (struct hal_rx_desc *)first_frag->data; 3397 3398 encrypt_type = ath11k_dp_rx_h_mpdu_start_enctype(desc); 3399 if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 && 3400 encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 && 3401 encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 && 3402 encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256) 3403 return true; 3404 3405 last_pn = ath11k_dp_rx_h_get_pn(first_frag); 3406 skb_queue_walk(&rx_tid->rx_frags, skb) { 3407 if (skb == first_frag) 3408 continue; 3409 3410 cur_pn = ath11k_dp_rx_h_get_pn(skb); 3411 if (cur_pn != last_pn + 1) 3412 return false; 3413 last_pn = cur_pn; 3414 } 3415 return true; 3416 } 3417 3418 static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar, 3419 struct sk_buff *msdu, 3420 u32 *ring_desc) 3421 { 3422 struct ath11k_base *ab = ar->ab; 3423 struct hal_rx_desc *rx_desc; 3424 struct ath11k_peer *peer; 3425 struct dp_rx_tid *rx_tid; 3426 struct sk_buff *defrag_skb = NULL; 3427 u32 peer_id; 3428 u16 seqno, frag_no; 3429 u8 tid; 3430 int ret = 0; 3431 bool more_frags; 3432 3433 rx_desc = (struct hal_rx_desc *)msdu->data; 3434 peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(rx_desc); 3435 tid = ath11k_dp_rx_h_mpdu_start_tid(rx_desc); 3436 seqno = ath11k_dp_rx_h_mpdu_start_seq_no(rx_desc); 3437 frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(msdu); 3438 more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(msdu); 3439 3440 if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(rx_desc) || 3441 !ath11k_dp_rx_h_mpdu_start_fc_valid(rx_desc) || 3442 tid > IEEE80211_NUM_TIDS) 3443 return -EINVAL; 3444 3445 /* received unfragmented packet in reo 3446 * exception ring, this shouldn't happen 3447 * as these packets typically come from 3448 * reo2sw srngs. 3449 */ 3450 if (WARN_ON_ONCE(!frag_no && !more_frags)) 3451 return -EINVAL; 3452 3453 spin_lock_bh(&ab->base_lock); 3454 peer = ath11k_peer_find_by_id(ab, peer_id); 3455 if (!peer) { 3456 ath11k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n", 3457 peer_id); 3458 ret = -ENOENT; 3459 goto out_unlock; 3460 } 3461 rx_tid = &peer->rx_tid[tid]; 3462 3463 if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) || 3464 skb_queue_empty(&rx_tid->rx_frags)) { 3465 /* Flush stored fragments and start a new sequence */ 3466 ath11k_dp_rx_frags_cleanup(rx_tid, true); 3467 rx_tid->cur_sn = seqno; 3468 } 3469 3470 if (rx_tid->rx_frag_bitmap & BIT(frag_no)) { 3471 /* Fragment already present */ 3472 ret = -EINVAL; 3473 goto out_unlock; 3474 } 3475 3476 if (frag_no > __fls(rx_tid->rx_frag_bitmap)) 3477 __skb_queue_tail(&rx_tid->rx_frags, msdu); 3478 else 3479 ath11k_dp_rx_h_sort_frags(&rx_tid->rx_frags, msdu); 3480 3481 rx_tid->rx_frag_bitmap |= BIT(frag_no); 3482 if (!more_frags) 3483 rx_tid->last_frag_no = frag_no; 3484 3485 if (frag_no == 0) { 3486 rx_tid->dst_ring_desc = kmemdup(ring_desc, 3487 sizeof(*rx_tid->dst_ring_desc), 3488 GFP_ATOMIC); 3489 if (!rx_tid->dst_ring_desc) { 3490 ret = -ENOMEM; 3491 goto out_unlock; 3492 } 3493 } else { 3494 ath11k_dp_rx_link_desc_return(ab, ring_desc, 3495 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 3496 } 3497 3498 if (!rx_tid->last_frag_no || 3499 rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) { 3500 mod_timer(&rx_tid->frag_timer, jiffies + 3501 ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS); 3502 goto out_unlock; 3503 } 3504 3505 spin_unlock_bh(&ab->base_lock); 3506 del_timer_sync(&rx_tid->frag_timer); 3507 spin_lock_bh(&ab->base_lock); 3508 3509 peer = ath11k_peer_find_by_id(ab, peer_id); 3510 if (!peer) 3511 goto err_frags_cleanup; 3512 3513 if (!ath11k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid)) 3514 goto err_frags_cleanup; 3515 3516 if (ath11k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb)) 3517 goto err_frags_cleanup; 3518 3519 if (!defrag_skb) 3520 goto err_frags_cleanup; 3521 3522 if (ath11k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb)) 3523 goto err_frags_cleanup; 3524 3525 ath11k_dp_rx_frags_cleanup(rx_tid, false); 3526 goto out_unlock; 3527 3528 err_frags_cleanup: 3529 dev_kfree_skb_any(defrag_skb); 3530 ath11k_dp_rx_frags_cleanup(rx_tid, true); 3531 out_unlock: 3532 spin_unlock_bh(&ab->base_lock); 3533 return ret; 3534 } 3535 3536 static int 3537 ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool drop) 3538 { 3539 struct ath11k_pdev_dp *dp = &ar->dp; 3540 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; 3541 struct sk_buff *msdu; 3542 struct ath11k_skb_rxcb *rxcb; 3543 struct hal_rx_desc *rx_desc; 3544 u8 *hdr_status; 3545 u16 msdu_len; 3546 3547 spin_lock_bh(&rx_ring->idr_lock); 3548 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 3549 if (!msdu) { 3550 ath11k_warn(ar->ab, "rx err buf with invalid buf_id %d\n", 3551 buf_id); 3552 spin_unlock_bh(&rx_ring->idr_lock); 3553 return -EINVAL; 3554 } 3555 3556 idr_remove(&rx_ring->bufs_idr, buf_id); 3557 spin_unlock_bh(&rx_ring->idr_lock); 3558 3559 rxcb = ATH11K_SKB_RXCB(msdu); 3560 dma_unmap_single(ar->ab->dev, rxcb->paddr, 3561 msdu->len + skb_tailroom(msdu), 3562 DMA_FROM_DEVICE); 3563 3564 if (drop) { 3565 dev_kfree_skb_any(msdu); 3566 return 0; 3567 } 3568 3569 rcu_read_lock(); 3570 if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) { 3571 dev_kfree_skb_any(msdu); 3572 goto exit; 3573 } 3574 3575 if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { 3576 dev_kfree_skb_any(msdu); 3577 goto exit; 3578 } 3579 3580 rx_desc = (struct hal_rx_desc *)msdu->data; 3581 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc); 3582 if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) { 3583 hdr_status = ath11k_dp_rx_h_80211_hdr(rx_desc); 3584 ath11k_warn(ar->ab, "invalid msdu leng %u", msdu_len); 3585 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status, 3586 sizeof(struct ieee80211_hdr)); 3587 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc, 3588 sizeof(struct hal_rx_desc)); 3589 dev_kfree_skb_any(msdu); 3590 goto exit; 3591 } 3592 3593 skb_put(msdu, HAL_RX_DESC_SIZE + msdu_len); 3594 3595 if (ath11k_dp_rx_frag_h_mpdu(ar, msdu, ring_desc)) { 3596 dev_kfree_skb_any(msdu); 3597 ath11k_dp_rx_link_desc_return(ar->ab, ring_desc, 3598 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 3599 } 3600 exit: 3601 rcu_read_unlock(); 3602 return 0; 3603 } 3604 3605 int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi, 3606 int budget) 3607 { 3608 u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC]; 3609 struct dp_link_desc_bank *link_desc_banks; 3610 enum hal_rx_buf_return_buf_manager rbm; 3611 int tot_n_bufs_reaped, quota, ret, i; 3612 int n_bufs_reaped[MAX_RADIOS] = {0}; 3613 struct dp_rxdma_ring *rx_ring; 3614 struct dp_srng *reo_except; 3615 u32 desc_bank, num_msdus; 3616 struct hal_srng *srng; 3617 struct ath11k_dp *dp; 3618 void *link_desc_va; 3619 int buf_id, mac_id; 3620 struct ath11k *ar; 3621 dma_addr_t paddr; 3622 u32 *desc; 3623 bool is_frag; 3624 u8 drop = 0; 3625 3626 tot_n_bufs_reaped = 0; 3627 quota = budget; 3628 3629 dp = &ab->dp; 3630 reo_except = &dp->reo_except_ring; 3631 link_desc_banks = dp->link_desc_banks; 3632 3633 srng = &ab->hal.srng_list[reo_except->ring_id]; 3634 3635 spin_lock_bh(&srng->lock); 3636 3637 ath11k_hal_srng_access_begin(ab, srng); 3638 3639 while (budget && 3640 (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 3641 struct hal_reo_dest_ring *reo_desc = (struct hal_reo_dest_ring *)desc; 3642 3643 ab->soc_stats.err_ring_pkts++; 3644 ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr, 3645 &desc_bank); 3646 if (ret) { 3647 ath11k_warn(ab, "failed to parse error reo desc %d\n", 3648 ret); 3649 continue; 3650 } 3651 link_desc_va = link_desc_banks[desc_bank].vaddr + 3652 (paddr - link_desc_banks[desc_bank].paddr); 3653 ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies, 3654 &rbm); 3655 if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST && 3656 rbm != HAL_RX_BUF_RBM_SW3_BM) { 3657 ab->soc_stats.invalid_rbm++; 3658 ath11k_warn(ab, "invalid return buffer manager %d\n", rbm); 3659 ath11k_dp_rx_link_desc_return(ab, desc, 3660 HAL_WBM_REL_BM_ACT_REL_MSDU); 3661 continue; 3662 } 3663 3664 is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG); 3665 3666 /* Process only rx fragments with one msdu per link desc below, and drop 3667 * msdu's indicated due to error reasons. 3668 */ 3669 if (!is_frag || num_msdus > 1) { 3670 drop = 1; 3671 /* Return the link desc back to wbm idle list */ 3672 ath11k_dp_rx_link_desc_return(ab, desc, 3673 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 3674 } 3675 3676 for (i = 0; i < num_msdus; i++) { 3677 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 3678 msdu_cookies[i]); 3679 3680 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, 3681 msdu_cookies[i]); 3682 3683 ar = ab->pdevs[mac_id].ar; 3684 3685 if (!ath11k_dp_process_rx_err_buf(ar, desc, buf_id, drop)) { 3686 n_bufs_reaped[mac_id]++; 3687 tot_n_bufs_reaped++; 3688 } 3689 } 3690 3691 if (tot_n_bufs_reaped >= quota) { 3692 tot_n_bufs_reaped = quota; 3693 goto exit; 3694 } 3695 3696 budget = quota - tot_n_bufs_reaped; 3697 } 3698 3699 exit: 3700 ath11k_hal_srng_access_end(ab, srng); 3701 3702 spin_unlock_bh(&srng->lock); 3703 3704 for (i = 0; i < ab->num_radios; i++) { 3705 if (!n_bufs_reaped[i]) 3706 continue; 3707 3708 ar = ab->pdevs[i].ar; 3709 rx_ring = &ar->dp.rx_refill_buf_ring; 3710 3711 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i], 3712 HAL_RX_BUF_RBM_SW3_BM); 3713 } 3714 3715 return tot_n_bufs_reaped; 3716 } 3717 3718 static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar, 3719 int msdu_len, 3720 struct sk_buff_head *msdu_list) 3721 { 3722 struct sk_buff *skb, *tmp; 3723 struct ath11k_skb_rxcb *rxcb; 3724 int n_buffs; 3725 3726 n_buffs = DIV_ROUND_UP(msdu_len, 3727 (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE)); 3728 3729 skb_queue_walk_safe(msdu_list, skb, tmp) { 3730 rxcb = ATH11K_SKB_RXCB(skb); 3731 if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO && 3732 rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) { 3733 if (!n_buffs) 3734 break; 3735 __skb_unlink(skb, msdu_list); 3736 dev_kfree_skb_any(skb); 3737 n_buffs--; 3738 } 3739 } 3740 } 3741 3742 static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu, 3743 struct ieee80211_rx_status *status, 3744 struct sk_buff_head *msdu_list) 3745 { 3746 u16 msdu_len; 3747 struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; 3748 u8 l3pad_bytes; 3749 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 3750 3751 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc); 3752 3753 if (!rxcb->is_frag && ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE)) { 3754 /* First buffer will be freed by the caller, so deduct it's length */ 3755 msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE); 3756 ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list); 3757 return -EINVAL; 3758 } 3759 3760 if (!ath11k_dp_rx_h_attn_msdu_done(desc)) { 3761 ath11k_warn(ar->ab, 3762 "msdu_done bit not set in null_q_des processing\n"); 3763 __skb_queue_purge(msdu_list); 3764 return -EIO; 3765 } 3766 3767 /* Handle NULL queue descriptor violations arising out a missing 3768 * REO queue for a given peer or a given TID. This typically 3769 * may happen if a packet is received on a QOS enabled TID before the 3770 * ADDBA negotiation for that TID, when the TID queue is setup. Or 3771 * it may also happen for MC/BC frames if they are not routed to the 3772 * non-QOS TID queue, in the absence of any other default TID queue. 3773 * This error can show up both in a REO destination or WBM release ring. 3774 */ 3775 3776 rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc); 3777 rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc); 3778 3779 if (rxcb->is_frag) { 3780 skb_pull(msdu, HAL_RX_DESC_SIZE); 3781 } else { 3782 l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc); 3783 3784 if ((HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE) 3785 return -EINVAL; 3786 3787 skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len); 3788 skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes); 3789 } 3790 ath11k_dp_rx_h_ppdu(ar, desc, status); 3791 3792 ath11k_dp_rx_h_mpdu(ar, msdu, desc, status); 3793 3794 rxcb->tid = ath11k_dp_rx_h_mpdu_start_tid(desc); 3795 3796 /* Please note that caller will having the access to msdu and completing 3797 * rx with mac80211. Need not worry about cleaning up amsdu_list. 3798 */ 3799 3800 return 0; 3801 } 3802 3803 static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu, 3804 struct ieee80211_rx_status *status, 3805 struct sk_buff_head *msdu_list) 3806 { 3807 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 3808 bool drop = false; 3809 3810 ar->ab->soc_stats.reo_error[rxcb->err_code]++; 3811 3812 switch (rxcb->err_code) { 3813 case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO: 3814 if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list)) 3815 drop = true; 3816 break; 3817 case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED: 3818 /* TODO: Do not drop PN failed packets in the driver; 3819 * instead, it is good to drop such packets in mac80211 3820 * after incrementing the replay counters. 3821 */ 3822 fallthrough; 3823 default: 3824 /* TODO: Review other errors and process them to mac80211 3825 * as appropriate. 3826 */ 3827 drop = true; 3828 break; 3829 } 3830 3831 return drop; 3832 } 3833 3834 static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu, 3835 struct ieee80211_rx_status *status) 3836 { 3837 u16 msdu_len; 3838 struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; 3839 u8 l3pad_bytes; 3840 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 3841 3842 rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc); 3843 rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc); 3844 3845 l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc); 3846 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc); 3847 skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len); 3848 skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes); 3849 3850 ath11k_dp_rx_h_ppdu(ar, desc, status); 3851 3852 status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR | 3853 RX_FLAG_DECRYPTED); 3854 3855 ath11k_dp_rx_h_undecap(ar, msdu, desc, 3856 HAL_ENCRYPT_TYPE_TKIP_MIC, status, false); 3857 } 3858 3859 static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar, struct sk_buff *msdu, 3860 struct ieee80211_rx_status *status) 3861 { 3862 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 3863 bool drop = false; 3864 3865 ar->ab->soc_stats.rxdma_error[rxcb->err_code]++; 3866 3867 switch (rxcb->err_code) { 3868 case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR: 3869 ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status); 3870 break; 3871 default: 3872 /* TODO: Review other rxdma error code to check if anything is 3873 * worth reporting to mac80211 3874 */ 3875 drop = true; 3876 break; 3877 } 3878 3879 return drop; 3880 } 3881 3882 static void ath11k_dp_rx_wbm_err(struct ath11k *ar, 3883 struct napi_struct *napi, 3884 struct sk_buff *msdu, 3885 struct sk_buff_head *msdu_list) 3886 { 3887 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 3888 struct ieee80211_rx_status rxs = {0}; 3889 struct ieee80211_rx_status *status; 3890 bool drop = true; 3891 3892 switch (rxcb->err_rel_src) { 3893 case HAL_WBM_REL_SRC_MODULE_REO: 3894 drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list); 3895 break; 3896 case HAL_WBM_REL_SRC_MODULE_RXDMA: 3897 drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs); 3898 break; 3899 default: 3900 /* msdu will get freed */ 3901 break; 3902 } 3903 3904 if (drop) { 3905 dev_kfree_skb_any(msdu); 3906 return; 3907 } 3908 3909 status = IEEE80211_SKB_RXCB(msdu); 3910 *status = rxs; 3911 3912 ath11k_dp_rx_deliver_msdu(ar, napi, msdu); 3913 } 3914 3915 int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab, 3916 struct napi_struct *napi, int budget) 3917 { 3918 struct ath11k *ar; 3919 struct ath11k_dp *dp = &ab->dp; 3920 struct dp_rxdma_ring *rx_ring; 3921 struct hal_rx_wbm_rel_info err_info; 3922 struct hal_srng *srng; 3923 struct sk_buff *msdu; 3924 struct sk_buff_head msdu_list[MAX_RADIOS]; 3925 struct ath11k_skb_rxcb *rxcb; 3926 u32 *rx_desc; 3927 int buf_id, mac_id; 3928 int num_buffs_reaped[MAX_RADIOS] = {0}; 3929 int total_num_buffs_reaped = 0; 3930 int ret, i; 3931 3932 for (i = 0; i < ab->num_radios; i++) 3933 __skb_queue_head_init(&msdu_list[i]); 3934 3935 srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id]; 3936 3937 spin_lock_bh(&srng->lock); 3938 3939 ath11k_hal_srng_access_begin(ab, srng); 3940 3941 while (budget) { 3942 rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng); 3943 if (!rx_desc) 3944 break; 3945 3946 ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info); 3947 if (ret) { 3948 ath11k_warn(ab, 3949 "failed to parse rx error in wbm_rel ring desc %d\n", 3950 ret); 3951 continue; 3952 } 3953 3954 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie); 3955 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie); 3956 3957 ar = ab->pdevs[mac_id].ar; 3958 rx_ring = &ar->dp.rx_refill_buf_ring; 3959 3960 spin_lock_bh(&rx_ring->idr_lock); 3961 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 3962 if (!msdu) { 3963 ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d\n", 3964 buf_id, mac_id); 3965 spin_unlock_bh(&rx_ring->idr_lock); 3966 continue; 3967 } 3968 3969 idr_remove(&rx_ring->bufs_idr, buf_id); 3970 spin_unlock_bh(&rx_ring->idr_lock); 3971 3972 rxcb = ATH11K_SKB_RXCB(msdu); 3973 dma_unmap_single(ab->dev, rxcb->paddr, 3974 msdu->len + skb_tailroom(msdu), 3975 DMA_FROM_DEVICE); 3976 3977 num_buffs_reaped[mac_id]++; 3978 total_num_buffs_reaped++; 3979 budget--; 3980 3981 if (err_info.push_reason != 3982 HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) { 3983 dev_kfree_skb_any(msdu); 3984 continue; 3985 } 3986 3987 rxcb->err_rel_src = err_info.err_rel_src; 3988 rxcb->err_code = err_info.err_code; 3989 rxcb->rx_desc = (struct hal_rx_desc *)msdu->data; 3990 __skb_queue_tail(&msdu_list[mac_id], msdu); 3991 } 3992 3993 ath11k_hal_srng_access_end(ab, srng); 3994 3995 spin_unlock_bh(&srng->lock); 3996 3997 if (!total_num_buffs_reaped) 3998 goto done; 3999 4000 for (i = 0; i < ab->num_radios; i++) { 4001 if (!num_buffs_reaped[i]) 4002 continue; 4003 4004 ar = ab->pdevs[i].ar; 4005 rx_ring = &ar->dp.rx_refill_buf_ring; 4006 4007 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i], 4008 HAL_RX_BUF_RBM_SW3_BM); 4009 } 4010 4011 rcu_read_lock(); 4012 for (i = 0; i < ab->num_radios; i++) { 4013 if (!rcu_dereference(ab->pdevs_active[i])) { 4014 __skb_queue_purge(&msdu_list[i]); 4015 continue; 4016 } 4017 4018 ar = ab->pdevs[i].ar; 4019 4020 if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { 4021 __skb_queue_purge(&msdu_list[i]); 4022 continue; 4023 } 4024 4025 while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL) 4026 ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]); 4027 } 4028 rcu_read_unlock(); 4029 done: 4030 return total_num_buffs_reaped; 4031 } 4032 4033 int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget) 4034 { 4035 struct ath11k *ar; 4036 struct dp_srng *err_ring; 4037 struct dp_rxdma_ring *rx_ring; 4038 struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks; 4039 struct hal_srng *srng; 4040 u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC]; 4041 enum hal_rx_buf_return_buf_manager rbm; 4042 enum hal_reo_entr_rxdma_ecode rxdma_err_code; 4043 struct ath11k_skb_rxcb *rxcb; 4044 struct sk_buff *skb; 4045 struct hal_reo_entrance_ring *entr_ring; 4046 void *desc; 4047 int num_buf_freed = 0; 4048 int quota = budget; 4049 dma_addr_t paddr; 4050 u32 desc_bank; 4051 void *link_desc_va; 4052 int num_msdus; 4053 int i; 4054 int buf_id; 4055 4056 ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar; 4057 err_ring = &ar->dp.rxdma_err_dst_ring[ath11k_hw_mac_id_to_srng_id(&ab->hw_params, 4058 mac_id)]; 4059 rx_ring = &ar->dp.rx_refill_buf_ring; 4060 4061 srng = &ab->hal.srng_list[err_ring->ring_id]; 4062 4063 spin_lock_bh(&srng->lock); 4064 4065 ath11k_hal_srng_access_begin(ab, srng); 4066 4067 while (quota-- && 4068 (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 4069 ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank); 4070 4071 entr_ring = (struct hal_reo_entrance_ring *)desc; 4072 rxdma_err_code = 4073 FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE, 4074 entr_ring->info1); 4075 ab->soc_stats.rxdma_error[rxdma_err_code]++; 4076 4077 link_desc_va = link_desc_banks[desc_bank].vaddr + 4078 (paddr - link_desc_banks[desc_bank].paddr); 4079 ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, 4080 msdu_cookies, &rbm); 4081 4082 for (i = 0; i < num_msdus; i++) { 4083 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 4084 msdu_cookies[i]); 4085 4086 spin_lock_bh(&rx_ring->idr_lock); 4087 skb = idr_find(&rx_ring->bufs_idr, buf_id); 4088 if (!skb) { 4089 ath11k_warn(ab, "rxdma error with invalid buf_id %d\n", 4090 buf_id); 4091 spin_unlock_bh(&rx_ring->idr_lock); 4092 continue; 4093 } 4094 4095 idr_remove(&rx_ring->bufs_idr, buf_id); 4096 spin_unlock_bh(&rx_ring->idr_lock); 4097 4098 rxcb = ATH11K_SKB_RXCB(skb); 4099 dma_unmap_single(ab->dev, rxcb->paddr, 4100 skb->len + skb_tailroom(skb), 4101 DMA_FROM_DEVICE); 4102 dev_kfree_skb_any(skb); 4103 4104 num_buf_freed++; 4105 } 4106 4107 ath11k_dp_rx_link_desc_return(ab, desc, 4108 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 4109 } 4110 4111 ath11k_hal_srng_access_end(ab, srng); 4112 4113 spin_unlock_bh(&srng->lock); 4114 4115 if (num_buf_freed) 4116 ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed, 4117 HAL_RX_BUF_RBM_SW3_BM); 4118 4119 return budget - quota; 4120 } 4121 4122 void ath11k_dp_process_reo_status(struct ath11k_base *ab) 4123 { 4124 struct ath11k_dp *dp = &ab->dp; 4125 struct hal_srng *srng; 4126 struct dp_reo_cmd *cmd, *tmp; 4127 bool found = false; 4128 u32 *reo_desc; 4129 u16 tag; 4130 struct hal_reo_status reo_status; 4131 4132 srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id]; 4133 4134 memset(&reo_status, 0, sizeof(reo_status)); 4135 4136 spin_lock_bh(&srng->lock); 4137 4138 ath11k_hal_srng_access_begin(ab, srng); 4139 4140 while ((reo_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 4141 tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc); 4142 4143 switch (tag) { 4144 case HAL_REO_GET_QUEUE_STATS_STATUS: 4145 ath11k_hal_reo_status_queue_stats(ab, reo_desc, 4146 &reo_status); 4147 break; 4148 case HAL_REO_FLUSH_QUEUE_STATUS: 4149 ath11k_hal_reo_flush_queue_status(ab, reo_desc, 4150 &reo_status); 4151 break; 4152 case HAL_REO_FLUSH_CACHE_STATUS: 4153 ath11k_hal_reo_flush_cache_status(ab, reo_desc, 4154 &reo_status); 4155 break; 4156 case HAL_REO_UNBLOCK_CACHE_STATUS: 4157 ath11k_hal_reo_unblk_cache_status(ab, reo_desc, 4158 &reo_status); 4159 break; 4160 case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS: 4161 ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc, 4162 &reo_status); 4163 break; 4164 case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS: 4165 ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc, 4166 &reo_status); 4167 break; 4168 case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS: 4169 ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc, 4170 &reo_status); 4171 break; 4172 default: 4173 ath11k_warn(ab, "Unknown reo status type %d\n", tag); 4174 continue; 4175 } 4176 4177 spin_lock_bh(&dp->reo_cmd_lock); 4178 list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) { 4179 if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) { 4180 found = true; 4181 list_del(&cmd->list); 4182 break; 4183 } 4184 } 4185 spin_unlock_bh(&dp->reo_cmd_lock); 4186 4187 if (found) { 4188 cmd->handler(dp, (void *)&cmd->data, 4189 reo_status.uniform_hdr.cmd_status); 4190 kfree(cmd); 4191 } 4192 4193 found = false; 4194 } 4195 4196 ath11k_hal_srng_access_end(ab, srng); 4197 4198 spin_unlock_bh(&srng->lock); 4199 } 4200 4201 void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id) 4202 { 4203 struct ath11k *ar = ab->pdevs[mac_id].ar; 4204 4205 ath11k_dp_rx_pdev_srng_free(ar); 4206 ath11k_dp_rxdma_pdev_buf_free(ar); 4207 } 4208 4209 int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id) 4210 { 4211 struct ath11k *ar = ab->pdevs[mac_id].ar; 4212 struct ath11k_pdev_dp *dp = &ar->dp; 4213 u32 ring_id; 4214 int i; 4215 int ret; 4216 4217 ret = ath11k_dp_rx_pdev_srng_alloc(ar); 4218 if (ret) { 4219 ath11k_warn(ab, "failed to setup rx srngs\n"); 4220 return ret; 4221 } 4222 4223 ret = ath11k_dp_rxdma_pdev_buf_setup(ar); 4224 if (ret) { 4225 ath11k_warn(ab, "failed to setup rxdma ring\n"); 4226 return ret; 4227 } 4228 4229 ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id; 4230 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF); 4231 if (ret) { 4232 ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n", 4233 ret); 4234 return ret; 4235 } 4236 4237 if (ab->hw_params.rx_mac_buf_ring) { 4238 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 4239 ring_id = dp->rx_mac_buf_ring[i].ring_id; 4240 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, 4241 mac_id + i, HAL_RXDMA_BUF); 4242 if (ret) { 4243 ath11k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n", 4244 i, ret); 4245 return ret; 4246 } 4247 } 4248 } 4249 4250 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 4251 ring_id = dp->rxdma_err_dst_ring[i].ring_id; 4252 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, 4253 mac_id + i, HAL_RXDMA_DST); 4254 if (ret) { 4255 ath11k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n", 4256 i, ret); 4257 return ret; 4258 } 4259 } 4260 4261 if (!ab->hw_params.rxdma1_enable) 4262 goto config_refill_ring; 4263 4264 ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id; 4265 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, 4266 mac_id, HAL_RXDMA_MONITOR_BUF); 4267 if (ret) { 4268 ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n", 4269 ret); 4270 return ret; 4271 } 4272 ret = ath11k_dp_tx_htt_srng_setup(ab, 4273 dp->rxdma_mon_dst_ring.ring_id, 4274 mac_id, HAL_RXDMA_MONITOR_DST); 4275 if (ret) { 4276 ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n", 4277 ret); 4278 return ret; 4279 } 4280 ret = ath11k_dp_tx_htt_srng_setup(ab, 4281 dp->rxdma_mon_desc_ring.ring_id, 4282 mac_id, HAL_RXDMA_MONITOR_DESC); 4283 if (ret) { 4284 ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n", 4285 ret); 4286 return ret; 4287 } 4288 4289 config_refill_ring: 4290 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 4291 ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id; 4292 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i, 4293 HAL_RXDMA_MONITOR_STATUS); 4294 if (ret) { 4295 ath11k_warn(ab, 4296 "failed to configure mon_status_refill_ring%d %d\n", 4297 i, ret); 4298 return ret; 4299 } 4300 } 4301 4302 return 0; 4303 } 4304 4305 static void ath11k_dp_mon_set_frag_len(u32 *total_len, u32 *frag_len) 4306 { 4307 if (*total_len >= (DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc))) { 4308 *frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc); 4309 *total_len -= *frag_len; 4310 } else { 4311 *frag_len = *total_len; 4312 *total_len = 0; 4313 } 4314 } 4315 4316 static 4317 int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar, 4318 void *p_last_buf_addr_info, 4319 u8 mac_id) 4320 { 4321 struct ath11k_pdev_dp *dp = &ar->dp; 4322 struct dp_srng *dp_srng; 4323 void *hal_srng; 4324 void *src_srng_desc; 4325 int ret = 0; 4326 4327 if (ar->ab->hw_params.rxdma1_enable) { 4328 dp_srng = &dp->rxdma_mon_desc_ring; 4329 hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id]; 4330 } else { 4331 dp_srng = &ar->ab->dp.wbm_desc_rel_ring; 4332 hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id]; 4333 } 4334 4335 ath11k_hal_srng_access_begin(ar->ab, hal_srng); 4336 4337 src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng); 4338 4339 if (src_srng_desc) { 4340 struct ath11k_buffer_addr *src_desc = 4341 (struct ath11k_buffer_addr *)src_srng_desc; 4342 4343 *src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info); 4344 } else { 4345 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4346 "Monitor Link Desc Ring %d Full", mac_id); 4347 ret = -ENOMEM; 4348 } 4349 4350 ath11k_hal_srng_access_end(ar->ab, hal_srng); 4351 return ret; 4352 } 4353 4354 static 4355 void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc, 4356 dma_addr_t *paddr, u32 *sw_cookie, 4357 u8 *rbm, 4358 void **pp_buf_addr_info) 4359 { 4360 struct hal_rx_msdu_link *msdu_link = 4361 (struct hal_rx_msdu_link *)rx_msdu_link_desc; 4362 struct ath11k_buffer_addr *buf_addr_info; 4363 4364 buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info; 4365 4366 ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, rbm); 4367 4368 *pp_buf_addr_info = (void *)buf_addr_info; 4369 } 4370 4371 static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len) 4372 { 4373 if (skb->len > len) { 4374 skb_trim(skb, len); 4375 } else { 4376 if (skb_tailroom(skb) < len - skb->len) { 4377 if ((pskb_expand_head(skb, 0, 4378 len - skb->len - skb_tailroom(skb), 4379 GFP_ATOMIC))) { 4380 dev_kfree_skb_any(skb); 4381 return -ENOMEM; 4382 } 4383 } 4384 skb_put(skb, (len - skb->len)); 4385 } 4386 return 0; 4387 } 4388 4389 static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar, 4390 void *msdu_link_desc, 4391 struct hal_rx_msdu_list *msdu_list, 4392 u16 *num_msdus) 4393 { 4394 struct hal_rx_msdu_details *msdu_details = NULL; 4395 struct rx_msdu_desc *msdu_desc_info = NULL; 4396 struct hal_rx_msdu_link *msdu_link = NULL; 4397 int i; 4398 u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1); 4399 u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1); 4400 u8 tmp = 0; 4401 4402 msdu_link = (struct hal_rx_msdu_link *)msdu_link_desc; 4403 msdu_details = &msdu_link->msdu_link[0]; 4404 4405 for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) { 4406 if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR, 4407 msdu_details[i].buf_addr_info.info0) == 0) { 4408 msdu_desc_info = &msdu_details[i - 1].rx_msdu_info; 4409 msdu_desc_info->info0 |= last; 4410 ; 4411 break; 4412 } 4413 msdu_desc_info = &msdu_details[i].rx_msdu_info; 4414 4415 if (!i) 4416 msdu_desc_info->info0 |= first; 4417 else if (i == (HAL_RX_NUM_MSDU_DESC - 1)) 4418 msdu_desc_info->info0 |= last; 4419 msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0; 4420 msdu_list->msdu_info[i].msdu_len = 4421 HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0); 4422 msdu_list->sw_cookie[i] = 4423 FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, 4424 msdu_details[i].buf_addr_info.info1); 4425 tmp = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR, 4426 msdu_details[i].buf_addr_info.info1); 4427 msdu_list->rbm[i] = tmp; 4428 } 4429 *num_msdus = i; 4430 } 4431 4432 static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id, 4433 u32 *rx_bufs_used) 4434 { 4435 u32 ret = 0; 4436 4437 if ((*ppdu_id < msdu_ppdu_id) && 4438 ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) { 4439 *ppdu_id = msdu_ppdu_id; 4440 ret = msdu_ppdu_id; 4441 } else if ((*ppdu_id > msdu_ppdu_id) && 4442 ((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) { 4443 /* mon_dst is behind than mon_status 4444 * skip dst_ring and free it 4445 */ 4446 *rx_bufs_used += 1; 4447 *ppdu_id = msdu_ppdu_id; 4448 ret = msdu_ppdu_id; 4449 } 4450 return ret; 4451 } 4452 4453 static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info, 4454 bool *is_frag, u32 *total_len, 4455 u32 *frag_len, u32 *msdu_cnt) 4456 { 4457 if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) { 4458 if (!*is_frag) { 4459 *total_len = info->msdu_len; 4460 *is_frag = true; 4461 } 4462 ath11k_dp_mon_set_frag_len(total_len, 4463 frag_len); 4464 } else { 4465 if (*is_frag) { 4466 ath11k_dp_mon_set_frag_len(total_len, 4467 frag_len); 4468 } else { 4469 *frag_len = info->msdu_len; 4470 } 4471 *is_frag = false; 4472 *msdu_cnt -= 1; 4473 } 4474 } 4475 4476 static u32 4477 ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id, 4478 void *ring_entry, struct sk_buff **head_msdu, 4479 struct sk_buff **tail_msdu, u32 *npackets, 4480 u32 *ppdu_id) 4481 { 4482 struct ath11k_pdev_dp *dp = &ar->dp; 4483 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 4484 struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring; 4485 struct sk_buff *msdu = NULL, *last = NULL; 4486 struct hal_rx_msdu_list msdu_list; 4487 void *p_buf_addr_info, *p_last_buf_addr_info; 4488 struct hal_rx_desc *rx_desc; 4489 void *rx_msdu_link_desc; 4490 dma_addr_t paddr; 4491 u16 num_msdus = 0; 4492 u32 rx_buf_size, rx_pkt_offset, sw_cookie; 4493 u32 rx_bufs_used = 0, i = 0; 4494 u32 msdu_ppdu_id = 0, msdu_cnt = 0; 4495 u32 total_len = 0, frag_len = 0; 4496 bool is_frag, is_first_msdu; 4497 bool drop_mpdu = false; 4498 struct ath11k_skb_rxcb *rxcb; 4499 struct hal_reo_entrance_ring *ent_desc = 4500 (struct hal_reo_entrance_ring *)ring_entry; 4501 int buf_id; 4502 u32 rx_link_buf_info[2]; 4503 u8 rbm; 4504 4505 if (!ar->ab->hw_params.rxdma1_enable) 4506 rx_ring = &dp->rx_refill_buf_ring; 4507 4508 ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr, 4509 &sw_cookie, 4510 &p_last_buf_addr_info, &rbm, 4511 &msdu_cnt); 4512 4513 if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON, 4514 ent_desc->info1) == 4515 HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) { 4516 u8 rxdma_err = 4517 FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE, 4518 ent_desc->info1); 4519 if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR || 4520 rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR || 4521 rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) { 4522 drop_mpdu = true; 4523 pmon->rx_mon_stats.dest_mpdu_drop++; 4524 } 4525 } 4526 4527 is_frag = false; 4528 is_first_msdu = true; 4529 4530 do { 4531 if (pmon->mon_last_linkdesc_paddr == paddr) { 4532 pmon->rx_mon_stats.dup_mon_linkdesc_cnt++; 4533 return rx_bufs_used; 4534 } 4535 4536 if (ar->ab->hw_params.rxdma1_enable) 4537 rx_msdu_link_desc = 4538 (void *)pmon->link_desc_banks[sw_cookie].vaddr + 4539 (paddr - pmon->link_desc_banks[sw_cookie].paddr); 4540 else 4541 rx_msdu_link_desc = 4542 (void *)ar->ab->dp.link_desc_banks[sw_cookie].vaddr + 4543 (paddr - ar->ab->dp.link_desc_banks[sw_cookie].paddr); 4544 4545 ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list, 4546 &num_msdus); 4547 4548 for (i = 0; i < num_msdus; i++) { 4549 u32 l2_hdr_offset; 4550 4551 if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) { 4552 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4553 "i %d last_cookie %d is same\n", 4554 i, pmon->mon_last_buf_cookie); 4555 drop_mpdu = true; 4556 pmon->rx_mon_stats.dup_mon_buf_cnt++; 4557 continue; 4558 } 4559 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 4560 msdu_list.sw_cookie[i]); 4561 4562 spin_lock_bh(&rx_ring->idr_lock); 4563 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 4564 spin_unlock_bh(&rx_ring->idr_lock); 4565 if (!msdu) { 4566 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4567 "msdu_pop: invalid buf_id %d\n", buf_id); 4568 break; 4569 } 4570 rxcb = ATH11K_SKB_RXCB(msdu); 4571 if (!rxcb->unmapped) { 4572 dma_unmap_single(ar->ab->dev, rxcb->paddr, 4573 msdu->len + 4574 skb_tailroom(msdu), 4575 DMA_FROM_DEVICE); 4576 rxcb->unmapped = 1; 4577 } 4578 if (drop_mpdu) { 4579 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4580 "i %d drop msdu %p *ppdu_id %x\n", 4581 i, msdu, *ppdu_id); 4582 dev_kfree_skb_any(msdu); 4583 msdu = NULL; 4584 goto next_msdu; 4585 } 4586 4587 rx_desc = (struct hal_rx_desc *)msdu->data; 4588 4589 rx_pkt_offset = sizeof(struct hal_rx_desc); 4590 l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(rx_desc); 4591 4592 if (is_first_msdu) { 4593 if (!ath11k_dp_rxdesc_mpdu_valid(rx_desc)) { 4594 drop_mpdu = true; 4595 dev_kfree_skb_any(msdu); 4596 msdu = NULL; 4597 pmon->mon_last_linkdesc_paddr = paddr; 4598 goto next_msdu; 4599 } 4600 4601 msdu_ppdu_id = 4602 ath11k_dp_rxdesc_get_ppduid(rx_desc); 4603 4604 if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id, 4605 ppdu_id, 4606 &rx_bufs_used)) { 4607 if (rx_bufs_used) { 4608 drop_mpdu = true; 4609 dev_kfree_skb_any(msdu); 4610 msdu = NULL; 4611 goto next_msdu; 4612 } 4613 return rx_bufs_used; 4614 } 4615 pmon->mon_last_linkdesc_paddr = paddr; 4616 is_first_msdu = false; 4617 } 4618 ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i], 4619 &is_frag, &total_len, 4620 &frag_len, &msdu_cnt); 4621 rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len; 4622 4623 ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size); 4624 4625 if (!(*head_msdu)) 4626 *head_msdu = msdu; 4627 else if (last) 4628 last->next = msdu; 4629 4630 last = msdu; 4631 next_msdu: 4632 pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i]; 4633 rx_bufs_used++; 4634 spin_lock_bh(&rx_ring->idr_lock); 4635 idr_remove(&rx_ring->bufs_idr, buf_id); 4636 spin_unlock_bh(&rx_ring->idr_lock); 4637 } 4638 4639 ath11k_hal_rx_buf_addr_info_set(rx_link_buf_info, paddr, sw_cookie, rbm); 4640 4641 ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr, 4642 &sw_cookie, &rbm, 4643 &p_buf_addr_info); 4644 4645 if (ar->ab->hw_params.rxdma1_enable) { 4646 if (ath11k_dp_rx_monitor_link_desc_return(ar, 4647 p_last_buf_addr_info, 4648 dp->mac_id)) 4649 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4650 "dp_rx_monitor_link_desc_return failed"); 4651 } else { 4652 ath11k_dp_rx_link_desc_return(ar->ab, rx_link_buf_info, 4653 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 4654 } 4655 4656 p_last_buf_addr_info = p_buf_addr_info; 4657 4658 } while (paddr && msdu_cnt); 4659 4660 if (last) 4661 last->next = NULL; 4662 4663 *tail_msdu = msdu; 4664 4665 if (msdu_cnt == 0) 4666 *npackets = 1; 4667 4668 return rx_bufs_used; 4669 } 4670 4671 static void ath11k_dp_rx_msdus_set_payload(struct sk_buff *msdu) 4672 { 4673 u32 rx_pkt_offset, l2_hdr_offset; 4674 4675 rx_pkt_offset = sizeof(struct hal_rx_desc); 4676 l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad((struct hal_rx_desc *)msdu->data); 4677 skb_pull(msdu, rx_pkt_offset + l2_hdr_offset); 4678 } 4679 4680 static struct sk_buff * 4681 ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar, 4682 u32 mac_id, struct sk_buff *head_msdu, 4683 struct sk_buff *last_msdu, 4684 struct ieee80211_rx_status *rxs) 4685 { 4686 struct sk_buff *msdu, *mpdu_buf, *prev_buf; 4687 u32 decap_format, wifi_hdr_len; 4688 struct hal_rx_desc *rx_desc; 4689 char *hdr_desc; 4690 u8 *dest; 4691 struct ieee80211_hdr_3addr *wh; 4692 4693 mpdu_buf = NULL; 4694 4695 if (!head_msdu) 4696 goto err_merge_fail; 4697 4698 rx_desc = (struct hal_rx_desc *)head_msdu->data; 4699 4700 if (ath11k_dp_rxdesc_get_mpdulen_err(rx_desc)) 4701 return NULL; 4702 4703 decap_format = ath11k_dp_rxdesc_get_decap_format(rx_desc); 4704 4705 ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs); 4706 4707 if (decap_format == DP_RX_DECAP_TYPE_RAW) { 4708 ath11k_dp_rx_msdus_set_payload(head_msdu); 4709 4710 prev_buf = head_msdu; 4711 msdu = head_msdu->next; 4712 4713 while (msdu) { 4714 ath11k_dp_rx_msdus_set_payload(msdu); 4715 4716 prev_buf = msdu; 4717 msdu = msdu->next; 4718 } 4719 4720 prev_buf->next = NULL; 4721 4722 skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN); 4723 } else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) { 4724 __le16 qos_field; 4725 u8 qos_pkt = 0; 4726 4727 rx_desc = (struct hal_rx_desc *)head_msdu->data; 4728 hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc); 4729 4730 /* Base size */ 4731 wifi_hdr_len = sizeof(struct ieee80211_hdr_3addr); 4732 wh = (struct ieee80211_hdr_3addr *)hdr_desc; 4733 4734 if (ieee80211_is_data_qos(wh->frame_control)) { 4735 struct ieee80211_qos_hdr *qwh = 4736 (struct ieee80211_qos_hdr *)hdr_desc; 4737 4738 qos_field = qwh->qos_ctrl; 4739 qos_pkt = 1; 4740 } 4741 msdu = head_msdu; 4742 4743 while (msdu) { 4744 rx_desc = (struct hal_rx_desc *)msdu->data; 4745 hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc); 4746 4747 if (qos_pkt) { 4748 dest = skb_push(msdu, sizeof(__le16)); 4749 if (!dest) 4750 goto err_merge_fail; 4751 memcpy(dest, hdr_desc, wifi_hdr_len); 4752 memcpy(dest + wifi_hdr_len, 4753 (u8 *)&qos_field, sizeof(__le16)); 4754 } 4755 ath11k_dp_rx_msdus_set_payload(msdu); 4756 prev_buf = msdu; 4757 msdu = msdu->next; 4758 } 4759 dest = skb_put(prev_buf, HAL_RX_FCS_LEN); 4760 if (!dest) 4761 goto err_merge_fail; 4762 4763 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4764 "mpdu_buf %pK mpdu_buf->len %u", 4765 prev_buf, prev_buf->len); 4766 } else { 4767 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4768 "decap format %d is not supported!\n", 4769 decap_format); 4770 goto err_merge_fail; 4771 } 4772 4773 return head_msdu; 4774 4775 err_merge_fail: 4776 if (mpdu_buf && decap_format != DP_RX_DECAP_TYPE_RAW) { 4777 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4778 "err_merge_fail mpdu_buf %pK", mpdu_buf); 4779 /* Free the head buffer */ 4780 dev_kfree_skb_any(mpdu_buf); 4781 } 4782 return NULL; 4783 } 4784 4785 static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id, 4786 struct sk_buff *head_msdu, 4787 struct sk_buff *tail_msdu, 4788 struct napi_struct *napi) 4789 { 4790 struct ath11k_pdev_dp *dp = &ar->dp; 4791 struct sk_buff *mon_skb, *skb_next, *header; 4792 struct ieee80211_rx_status *rxs = &dp->rx_status, *status; 4793 4794 mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu, 4795 tail_msdu, rxs); 4796 4797 if (!mon_skb) 4798 goto mon_deliver_fail; 4799 4800 header = mon_skb; 4801 4802 rxs->flag = 0; 4803 do { 4804 skb_next = mon_skb->next; 4805 if (!skb_next) 4806 rxs->flag &= ~RX_FLAG_AMSDU_MORE; 4807 else 4808 rxs->flag |= RX_FLAG_AMSDU_MORE; 4809 4810 if (mon_skb == header) { 4811 header = NULL; 4812 rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN; 4813 } else { 4814 rxs->flag |= RX_FLAG_ALLOW_SAME_PN; 4815 } 4816 rxs->flag |= RX_FLAG_ONLY_MONITOR; 4817 4818 status = IEEE80211_SKB_RXCB(mon_skb); 4819 *status = *rxs; 4820 4821 ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb); 4822 mon_skb = skb_next; 4823 } while (mon_skb); 4824 rxs->flag = 0; 4825 4826 return 0; 4827 4828 mon_deliver_fail: 4829 mon_skb = head_msdu; 4830 while (mon_skb) { 4831 skb_next = mon_skb->next; 4832 dev_kfree_skb_any(mon_skb); 4833 mon_skb = skb_next; 4834 } 4835 return -EINVAL; 4836 } 4837 4838 static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id, 4839 u32 quota, struct napi_struct *napi) 4840 { 4841 struct ath11k_pdev_dp *dp = &ar->dp; 4842 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 4843 void *ring_entry; 4844 void *mon_dst_srng; 4845 u32 ppdu_id; 4846 u32 rx_bufs_used; 4847 u32 ring_id; 4848 struct ath11k_pdev_mon_stats *rx_mon_stats; 4849 u32 npackets = 0; 4850 4851 if (ar->ab->hw_params.rxdma1_enable) 4852 ring_id = dp->rxdma_mon_dst_ring.ring_id; 4853 else 4854 ring_id = dp->rxdma_err_dst_ring[mac_id].ring_id; 4855 4856 mon_dst_srng = &ar->ab->hal.srng_list[ring_id]; 4857 4858 if (!mon_dst_srng) { 4859 ath11k_warn(ar->ab, 4860 "HAL Monitor Destination Ring Init Failed -- %pK", 4861 mon_dst_srng); 4862 return; 4863 } 4864 4865 spin_lock_bh(&pmon->mon_lock); 4866 4867 ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng); 4868 4869 ppdu_id = pmon->mon_ppdu_info.ppdu_id; 4870 rx_bufs_used = 0; 4871 rx_mon_stats = &pmon->rx_mon_stats; 4872 4873 while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) { 4874 struct sk_buff *head_msdu, *tail_msdu; 4875 4876 head_msdu = NULL; 4877 tail_msdu = NULL; 4878 4879 rx_bufs_used += ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry, 4880 &head_msdu, 4881 &tail_msdu, 4882 &npackets, &ppdu_id); 4883 4884 if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) { 4885 pmon->mon_ppdu_status = DP_PPDU_STATUS_START; 4886 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4887 "dest_rx: new ppdu_id %x != status ppdu_id %x", 4888 ppdu_id, pmon->mon_ppdu_info.ppdu_id); 4889 break; 4890 } 4891 if (head_msdu && tail_msdu) { 4892 ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu, 4893 tail_msdu, napi); 4894 rx_mon_stats->dest_mpdu_done++; 4895 } 4896 4897 ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab, 4898 mon_dst_srng); 4899 } 4900 ath11k_hal_srng_access_end(ar->ab, mon_dst_srng); 4901 4902 spin_unlock_bh(&pmon->mon_lock); 4903 4904 if (rx_bufs_used) { 4905 rx_mon_stats->dest_ppdu_done++; 4906 if (ar->ab->hw_params.rxdma1_enable) 4907 ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, 4908 &dp->rxdma_mon_buf_ring, 4909 rx_bufs_used, 4910 HAL_RX_BUF_RBM_SW3_BM); 4911 else 4912 ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, 4913 &dp->rx_refill_buf_ring, 4914 rx_bufs_used, 4915 HAL_RX_BUF_RBM_SW3_BM); 4916 } 4917 } 4918 4919 static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar, 4920 int mac_id, u32 quota, 4921 struct napi_struct *napi) 4922 { 4923 struct ath11k_pdev_dp *dp = &ar->dp; 4924 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 4925 struct hal_rx_mon_ppdu_info *ppdu_info; 4926 struct sk_buff *status_skb; 4927 u32 tlv_status = HAL_TLV_STATUS_BUF_DONE; 4928 struct ath11k_pdev_mon_stats *rx_mon_stats; 4929 4930 ppdu_info = &pmon->mon_ppdu_info; 4931 rx_mon_stats = &pmon->rx_mon_stats; 4932 4933 if (pmon->mon_ppdu_status != DP_PPDU_STATUS_START) 4934 return; 4935 4936 while (!skb_queue_empty(&pmon->rx_status_q)) { 4937 status_skb = skb_dequeue(&pmon->rx_status_q); 4938 4939 tlv_status = ath11k_hal_rx_parse_mon_status(ar->ab, ppdu_info, 4940 status_skb); 4941 if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) { 4942 rx_mon_stats->status_ppdu_done++; 4943 pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE; 4944 ath11k_dp_rx_mon_dest_process(ar, mac_id, quota, napi); 4945 pmon->mon_ppdu_status = DP_PPDU_STATUS_START; 4946 } 4947 dev_kfree_skb_any(status_skb); 4948 } 4949 } 4950 4951 static int ath11k_dp_mon_process_rx(struct ath11k_base *ab, int mac_id, 4952 struct napi_struct *napi, int budget) 4953 { 4954 struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id); 4955 struct ath11k_pdev_dp *dp = &ar->dp; 4956 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 4957 int num_buffs_reaped = 0; 4958 4959 num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ar->ab, mac_id, &budget, 4960 &pmon->rx_status_q); 4961 if (num_buffs_reaped) 4962 ath11k_dp_rx_mon_status_process_tlv(ar, mac_id, budget, napi); 4963 4964 return num_buffs_reaped; 4965 } 4966 4967 int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id, 4968 struct napi_struct *napi, int budget) 4969 { 4970 struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id); 4971 int ret = 0; 4972 4973 if (test_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags)) 4974 ret = ath11k_dp_mon_process_rx(ab, mac_id, napi, budget); 4975 else 4976 ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget); 4977 return ret; 4978 } 4979 4980 static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar) 4981 { 4982 struct ath11k_pdev_dp *dp = &ar->dp; 4983 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 4984 4985 skb_queue_head_init(&pmon->rx_status_q); 4986 4987 pmon->mon_ppdu_status = DP_PPDU_STATUS_START; 4988 4989 memset(&pmon->rx_mon_stats, 0, 4990 sizeof(pmon->rx_mon_stats)); 4991 return 0; 4992 } 4993 4994 int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar) 4995 { 4996 struct ath11k_pdev_dp *dp = &ar->dp; 4997 struct ath11k_mon_data *pmon = &dp->mon_data; 4998 struct hal_srng *mon_desc_srng = NULL; 4999 struct dp_srng *dp_srng; 5000 int ret = 0; 5001 u32 n_link_desc = 0; 5002 5003 ret = ath11k_dp_rx_pdev_mon_status_attach(ar); 5004 if (ret) { 5005 ath11k_warn(ar->ab, "pdev_mon_status_attach() failed"); 5006 return ret; 5007 } 5008 5009 /* if rxdma1_enable is false, no need to setup 5010 * rxdma_mon_desc_ring. 5011 */ 5012 if (!ar->ab->hw_params.rxdma1_enable) 5013 return 0; 5014 5015 dp_srng = &dp->rxdma_mon_desc_ring; 5016 n_link_desc = dp_srng->size / 5017 ath11k_hal_srng_get_entrysize(ar->ab, HAL_RXDMA_MONITOR_DESC); 5018 mon_desc_srng = 5019 &ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id]; 5020 5021 ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks, 5022 HAL_RXDMA_MONITOR_DESC, mon_desc_srng, 5023 n_link_desc); 5024 if (ret) { 5025 ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed"); 5026 return ret; 5027 } 5028 pmon->mon_last_linkdesc_paddr = 0; 5029 pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1; 5030 spin_lock_init(&pmon->mon_lock); 5031 5032 return 0; 5033 } 5034 5035 static int ath11k_dp_mon_link_free(struct ath11k *ar) 5036 { 5037 struct ath11k_pdev_dp *dp = &ar->dp; 5038 struct ath11k_mon_data *pmon = &dp->mon_data; 5039 5040 ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks, 5041 HAL_RXDMA_MONITOR_DESC, 5042 &dp->rxdma_mon_desc_ring); 5043 return 0; 5044 } 5045 5046 int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar) 5047 { 5048 ath11k_dp_mon_link_free(ar); 5049 return 0; 5050 } 5051 5052 int ath11k_dp_rx_pktlog_start(struct ath11k_base *ab) 5053 { 5054 /* start reap timer */ 5055 mod_timer(&ab->mon_reap_timer, 5056 jiffies + msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL)); 5057 5058 return 0; 5059 } 5060 5061 int ath11k_dp_rx_pktlog_stop(struct ath11k_base *ab, bool stop_timer) 5062 { 5063 int ret; 5064 5065 if (stop_timer) 5066 del_timer_sync(&ab->mon_reap_timer); 5067 5068 /* reap all the monitor related rings */ 5069 ret = ath11k_dp_purge_mon_ring(ab); 5070 if (ret) { 5071 ath11k_warn(ab, "failed to purge dp mon ring: %d\n", ret); 5072 return ret; 5073 } 5074 5075 return 0; 5076 } 5077