1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 /* 3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. 4 */ 5 6 #include <linux/ieee80211.h> 7 #include "core.h" 8 #include "debug.h" 9 #include "hal_desc.h" 10 #include "hw.h" 11 #include "dp_rx.h" 12 #include "hal_rx.h" 13 #include "dp_tx.h" 14 #include "peer.h" 15 16 static u8 *ath11k_dp_rx_h_80211_hdr(struct hal_rx_desc *desc) 17 { 18 return desc->hdr_status; 19 } 20 21 static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct hal_rx_desc *desc) 22 { 23 if (!(__le32_to_cpu(desc->mpdu_start.info1) & 24 RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID)) 25 return HAL_ENCRYPT_TYPE_OPEN; 26 27 return FIELD_GET(RX_MPDU_START_INFO2_ENC_TYPE, 28 __le32_to_cpu(desc->mpdu_start.info2)); 29 } 30 31 static u8 ath11k_dp_rx_h_mpdu_start_decap_type(struct hal_rx_desc *desc) 32 { 33 return FIELD_GET(RX_MPDU_START_INFO5_DECAP_TYPE, 34 __le32_to_cpu(desc->mpdu_start.info5)); 35 } 36 37 static bool ath11k_dp_rx_h_attn_msdu_done(struct hal_rx_desc *desc) 38 { 39 return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE, 40 __le32_to_cpu(desc->attention.info2)); 41 } 42 43 static bool ath11k_dp_rx_h_attn_first_mpdu(struct hal_rx_desc *desc) 44 { 45 return !!FIELD_GET(RX_ATTENTION_INFO1_FIRST_MPDU, 46 __le32_to_cpu(desc->attention.info1)); 47 } 48 49 static bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct hal_rx_desc *desc) 50 { 51 return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL, 52 __le32_to_cpu(desc->attention.info1)); 53 } 54 55 static bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct hal_rx_desc *desc) 56 { 57 return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL, 58 __le32_to_cpu(desc->attention.info1)); 59 } 60 61 static bool ath11k_dp_rx_h_attn_is_decrypted(struct hal_rx_desc *desc) 62 { 63 return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE, 64 __le32_to_cpu(desc->attention.info2)) == 65 RX_DESC_DECRYPT_STATUS_CODE_OK); 66 } 67 68 static u32 ath11k_dp_rx_h_attn_mpdu_err(struct hal_rx_desc *desc) 69 { 70 u32 info = __le32_to_cpu(desc->attention.info1); 71 u32 errmap = 0; 72 73 if (info & RX_ATTENTION_INFO1_FCS_ERR) 74 errmap |= DP_RX_MPDU_ERR_FCS; 75 76 if (info & RX_ATTENTION_INFO1_DECRYPT_ERR) 77 errmap |= DP_RX_MPDU_ERR_DECRYPT; 78 79 if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR) 80 errmap |= DP_RX_MPDU_ERR_TKIP_MIC; 81 82 if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR) 83 errmap |= DP_RX_MPDU_ERR_AMSDU_ERR; 84 85 if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR) 86 errmap |= DP_RX_MPDU_ERR_OVERFLOW; 87 88 if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR) 89 errmap |= DP_RX_MPDU_ERR_MSDU_LEN; 90 91 if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR) 92 errmap |= DP_RX_MPDU_ERR_MPDU_LEN; 93 94 return errmap; 95 } 96 97 static u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct hal_rx_desc *desc) 98 { 99 return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH, 100 __le32_to_cpu(desc->msdu_start.info1)); 101 } 102 103 static u8 ath11k_dp_rx_h_msdu_start_sgi(struct hal_rx_desc *desc) 104 { 105 return FIELD_GET(RX_MSDU_START_INFO3_SGI, 106 __le32_to_cpu(desc->msdu_start.info3)); 107 } 108 109 static u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct hal_rx_desc *desc) 110 { 111 return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS, 112 __le32_to_cpu(desc->msdu_start.info3)); 113 } 114 115 static u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct hal_rx_desc *desc) 116 { 117 return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW, 118 __le32_to_cpu(desc->msdu_start.info3)); 119 } 120 121 static u32 ath11k_dp_rx_h_msdu_start_freq(struct hal_rx_desc *desc) 122 { 123 return 
__le32_to_cpu(desc->msdu_start.phy_meta_data); 124 } 125 126 static u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct hal_rx_desc *desc) 127 { 128 return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE, 129 __le32_to_cpu(desc->msdu_start.info3)); 130 } 131 132 static u8 ath11k_dp_rx_h_msdu_start_nss(struct hal_rx_desc *desc) 133 { 134 u8 mimo_ss_bitmap = FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP, 135 __le32_to_cpu(desc->msdu_start.info3)); 136 137 return hweight8(mimo_ss_bitmap); 138 } 139 140 static u8 ath11k_dp_rx_h_msdu_end_l3pad(struct hal_rx_desc *desc) 141 { 142 return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING, 143 __le32_to_cpu(desc->msdu_end.info2)); 144 } 145 146 static bool ath11k_dp_rx_h_msdu_end_first_msdu(struct hal_rx_desc *desc) 147 { 148 return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU, 149 __le32_to_cpu(desc->msdu_end.info2)); 150 } 151 152 static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct hal_rx_desc *desc) 153 { 154 return !!FIELD_GET(RX_MSDU_END_INFO2_LAST_MSDU, 155 __le32_to_cpu(desc->msdu_end.info2)); 156 } 157 158 static void ath11k_dp_rx_desc_end_tlv_copy(struct hal_rx_desc *fdesc, 159 struct hal_rx_desc *ldesc) 160 { 161 memcpy((u8 *)&fdesc->msdu_end, (u8 *)&ldesc->msdu_end, 162 sizeof(struct rx_msdu_end)); 163 memcpy((u8 *)&fdesc->attention, (u8 *)&ldesc->attention, 164 sizeof(struct rx_attention)); 165 memcpy((u8 *)&fdesc->mpdu_end, (u8 *)&ldesc->mpdu_end, 166 sizeof(struct rx_mpdu_end)); 167 } 168 169 static u32 ath11k_dp_rxdesc_get_mpdulen_err(struct hal_rx_desc *rx_desc) 170 { 171 struct rx_attention *rx_attn; 172 173 rx_attn = &rx_desc->attention; 174 175 return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR, 176 __le32_to_cpu(rx_attn->info1)); 177 } 178 179 static u32 ath11k_dp_rxdesc_get_decap_format(struct hal_rx_desc *rx_desc) 180 { 181 struct rx_msdu_start *rx_msdu_start; 182 183 rx_msdu_start = &rx_desc->msdu_start; 184 185 return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT, 186 __le32_to_cpu(rx_msdu_start->info2)); 187 } 188 189 static u8 *ath11k_dp_rxdesc_get_80211hdr(struct hal_rx_desc *rx_desc) 190 { 191 u8 *rx_pkt_hdr; 192 193 rx_pkt_hdr = &rx_desc->msdu_payload[0]; 194 195 return rx_pkt_hdr; 196 } 197 198 static bool ath11k_dp_rxdesc_mpdu_valid(struct hal_rx_desc *rx_desc) 199 { 200 u32 tlv_tag; 201 202 tlv_tag = FIELD_GET(HAL_TLV_HDR_TAG, 203 __le32_to_cpu(rx_desc->mpdu_start_tag)); 204 205 return tlv_tag == HAL_RX_MPDU_START ? 
true : false; 206 } 207 208 static u32 ath11k_dp_rxdesc_get_ppduid(struct hal_rx_desc *rx_desc) 209 { 210 return __le16_to_cpu(rx_desc->mpdu_start.phy_ppdu_id); 211 } 212 213 /* Returns number of Rx buffers replenished */ 214 int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id, 215 struct dp_rxdma_ring *rx_ring, 216 int req_entries, 217 enum hal_rx_buf_return_buf_manager mgr, 218 gfp_t gfp) 219 { 220 struct hal_srng *srng; 221 u32 *desc; 222 struct sk_buff *skb; 223 int num_free; 224 int num_remain; 225 int buf_id; 226 u32 cookie; 227 dma_addr_t paddr; 228 229 req_entries = min(req_entries, rx_ring->bufs_max); 230 231 srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; 232 233 spin_lock_bh(&srng->lock); 234 235 ath11k_hal_srng_access_begin(ab, srng); 236 237 num_free = ath11k_hal_srng_src_num_free(ab, srng, true); 238 if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4)) 239 req_entries = num_free; 240 241 req_entries = min(num_free, req_entries); 242 num_remain = req_entries; 243 244 while (num_remain > 0) { 245 skb = dev_alloc_skb(DP_RX_BUFFER_SIZE + 246 DP_RX_BUFFER_ALIGN_SIZE); 247 if (!skb) 248 break; 249 250 if (!IS_ALIGNED((unsigned long)skb->data, 251 DP_RX_BUFFER_ALIGN_SIZE)) { 252 skb_pull(skb, 253 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) - 254 skb->data); 255 } 256 257 paddr = dma_map_single(ab->dev, skb->data, 258 skb->len + skb_tailroom(skb), 259 DMA_FROM_DEVICE); 260 if (dma_mapping_error(ab->dev, paddr)) 261 goto fail_free_skb; 262 263 spin_lock_bh(&rx_ring->idr_lock); 264 buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0, 265 rx_ring->bufs_max * 3, gfp); 266 spin_unlock_bh(&rx_ring->idr_lock); 267 if (buf_id < 0) 268 goto fail_dma_unmap; 269 270 desc = ath11k_hal_srng_src_get_next_entry(ab, srng); 271 if (!desc) 272 goto fail_idr_remove; 273 274 ATH11K_SKB_RXCB(skb)->paddr = paddr; 275 276 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) | 277 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 278 279 num_remain--; 280 281 ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr); 282 } 283 284 ath11k_hal_srng_access_end(ab, srng); 285 286 spin_unlock_bh(&srng->lock); 287 288 return req_entries - num_remain; 289 290 fail_idr_remove: 291 spin_lock_bh(&rx_ring->idr_lock); 292 idr_remove(&rx_ring->bufs_idr, buf_id); 293 spin_unlock_bh(&rx_ring->idr_lock); 294 fail_dma_unmap: 295 dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), 296 DMA_FROM_DEVICE); 297 fail_free_skb: 298 dev_kfree_skb_any(skb); 299 300 ath11k_hal_srng_access_end(ab, srng); 301 302 spin_unlock_bh(&srng->lock); 303 304 return req_entries - num_remain; 305 } 306 307 static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar, 308 struct dp_rxdma_ring *rx_ring) 309 { 310 struct ath11k_pdev_dp *dp = &ar->dp; 311 struct sk_buff *skb; 312 int buf_id; 313 314 spin_lock_bh(&rx_ring->idr_lock); 315 idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) { 316 idr_remove(&rx_ring->bufs_idr, buf_id); 317 /* TODO: Understand where internal driver does this dma_unmap of 318 * of rxdma_buffer. 
319 */ 320 dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr, 321 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); 322 dev_kfree_skb_any(skb); 323 } 324 325 idr_destroy(&rx_ring->bufs_idr); 326 spin_unlock_bh(&rx_ring->idr_lock); 327 328 rx_ring = &dp->rx_mon_status_refill_ring; 329 330 spin_lock_bh(&rx_ring->idr_lock); 331 idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) { 332 idr_remove(&rx_ring->bufs_idr, buf_id); 333 /* XXX: Understand where internal driver does this dma_unmap of 334 * of rxdma_buffer. 335 */ 336 dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr, 337 skb->len + skb_tailroom(skb), DMA_BIDIRECTIONAL); 338 dev_kfree_skb_any(skb); 339 } 340 341 idr_destroy(&rx_ring->bufs_idr); 342 spin_unlock_bh(&rx_ring->idr_lock); 343 return 0; 344 } 345 346 static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar) 347 { 348 struct ath11k_pdev_dp *dp = &ar->dp; 349 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; 350 351 ath11k_dp_rxdma_buf_ring_free(ar, rx_ring); 352 353 rx_ring = &dp->rxdma_mon_buf_ring; 354 ath11k_dp_rxdma_buf_ring_free(ar, rx_ring); 355 356 rx_ring = &dp->rx_mon_status_refill_ring; 357 ath11k_dp_rxdma_buf_ring_free(ar, rx_ring); 358 return 0; 359 } 360 361 static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar, 362 struct dp_rxdma_ring *rx_ring, 363 u32 ringtype) 364 { 365 struct ath11k_pdev_dp *dp = &ar->dp; 366 int num_entries; 367 368 num_entries = rx_ring->refill_buf_ring.size / 369 ath11k_hal_srng_get_entrysize(ringtype); 370 371 rx_ring->bufs_max = num_entries; 372 ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries, 373 HAL_RX_BUF_RBM_SW3_BM, GFP_KERNEL); 374 return 0; 375 } 376 377 static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar) 378 { 379 struct ath11k_pdev_dp *dp = &ar->dp; 380 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; 381 382 ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF); 383 384 rx_ring = &dp->rxdma_mon_buf_ring; 385 ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF); 386 387 rx_ring = &dp->rx_mon_status_refill_ring; 388 ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS); 389 390 return 0; 391 } 392 393 static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar) 394 { 395 struct ath11k_pdev_dp *dp = &ar->dp; 396 397 ath11k_dp_srng_cleanup(ar->ab, &dp->rx_refill_buf_ring.refill_buf_ring); 398 ath11k_dp_srng_cleanup(ar->ab, &dp->rxdma_err_dst_ring); 399 ath11k_dp_srng_cleanup(ar->ab, &dp->rx_mon_status_refill_ring.refill_buf_ring); 400 ath11k_dp_srng_cleanup(ar->ab, &dp->rxdma_mon_buf_ring.refill_buf_ring); 401 } 402 403 void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab) 404 { 405 struct ath11k_pdev_dp *dp; 406 struct ath11k *ar; 407 int i; 408 409 for (i = 0; i < ab->num_radios; i++) { 410 ar = ab->pdevs[i].ar; 411 dp = &ar->dp; 412 ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring); 413 } 414 } 415 416 int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab) 417 { 418 struct ath11k *ar; 419 struct ath11k_pdev_dp *dp; 420 int ret; 421 int i; 422 423 for (i = 0; i < ab->num_radios; i++) { 424 ar = ab->pdevs[i].ar; 425 dp = &ar->dp; 426 ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring, HAL_REO_DST, 427 dp->mac_id, dp->mac_id, 428 DP_REO_DST_RING_SIZE); 429 if (ret) { 430 ath11k_warn(ar->ab, "failed to setup reo_dst_ring\n"); 431 goto err_reo_cleanup; 432 } 433 } 434 435 return 0; 436 437 err_reo_cleanup: 438 ath11k_dp_pdev_reo_cleanup(ab); 439 440 return ret; 441 } 442 443 static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar) 444 { 445 
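	/* Allocates the per-pdev RX SRNGs used by the data path: the RXDMA
	 * refill ring, the RXDMA error destination ring and the monitor mode
	 * status/buf/dst/desc rings.  The rings are only carved out here; the
	 * buffer-providing rings among them are replenished later from
	 * ath11k_dp_rxdma_pdev_buf_setup().
	 */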
struct ath11k_pdev_dp *dp = &ar->dp; 446 struct dp_srng *srng = NULL; 447 int ret; 448 449 ret = ath11k_dp_srng_setup(ar->ab, 450 &dp->rx_refill_buf_ring.refill_buf_ring, 451 HAL_RXDMA_BUF, 0, 452 dp->mac_id, DP_RXDMA_BUF_RING_SIZE); 453 if (ret) { 454 ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n"); 455 return ret; 456 } 457 458 ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring, 459 HAL_RXDMA_DST, 0, dp->mac_id, 460 DP_RXDMA_ERR_DST_RING_SIZE); 461 if (ret) { 462 ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring\n"); 463 return ret; 464 } 465 466 srng = &dp->rx_mon_status_refill_ring.refill_buf_ring; 467 ret = ath11k_dp_srng_setup(ar->ab, 468 srng, 469 HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id, 470 DP_RXDMA_MON_STATUS_RING_SIZE); 471 if (ret) { 472 ath11k_warn(ar->ab, 473 "failed to setup rx_mon_status_refill_ring\n"); 474 return ret; 475 } 476 ret = ath11k_dp_srng_setup(ar->ab, 477 &dp->rxdma_mon_buf_ring.refill_buf_ring, 478 HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id, 479 DP_RXDMA_MONITOR_BUF_RING_SIZE); 480 if (ret) { 481 ath11k_warn(ar->ab, 482 "failed to setup HAL_RXDMA_MONITOR_BUF\n"); 483 return ret; 484 } 485 486 ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring, 487 HAL_RXDMA_MONITOR_DST, 0, dp->mac_id, 488 DP_RXDMA_MONITOR_DST_RING_SIZE); 489 if (ret) { 490 ath11k_warn(ar->ab, 491 "failed to setup HAL_RXDMA_MONITOR_DST\n"); 492 return ret; 493 } 494 495 ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring, 496 HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id, 497 DP_RXDMA_MONITOR_DESC_RING_SIZE); 498 if (ret) { 499 ath11k_warn(ar->ab, 500 "failed to setup HAL_RXDMA_MONITOR_DESC\n"); 501 return ret; 502 } 503 504 return 0; 505 } 506 507 void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab) 508 { 509 struct ath11k_dp *dp = &ab->dp; 510 struct dp_reo_cmd *cmd, *tmp; 511 struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache; 512 513 spin_lock_bh(&dp->reo_cmd_lock); 514 list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) { 515 list_del(&cmd->list); 516 dma_unmap_single(ab->dev, cmd->data.paddr, 517 cmd->data.size, DMA_BIDIRECTIONAL); 518 kfree(cmd->data.vaddr); 519 kfree(cmd); 520 } 521 522 list_for_each_entry_safe(cmd_cache, tmp_cache, 523 &dp->reo_cmd_cache_flush_list, list) { 524 list_del(&cmd_cache->list); 525 dma_unmap_single(ab->dev, cmd_cache->data.paddr, 526 cmd_cache->data.size, DMA_BIDIRECTIONAL); 527 kfree(cmd_cache->data.vaddr); 528 kfree(cmd_cache); 529 } 530 spin_unlock_bh(&dp->reo_cmd_lock); 531 } 532 533 static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx, 534 enum hal_reo_cmd_status status) 535 { 536 struct dp_rx_tid *rx_tid = ctx; 537 538 if (status != HAL_REO_CMD_SUCCESS) 539 ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n", 540 rx_tid->tid, status); 541 542 dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size, 543 DMA_BIDIRECTIONAL); 544 kfree(rx_tid->vaddr); 545 } 546 547 static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab, 548 struct dp_rx_tid *rx_tid) 549 { 550 struct ath11k_hal_reo_cmd cmd = {0}; 551 unsigned long tot_desc_sz, desc_sz; 552 int ret; 553 554 tot_desc_sz = rx_tid->size; 555 desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID); 556 557 while (tot_desc_sz > desc_sz) { 558 tot_desc_sz -= desc_sz; 559 cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz); 560 cmd.addr_hi = upper_32_bits(rx_tid->paddr); 561 ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid, 562 HAL_REO_CMD_FLUSH_CACHE, &cmd, 563 NULL); 564 if (ret) 565 ath11k_warn(ab, 566 "failed to send 
HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n", 567 rx_tid->tid, ret); 568 } 569 570 memset(&cmd, 0, sizeof(cmd)); 571 cmd.addr_lo = lower_32_bits(rx_tid->paddr); 572 cmd.addr_hi = upper_32_bits(rx_tid->paddr); 573 cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS; 574 ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid, 575 HAL_REO_CMD_FLUSH_CACHE, 576 &cmd, ath11k_dp_reo_cmd_free); 577 if (ret) { 578 ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n", 579 rx_tid->tid, ret); 580 dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, 581 DMA_BIDIRECTIONAL); 582 kfree(rx_tid->vaddr); 583 } 584 } 585 586 static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx, 587 enum hal_reo_cmd_status status) 588 { 589 struct ath11k_base *ab = dp->ab; 590 struct dp_rx_tid *rx_tid = ctx; 591 struct dp_reo_cache_flush_elem *elem, *tmp; 592 593 if (status == HAL_REO_CMD_DRAIN) { 594 goto free_desc; 595 } else if (status != HAL_REO_CMD_SUCCESS) { 596 /* Shouldn't happen! Cleanup in case of other failure? */ 597 ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n", 598 rx_tid->tid, status); 599 return; 600 } 601 602 elem = kzalloc(sizeof(*elem), GFP_ATOMIC); 603 if (!elem) 604 goto free_desc; 605 606 elem->ts = jiffies; 607 memcpy(&elem->data, rx_tid, sizeof(*rx_tid)); 608 609 spin_lock_bh(&dp->reo_cmd_lock); 610 list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list); 611 spin_unlock_bh(&dp->reo_cmd_lock); 612 613 /* Flush and invalidate aged REO desc from HW cache */ 614 spin_lock_bh(&dp->reo_cmd_lock); 615 list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list, 616 list) { 617 if (time_after(jiffies, elem->ts + 618 msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) { 619 list_del(&elem->list); 620 spin_unlock_bh(&dp->reo_cmd_lock); 621 622 ath11k_dp_reo_cache_flush(ab, &elem->data); 623 kfree(elem); 624 spin_lock_bh(&dp->reo_cmd_lock); 625 } 626 } 627 spin_unlock_bh(&dp->reo_cmd_lock); 628 629 return; 630 free_desc: 631 dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, 632 DMA_BIDIRECTIONAL); 633 kfree(rx_tid->vaddr); 634 } 635 636 void ath11k_peer_rx_tid_delete(struct ath11k *ar, 637 struct ath11k_peer *peer, u8 tid) 638 { 639 struct ath11k_hal_reo_cmd cmd = {0}; 640 struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; 641 int ret; 642 643 if (!rx_tid->active) 644 return; 645 646 cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS; 647 cmd.addr_lo = lower_32_bits(rx_tid->paddr); 648 cmd.addr_hi = upper_32_bits(rx_tid->paddr); 649 cmd.upd0 |= HAL_REO_CMD_UPD0_VLD; 650 ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid, 651 HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd, 652 ath11k_dp_rx_tid_del_func); 653 if (ret) { 654 ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n", 655 tid, ret); 656 dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size, 657 DMA_BIDIRECTIONAL); 658 kfree(rx_tid->vaddr); 659 } 660 661 rx_tid->active = false; 662 } 663 664 void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer) 665 { 666 int i; 667 668 for (i = 0; i <= IEEE80211_NUM_TIDS; i++) 669 ath11k_peer_rx_tid_delete(ar, peer, i); 670 } 671 672 static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar, 673 struct ath11k_peer *peer, 674 struct dp_rx_tid *rx_tid, 675 u32 ba_win_sz, u16 ssn, 676 bool update_ssn) 677 { 678 struct ath11k_hal_reo_cmd cmd = {0}; 679 int ret; 680 681 cmd.addr_lo = lower_32_bits(rx_tid->paddr); 682 cmd.addr_hi = upper_32_bits(rx_tid->paddr); 683 cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS; 684 cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE; 685 
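	/* The BA window size is refreshed on every update; the SSN is only
	 * rewritten when the caller explicitly requests it via update_ssn.
	 */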
cmd.ba_window_size = ba_win_sz; 686 687 if (update_ssn) { 688 cmd.upd0 |= HAL_REO_CMD_UPD0_SSN; 689 cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn); 690 } 691 692 ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid, 693 HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd, 694 NULL); 695 if (ret) { 696 ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n", 697 rx_tid->tid, ret); 698 return ret; 699 } 700 701 rx_tid->ba_win_sz = ba_win_sz; 702 703 return 0; 704 } 705 706 static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab, 707 const u8 *peer_mac, int vdev_id, u8 tid) 708 { 709 struct ath11k_peer *peer; 710 struct dp_rx_tid *rx_tid; 711 712 spin_lock_bh(&ab->base_lock); 713 714 peer = ath11k_peer_find(ab, vdev_id, peer_mac); 715 if (!peer) { 716 ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n"); 717 goto unlock_exit; 718 } 719 720 rx_tid = &peer->rx_tid[tid]; 721 if (!rx_tid->active) 722 goto unlock_exit; 723 724 dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, 725 DMA_BIDIRECTIONAL); 726 kfree(rx_tid->vaddr); 727 728 rx_tid->active = false; 729 730 unlock_exit: 731 spin_unlock_bh(&ab->base_lock); 732 } 733 734 int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id, 735 u8 tid, u32 ba_win_sz, u16 ssn) 736 { 737 struct ath11k_base *ab = ar->ab; 738 struct ath11k_peer *peer; 739 struct dp_rx_tid *rx_tid; 740 u32 hw_desc_sz; 741 u32 *addr_aligned; 742 void *vaddr; 743 dma_addr_t paddr; 744 int ret; 745 746 spin_lock_bh(&ab->base_lock); 747 748 peer = ath11k_peer_find(ab, vdev_id, peer_mac); 749 if (!peer) { 750 ath11k_warn(ab, "failed to find the peer to set up rx tid\n"); 751 spin_unlock_bh(&ab->base_lock); 752 return -ENOENT; 753 } 754 755 rx_tid = &peer->rx_tid[tid]; 756 /* Update the tid queue if it is already setup */ 757 if (rx_tid->active) { 758 paddr = rx_tid->paddr; 759 ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid, 760 ba_win_sz, ssn, true); 761 spin_unlock_bh(&ab->base_lock); 762 if (ret) { 763 ath11k_warn(ab, "failed to update reo for rx tid %d\n", tid); 764 return ret; 765 } 766 767 ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, 768 peer_mac, paddr, 769 tid, 1, ba_win_sz); 770 if (ret) 771 ath11k_warn(ab, "failed to send wmi command to update rx reorder queue, tid :%d (%d)\n", 772 tid, ret); 773 return ret; 774 } 775 776 rx_tid->tid = tid; 777 778 rx_tid->ba_win_sz = ba_win_sz; 779 780 /* TODO: Optimize the memory allocation for qos tid based on the 781 * the actual BA window size in REO tid update path. 
782 */ 783 if (tid == HAL_DESC_REO_NON_QOS_TID) 784 hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid); 785 else 786 hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid); 787 788 vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_KERNEL); 789 if (!vaddr) { 790 spin_unlock_bh(&ab->base_lock); 791 return -ENOMEM; 792 } 793 794 addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN); 795 796 ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz, ssn); 797 798 paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz, 799 DMA_BIDIRECTIONAL); 800 801 ret = dma_mapping_error(ab->dev, paddr); 802 if (ret) { 803 spin_unlock_bh(&ab->base_lock); 804 goto err_mem_free; 805 } 806 807 rx_tid->vaddr = vaddr; 808 rx_tid->paddr = paddr; 809 rx_tid->size = hw_desc_sz; 810 rx_tid->active = true; 811 812 spin_unlock_bh(&ab->base_lock); 813 814 ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac, 815 paddr, tid, 1, ba_win_sz); 816 if (ret) { 817 ath11k_warn(ar->ab, "failed to setup rx reorder queue, tid :%d (%d)\n", 818 tid, ret); 819 ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid); 820 } 821 822 return ret; 823 824 err_mem_free: 825 kfree(vaddr); 826 827 return ret; 828 } 829 830 int ath11k_dp_rx_ampdu_start(struct ath11k *ar, 831 struct ieee80211_ampdu_params *params) 832 { 833 struct ath11k_base *ab = ar->ab; 834 struct ath11k_sta *arsta = (void *)params->sta->drv_priv; 835 int vdev_id = arsta->arvif->vdev_id; 836 int ret; 837 838 ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id, 839 params->tid, params->buf_size, 840 params->ssn); 841 if (ret) 842 ath11k_warn(ab, "failed to setup rx tid %d\n", ret); 843 844 return ret; 845 } 846 847 int ath11k_dp_rx_ampdu_stop(struct ath11k *ar, 848 struct ieee80211_ampdu_params *params) 849 { 850 struct ath11k_base *ab = ar->ab; 851 struct ath11k_peer *peer; 852 struct ath11k_sta *arsta = (void *)params->sta->drv_priv; 853 int vdev_id = arsta->arvif->vdev_id; 854 dma_addr_t paddr; 855 bool active; 856 int ret; 857 858 spin_lock_bh(&ab->base_lock); 859 860 peer = ath11k_peer_find(ab, vdev_id, params->sta->addr); 861 if (!peer) { 862 ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n"); 863 spin_unlock_bh(&ab->base_lock); 864 return -ENOENT; 865 } 866 867 paddr = peer->rx_tid[params->tid].paddr; 868 active = peer->rx_tid[params->tid].active; 869 870 if (!active) { 871 spin_unlock_bh(&ab->base_lock); 872 return 0; 873 } 874 875 ret = ath11k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false); 876 spin_unlock_bh(&ab->base_lock); 877 if (ret) { 878 ath11k_warn(ab, "failed to update reo for rx tid %d: %d\n", 879 params->tid, ret); 880 return ret; 881 } 882 883 ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, 884 params->sta->addr, paddr, 885 params->tid, 1, 1); 886 if (ret) 887 ath11k_warn(ab, "failed to send wmi to delete rx tid %d\n", 888 ret); 889 890 return ret; 891 } 892 893 static int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats, 894 u16 peer_id) 895 { 896 int i; 897 898 for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) { 899 if (ppdu_stats->user_stats[i].is_valid_peer_id) { 900 if (peer_id == ppdu_stats->user_stats[i].peer_id) 901 return i; 902 } else { 903 return i; 904 } 905 } 906 907 return -EINVAL; 908 } 909 910 static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab, 911 u16 tag, u16 len, const void *ptr, 912 void *data) 913 { 914 struct htt_ppdu_stats_info *ppdu_info; 915 struct htt_ppdu_user_stats *user_stats; 916 int cur_user; 917 u16 peer_id; 918 919 
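	/* Invoked once per TLV from ath11k_dp_htt_tlv_iter().  Each tag is
	 * length-checked against the expected struct before its payload is
	 * copied into the PPDU stats, and user-specific TLVs are placed in
	 * the slot chosen by ath11k_get_ppdu_user_index() for the sw peer id.
	 */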
	ppdu_info = (struct htt_ppdu_stats_info *)data;

	switch (tag) {
	case HTT_PPDU_STATS_TAG_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_common)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}
		memcpy((void *)&ppdu_info->ppdu_stats.common, ptr,
		       sizeof(struct htt_ppdu_stats_common));
		break;
	case HTT_PPDU_STATS_TAG_USR_RATE:
		if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->rate, ptr,
		       sizeof(struct htt_ppdu_stats_user_rate));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->cmpltn_cmn, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
		if (len <
		    sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id =
			((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->ack_ba, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
		user_stats->tlv_flags |= BIT(tag);
		break;
	}
	return 0;
}

int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
			   int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
				       const void *ptr, void *data),
			   void *data)
{
	const struct htt_tlv *tlv;
	const void *begin = ptr;
	u16 tlv_tag, tlv_len;
	int ret = -EINVAL;

	while (len > 0) {
		if (len < sizeof(*tlv)) {
			ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}
		tlv = (struct htt_tlv *)ptr;
		tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header);
		tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		if (tlv_len > len) {
			ath11k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}
		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
		if (ret == -ENOMEM)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}
	return 0;
}

static inline u32 ath11k_he_gi_to_nl80211_he_gi(u8 sgi)
{
	u32 ret = 0;

	switch (sgi) {
	case RX_MSDU_START_SGI_0_8_US:
		ret = NL80211_RATE_INFO_HE_GI_0_8;
		break;
	case RX_MSDU_START_SGI_1_6_US:
		ret = NL80211_RATE_INFO_HE_GI_1_6;
		break;
	case RX_MSDU_START_SGI_3_2_US:
		ret = NL80211_RATE_INFO_HE_GI_3_2;
		break;
	}

	return ret;
}

static void
ath11k_update_per_peer_tx_stats(struct ath11k *ar,
				struct htt_ppdu_stats *ppdu_stats, u8 user)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct ieee80211_sta *sta;
	struct ath11k_sta *arsta;
	struct htt_ppdu_stats_user_rate *user_rate;
	struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
	struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
	struct htt_ppdu_stats_common *common = &ppdu_stats->common;
	int ret;
	u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
	u32 succ_bytes = 0;
	u16 rate = 0, succ_pkts = 0;
	u32 tx_duration = 0;
	u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
	bool is_ampdu = false;

	if (!usr_stats)
		return;

	if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
		return;

	if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
		is_ampdu =
			HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);

	if (usr_stats->tlv_flags &
	    BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
		succ_bytes = usr_stats->ack_ba.success_bytes;
		succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M,
				      usr_stats->ack_ba.info);
		tid = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM,
				usr_stats->ack_ba.info);
	}

	if (common->fes_duration_us)
		tx_duration = common->fes_duration_us;

	user_rate = &usr_stats->rate;
	flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
	bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
	nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
	mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
	sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
	dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);

	/* Note: If host configured fixed rates and in some other special
	 * cases, the broadcast/management frames are sent in different rates.
	 * Firmware rate's control to be skipped for this?
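	 * For now the rate reported in the USR_RATE TLV is validated per
	 * preamble type below and used as-is.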
	 */

	if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH11K_HE_MCS_MAX) {
		ath11k_warn(ab, "Invalid HE mcs %hhd peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH11K_VHT_MCS_MAX) {
		ath11k_warn(ab, "Invalid VHT mcs %hhd peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH11K_HT_MCS_MAX || nss < 1)) {
		ath11k_warn(ab, "Invalid HT mcs %hhd nss %hhd peer stats",
			    mcs, nss);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
		ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
							    flags,
							    &rate_idx,
							    &rate);
		if (ret < 0)
			return;
	}

	rcu_read_lock();
	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id);

	if (!peer || !peer->sta) {
		spin_unlock_bh(&ab->base_lock);
		rcu_read_unlock();
		return;
	}

	sta = peer->sta;
	arsta = (struct ath11k_sta *)sta->drv_priv;

	memset(&arsta->txrate, 0, sizeof(arsta->txrate));

	switch (flags) {
	case WMI_RATE_PREAMBLE_OFDM:
		arsta->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_CCK:
		arsta->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_HT:
		arsta->txrate.mcs = mcs + 8 * (nss - 1);
		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_VHT:
		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_HE:
		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
		arsta->txrate.he_dcm = dcm;
		arsta->txrate.he_gi = ath11k_he_gi_to_nl80211_he_gi(sgi);
		arsta->txrate.he_ru_alloc = ath11k_he_ru_tones_to_nl80211_he_ru_alloc(
						(user_rate->ru_end -
						 user_rate->ru_start) + 1);
		break;
	}

	arsta->txrate.nss = nss;
	arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
	arsta->tx_duration += tx_duration;
	memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));

	/* PPDU stats reported for mgmt packet doesn't have valid tx bytes.
	 * So skip peer stats update for mgmt packets.
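	 * (The default tid of HTT_PPDU_STATS_NON_QOS_TID is only overridden
	 * when an ACK/BA status TLV is present, so frames without that TLV
	 * are skipped here as well.)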
1187 */ 1188 if (tid < HTT_PPDU_STATS_NON_QOS_TID) { 1189 memset(peer_stats, 0, sizeof(*peer_stats)); 1190 peer_stats->succ_pkts = succ_pkts; 1191 peer_stats->succ_bytes = succ_bytes; 1192 peer_stats->is_ampdu = is_ampdu; 1193 peer_stats->duration = tx_duration; 1194 peer_stats->ba_fails = 1195 HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) + 1196 HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags); 1197 1198 if (ath11k_debug_is_extd_tx_stats_enabled(ar)) 1199 ath11k_accumulate_per_peer_tx_stats(arsta, 1200 peer_stats, rate_idx); 1201 } 1202 1203 spin_unlock_bh(&ab->base_lock); 1204 rcu_read_unlock(); 1205 } 1206 1207 static void ath11k_htt_update_ppdu_stats(struct ath11k *ar, 1208 struct htt_ppdu_stats *ppdu_stats) 1209 { 1210 u8 user; 1211 1212 for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++) 1213 ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user); 1214 } 1215 1216 static 1217 struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar, 1218 u32 ppdu_id) 1219 { 1220 struct htt_ppdu_stats_info *ppdu_info; 1221 1222 spin_lock_bh(&ar->data_lock); 1223 if (!list_empty(&ar->ppdu_stats_info)) { 1224 list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) { 1225 if (ppdu_info->ppdu_id == ppdu_id) { 1226 spin_unlock_bh(&ar->data_lock); 1227 return ppdu_info; 1228 } 1229 } 1230 1231 if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) { 1232 ppdu_info = list_first_entry(&ar->ppdu_stats_info, 1233 typeof(*ppdu_info), list); 1234 list_del(&ppdu_info->list); 1235 ar->ppdu_stat_list_depth--; 1236 ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats); 1237 kfree(ppdu_info); 1238 } 1239 } 1240 spin_unlock_bh(&ar->data_lock); 1241 1242 ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_KERNEL); 1243 if (!ppdu_info) 1244 return NULL; 1245 1246 spin_lock_bh(&ar->data_lock); 1247 list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info); 1248 ar->ppdu_stat_list_depth++; 1249 spin_unlock_bh(&ar->data_lock); 1250 1251 return ppdu_info; 1252 } 1253 1254 static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab, 1255 struct sk_buff *skb) 1256 { 1257 struct ath11k_htt_ppdu_stats_msg *msg; 1258 struct htt_ppdu_stats_info *ppdu_info; 1259 struct ath11k *ar; 1260 int ret; 1261 u8 pdev_id; 1262 u32 ppdu_id, len; 1263 1264 msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data; 1265 len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info); 1266 pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info); 1267 ppdu_id = msg->ppdu_id; 1268 1269 rcu_read_lock(); 1270 ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id); 1271 if (!ar) { 1272 ret = -EINVAL; 1273 goto exit; 1274 } 1275 1276 if (ath11k_debug_is_pktlog_lite_mode_enabled(ar)) 1277 trace_ath11k_htt_ppdu_stats(ar, skb->data, len); 1278 1279 ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id); 1280 if (!ppdu_info) { 1281 ret = -EINVAL; 1282 goto exit; 1283 } 1284 1285 ppdu_info->ppdu_id = ppdu_id; 1286 ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len, 1287 ath11k_htt_tlv_ppdu_stats_parse, 1288 (void *)ppdu_info); 1289 if (ret) { 1290 ath11k_warn(ab, "Failed to parse tlv %d\n", ret); 1291 goto exit; 1292 } 1293 1294 exit: 1295 rcu_read_unlock(); 1296 1297 return ret; 1298 } 1299 1300 static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb) 1301 { 1302 struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data; 1303 struct ath_pktlog_hdr *hdr = (struct ath_pktlog_hdr *)data; 1304 struct ath11k *ar; 1305 u8 pdev_id; 1306 1307 pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr); 1308 ar 
= ath11k_mac_get_ar_by_pdev_id(ab, pdev_id); 1309 if (!ar) { 1310 ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id); 1311 return; 1312 } 1313 1314 trace_ath11k_htt_pktlog(ar, data->payload, hdr->size); 1315 } 1316 1317 void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab, 1318 struct sk_buff *skb) 1319 { 1320 struct ath11k_dp *dp = &ab->dp; 1321 struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data; 1322 enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp); 1323 u16 peer_id; 1324 u8 vdev_id; 1325 u8 mac_addr[ETH_ALEN]; 1326 u16 peer_mac_h16; 1327 u16 ast_hash; 1328 1329 ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type); 1330 1331 switch (type) { 1332 case HTT_T2H_MSG_TYPE_VERSION_CONF: 1333 dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR, 1334 resp->version_msg.version); 1335 dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR, 1336 resp->version_msg.version); 1337 complete(&dp->htt_tgt_version_received); 1338 break; 1339 case HTT_T2H_MSG_TYPE_PEER_MAP: 1340 vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID, 1341 resp->peer_map_ev.info); 1342 peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID, 1343 resp->peer_map_ev.info); 1344 peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16, 1345 resp->peer_map_ev.info1); 1346 ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32, 1347 peer_mac_h16, mac_addr); 1348 ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL, 1349 resp->peer_map_ev.info2); 1350 ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash); 1351 break; 1352 case HTT_T2H_MSG_TYPE_PEER_UNMAP: 1353 peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID, 1354 resp->peer_unmap_ev.info); 1355 ath11k_peer_unmap_event(ab, peer_id); 1356 break; 1357 case HTT_T2H_MSG_TYPE_PPDU_STATS_IND: 1358 ath11k_htt_pull_ppdu_stats(ab, skb); 1359 break; 1360 case HTT_T2H_MSG_TYPE_EXT_STATS_CONF: 1361 ath11k_dbg_htt_ext_stats_handler(ab, skb); 1362 break; 1363 case HTT_T2H_MSG_TYPE_PKTLOG: 1364 ath11k_htt_pktlog(ab, skb); 1365 break; 1366 default: 1367 ath11k_warn(ab, "htt event %d not handled\n", type); 1368 break; 1369 } 1370 1371 dev_kfree_skb_any(skb); 1372 } 1373 1374 static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar, 1375 struct sk_buff_head *msdu_list, 1376 struct sk_buff *first, struct sk_buff *last, 1377 u8 l3pad_bytes, int msdu_len) 1378 { 1379 struct sk_buff *skb; 1380 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first); 1381 int buf_first_hdr_len, buf_first_len; 1382 struct hal_rx_desc *ldesc; 1383 int space_extra; 1384 int rem_len; 1385 int buf_len; 1386 1387 /* As the msdu is spread across multiple rx buffers, 1388 * find the offset to the start of msdu for computing 1389 * the length of the msdu in the first buffer. 1390 */ 1391 buf_first_hdr_len = HAL_RX_DESC_SIZE + l3pad_bytes; 1392 buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len; 1393 1394 if (WARN_ON_ONCE(msdu_len <= buf_first_len)) { 1395 skb_put(first, buf_first_hdr_len + msdu_len); 1396 skb_pull(first, buf_first_hdr_len); 1397 return 0; 1398 } 1399 1400 ldesc = (struct hal_rx_desc *)last->data; 1401 rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ldesc); 1402 rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ldesc); 1403 1404 /* MSDU spans over multiple buffers because the length of the MSDU 1405 * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data 1406 * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. 
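	 * The remaining bytes are copied out of the continuation buffers
	 * further below and appended to the first skb.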
1407 */ 1408 skb_put(first, DP_RX_BUFFER_SIZE); 1409 skb_pull(first, buf_first_hdr_len); 1410 1411 /* When an MSDU spread over multiple buffers attention, MSDU_END and 1412 * MPDU_END tlvs are valid only in the last buffer. Copy those tlvs. 1413 */ 1414 ath11k_dp_rx_desc_end_tlv_copy(rxcb->rx_desc, ldesc); 1415 1416 space_extra = msdu_len - (buf_first_len + skb_tailroom(first)); 1417 if (space_extra > 0 && 1418 (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) { 1419 /* Free up all buffers of the MSDU */ 1420 while ((skb = __skb_dequeue(msdu_list)) != NULL) { 1421 rxcb = ATH11K_SKB_RXCB(skb); 1422 if (!rxcb->is_continuation) { 1423 dev_kfree_skb_any(skb); 1424 break; 1425 } 1426 dev_kfree_skb_any(skb); 1427 } 1428 return -ENOMEM; 1429 } 1430 1431 rem_len = msdu_len - buf_first_len; 1432 while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) { 1433 rxcb = ATH11K_SKB_RXCB(skb); 1434 if (rxcb->is_continuation) 1435 buf_len = DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE; 1436 else 1437 buf_len = rem_len; 1438 1439 if (buf_len > (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE)) { 1440 WARN_ON_ONCE(1); 1441 dev_kfree_skb_any(skb); 1442 return -EINVAL; 1443 } 1444 1445 skb_put(skb, buf_len + HAL_RX_DESC_SIZE); 1446 skb_pull(skb, HAL_RX_DESC_SIZE); 1447 skb_copy_from_linear_data(skb, skb_put(first, buf_len), 1448 buf_len); 1449 dev_kfree_skb_any(skb); 1450 1451 rem_len -= buf_len; 1452 if (!rxcb->is_continuation) 1453 break; 1454 } 1455 1456 return 0; 1457 } 1458 1459 static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list, 1460 struct sk_buff *first) 1461 { 1462 struct sk_buff *skb; 1463 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first); 1464 1465 if (!rxcb->is_continuation) 1466 return first; 1467 1468 skb_queue_walk(msdu_list, skb) { 1469 rxcb = ATH11K_SKB_RXCB(skb); 1470 if (!rxcb->is_continuation) 1471 return skb; 1472 } 1473 1474 return NULL; 1475 } 1476 1477 static int ath11k_dp_rx_retrieve_amsdu(struct ath11k *ar, 1478 struct sk_buff_head *msdu_list, 1479 struct sk_buff_head *amsdu_list) 1480 { 1481 struct sk_buff *msdu = skb_peek(msdu_list); 1482 struct sk_buff *last_buf; 1483 struct ath11k_skb_rxcb *rxcb; 1484 struct ieee80211_hdr *hdr; 1485 struct hal_rx_desc *rx_desc, *lrx_desc; 1486 u16 msdu_len; 1487 u8 l3_pad_bytes; 1488 u8 *hdr_status; 1489 int ret; 1490 1491 if (!msdu) 1492 return -ENOENT; 1493 1494 rx_desc = (struct hal_rx_desc *)msdu->data; 1495 hdr_status = ath11k_dp_rx_h_80211_hdr(rx_desc); 1496 hdr = (struct ieee80211_hdr *)hdr_status; 1497 /* Process only data frames */ 1498 if (!ieee80211_is_data(hdr->frame_control)) { 1499 __skb_unlink(msdu, msdu_list); 1500 dev_kfree_skb_any(msdu); 1501 return -EINVAL; 1502 } 1503 1504 do { 1505 __skb_unlink(msdu, msdu_list); 1506 last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu); 1507 if (!last_buf) { 1508 ath11k_warn(ar->ab, 1509 "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n"); 1510 ret = -EIO; 1511 goto free_out; 1512 } 1513 1514 rx_desc = (struct hal_rx_desc *)msdu->data; 1515 lrx_desc = (struct hal_rx_desc *)last_buf->data; 1516 1517 if (!ath11k_dp_rx_h_attn_msdu_done(lrx_desc)) { 1518 ath11k_warn(ar->ab, "msdu_done bit in attention is not set\n"); 1519 ret = -EIO; 1520 goto free_out; 1521 } 1522 1523 rxcb = ATH11K_SKB_RXCB(msdu); 1524 rxcb->rx_desc = rx_desc; 1525 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc); 1526 l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(lrx_desc); 1527 1528 if (!rxcb->is_continuation) { 1529 skb_put(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes + 
msdu_len); 1530 skb_pull(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes); 1531 } else { 1532 ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list, 1533 msdu, last_buf, 1534 l3_pad_bytes, msdu_len); 1535 if (ret) { 1536 ath11k_warn(ar->ab, 1537 "failed to coalesce msdu rx buffer%d\n", ret); 1538 goto free_out; 1539 } 1540 } 1541 __skb_queue_tail(amsdu_list, msdu); 1542 1543 /* Should we also consider msdu_cnt from mpdu_meta while 1544 * preparing amsdu list? 1545 */ 1546 if (rxcb->is_last_msdu) 1547 break; 1548 } while ((msdu = skb_peek(msdu_list)) != NULL); 1549 1550 return 0; 1551 1552 free_out: 1553 dev_kfree_skb_any(msdu); 1554 __skb_queue_purge(amsdu_list); 1555 1556 return ret; 1557 } 1558 1559 static void ath11k_dp_rx_h_csum_offload(struct sk_buff *msdu) 1560 { 1561 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 1562 bool ip_csum_fail, l4_csum_fail; 1563 1564 ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rxcb->rx_desc); 1565 l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rxcb->rx_desc); 1566 1567 msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ? 1568 CHECKSUM_NONE : CHECKSUM_UNNECESSARY; 1569 } 1570 1571 static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar, 1572 enum hal_encrypt_type enctype) 1573 { 1574 switch (enctype) { 1575 case HAL_ENCRYPT_TYPE_OPEN: 1576 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 1577 case HAL_ENCRYPT_TYPE_TKIP_MIC: 1578 return 0; 1579 case HAL_ENCRYPT_TYPE_CCMP_128: 1580 return IEEE80211_CCMP_MIC_LEN; 1581 case HAL_ENCRYPT_TYPE_CCMP_256: 1582 return IEEE80211_CCMP_256_MIC_LEN; 1583 case HAL_ENCRYPT_TYPE_GCMP_128: 1584 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 1585 return IEEE80211_GCMP_MIC_LEN; 1586 case HAL_ENCRYPT_TYPE_WEP_40: 1587 case HAL_ENCRYPT_TYPE_WEP_104: 1588 case HAL_ENCRYPT_TYPE_WEP_128: 1589 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 1590 case HAL_ENCRYPT_TYPE_WAPI: 1591 break; 1592 } 1593 1594 ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype); 1595 return 0; 1596 } 1597 1598 static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar, 1599 enum hal_encrypt_type enctype) 1600 { 1601 switch (enctype) { 1602 case HAL_ENCRYPT_TYPE_OPEN: 1603 return 0; 1604 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 1605 case HAL_ENCRYPT_TYPE_TKIP_MIC: 1606 return IEEE80211_TKIP_IV_LEN; 1607 case HAL_ENCRYPT_TYPE_CCMP_128: 1608 return IEEE80211_CCMP_HDR_LEN; 1609 case HAL_ENCRYPT_TYPE_CCMP_256: 1610 return IEEE80211_CCMP_256_HDR_LEN; 1611 case HAL_ENCRYPT_TYPE_GCMP_128: 1612 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 1613 return IEEE80211_GCMP_HDR_LEN; 1614 case HAL_ENCRYPT_TYPE_WEP_40: 1615 case HAL_ENCRYPT_TYPE_WEP_104: 1616 case HAL_ENCRYPT_TYPE_WEP_128: 1617 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 1618 case HAL_ENCRYPT_TYPE_WAPI: 1619 break; 1620 } 1621 1622 ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype); 1623 return 0; 1624 } 1625 1626 static int ath11k_dp_rx_crypto_icv_len(struct ath11k *ar, 1627 enum hal_encrypt_type enctype) 1628 { 1629 switch (enctype) { 1630 case HAL_ENCRYPT_TYPE_OPEN: 1631 case HAL_ENCRYPT_TYPE_CCMP_128: 1632 case HAL_ENCRYPT_TYPE_CCMP_256: 1633 case HAL_ENCRYPT_TYPE_GCMP_128: 1634 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 1635 return 0; 1636 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 1637 case HAL_ENCRYPT_TYPE_TKIP_MIC: 1638 return IEEE80211_TKIP_ICV_LEN; 1639 case HAL_ENCRYPT_TYPE_WEP_40: 1640 case HAL_ENCRYPT_TYPE_WEP_104: 1641 case HAL_ENCRYPT_TYPE_WEP_128: 1642 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 1643 case HAL_ENCRYPT_TYPE_WAPI: 1644 break; 1645 } 1646 1647 ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype); 1648 return 0; 1649 } 
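
/* The three helpers above give, per cipher, the lengths that the undecap
 * functions below strip again: crypto_param_len() is the IV/PN header that
 * sits right after the 802.11 header, while mic_len() and icv_len() are
 * trailers at the end of the frame.  As a rough sketch (not driver code),
 * a decrypted CCMP-128 frame in raw decap with RX_FLAG_IV_STRIPPED set is,
 * after the FCS trim, reduced roughly as:
 *
 *	skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);
 *	memmove(msdu->data + IEEE80211_CCMP_HDR_LEN, msdu->data, hdr_len);
 *	skb_pull(msdu, IEEE80211_CCMP_HDR_LEN);
 */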
1650 1651 static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar, 1652 struct sk_buff *msdu, 1653 u8 *first_hdr, 1654 enum hal_encrypt_type enctype, 1655 struct ieee80211_rx_status *status) 1656 { 1657 struct ieee80211_hdr *hdr; 1658 size_t hdr_len; 1659 u8 da[ETH_ALEN]; 1660 u8 sa[ETH_ALEN]; 1661 1662 /* pull decapped header and copy SA & DA */ 1663 hdr = (struct ieee80211_hdr *)msdu->data; 1664 ether_addr_copy(da, ieee80211_get_DA(hdr)); 1665 ether_addr_copy(sa, ieee80211_get_SA(hdr)); 1666 skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control)); 1667 1668 /* push original 802.11 header */ 1669 hdr = (struct ieee80211_hdr *)first_hdr; 1670 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1671 1672 if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 1673 memcpy(skb_push(msdu, 1674 ath11k_dp_rx_crypto_param_len(ar, enctype)), 1675 (void *)hdr + hdr_len, 1676 ath11k_dp_rx_crypto_param_len(ar, enctype)); 1677 } 1678 1679 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 1680 1681 /* original 802.11 header has a different DA and in 1682 * case of 4addr it may also have different SA 1683 */ 1684 hdr = (struct ieee80211_hdr *)msdu->data; 1685 ether_addr_copy(ieee80211_get_DA(hdr), da); 1686 ether_addr_copy(ieee80211_get_SA(hdr), sa); 1687 } 1688 1689 static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu, 1690 enum hal_encrypt_type enctype, 1691 struct ieee80211_rx_status *status, 1692 bool decrypted) 1693 { 1694 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 1695 struct ieee80211_hdr *hdr; 1696 size_t hdr_len; 1697 size_t crypto_len; 1698 1699 if (!rxcb->is_first_msdu || 1700 !(rxcb->is_first_msdu && rxcb->is_last_msdu)) { 1701 WARN_ON_ONCE(1); 1702 return; 1703 } 1704 1705 skb_trim(msdu, msdu->len - FCS_LEN); 1706 1707 if (!decrypted) 1708 return; 1709 1710 hdr = (void *)msdu->data; 1711 1712 /* Tail */ 1713 if (status->flag & RX_FLAG_IV_STRIPPED) { 1714 skb_trim(msdu, msdu->len - 1715 ath11k_dp_rx_crypto_mic_len(ar, enctype)); 1716 1717 skb_trim(msdu, msdu->len - 1718 ath11k_dp_rx_crypto_icv_len(ar, enctype)); 1719 } else { 1720 /* MIC */ 1721 if (status->flag & RX_FLAG_MIC_STRIPPED) 1722 skb_trim(msdu, msdu->len - 1723 ath11k_dp_rx_crypto_mic_len(ar, enctype)); 1724 1725 /* ICV */ 1726 if (status->flag & RX_FLAG_ICV_STRIPPED) 1727 skb_trim(msdu, msdu->len - 1728 ath11k_dp_rx_crypto_icv_len(ar, enctype)); 1729 } 1730 1731 /* MMIC */ 1732 if ((status->flag & RX_FLAG_MMIC_STRIPPED) && 1733 !ieee80211_has_morefrags(hdr->frame_control) && 1734 enctype == HAL_ENCRYPT_TYPE_TKIP_MIC) 1735 skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN); 1736 1737 /* Head */ 1738 if (status->flag & RX_FLAG_IV_STRIPPED) { 1739 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1740 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); 1741 1742 memmove((void *)msdu->data + crypto_len, 1743 (void *)msdu->data, hdr_len); 1744 skb_pull(msdu, crypto_len); 1745 } 1746 } 1747 1748 static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar, 1749 struct sk_buff *msdu, 1750 enum hal_encrypt_type enctype) 1751 { 1752 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 1753 struct ieee80211_hdr *hdr; 1754 size_t hdr_len, crypto_len; 1755 void *rfc1042; 1756 bool is_amsdu; 1757 1758 is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu); 1759 hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(rxcb->rx_desc); 1760 rfc1042 = hdr; 1761 1762 if (rxcb->is_first_msdu) { 1763 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1764 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); 1765 1766 rfc1042 += 
hdr_len + crypto_len; 1767 } 1768 1769 if (is_amsdu) 1770 rfc1042 += sizeof(struct ath11k_dp_amsdu_subframe_hdr); 1771 1772 return rfc1042; 1773 } 1774 1775 static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar, 1776 struct sk_buff *msdu, 1777 u8 *first_hdr, 1778 enum hal_encrypt_type enctype, 1779 struct ieee80211_rx_status *status) 1780 { 1781 struct ieee80211_hdr *hdr; 1782 struct ethhdr *eth; 1783 size_t hdr_len; 1784 u8 da[ETH_ALEN]; 1785 u8 sa[ETH_ALEN]; 1786 void *rfc1042; 1787 1788 rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype); 1789 if (WARN_ON_ONCE(!rfc1042)) 1790 return; 1791 1792 /* pull decapped header and copy SA & DA */ 1793 eth = (struct ethhdr *)msdu->data; 1794 ether_addr_copy(da, eth->h_dest); 1795 ether_addr_copy(sa, eth->h_source); 1796 skb_pull(msdu, sizeof(struct ethhdr)); 1797 1798 /* push rfc1042/llc/snap */ 1799 memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042, 1800 sizeof(struct ath11k_dp_rfc1042_hdr)); 1801 1802 /* push original 802.11 header */ 1803 hdr = (struct ieee80211_hdr *)first_hdr; 1804 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1805 1806 if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 1807 memcpy(skb_push(msdu, 1808 ath11k_dp_rx_crypto_param_len(ar, enctype)), 1809 (void *)hdr + hdr_len, 1810 ath11k_dp_rx_crypto_param_len(ar, enctype)); 1811 } 1812 1813 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 1814 1815 /* original 802.11 header has a different DA and in 1816 * case of 4addr it may also have different SA 1817 */ 1818 hdr = (struct ieee80211_hdr *)msdu->data; 1819 ether_addr_copy(ieee80211_get_DA(hdr), da); 1820 ether_addr_copy(ieee80211_get_SA(hdr), sa); 1821 } 1822 1823 static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu, 1824 struct hal_rx_desc *rx_desc, 1825 enum hal_encrypt_type enctype, 1826 struct ieee80211_rx_status *status, 1827 bool decrypted) 1828 { 1829 u8 *first_hdr; 1830 u8 decap; 1831 1832 first_hdr = ath11k_dp_rx_h_80211_hdr(rx_desc); 1833 decap = ath11k_dp_rx_h_mpdu_start_decap_type(rx_desc); 1834 1835 switch (decap) { 1836 case DP_RX_DECAP_TYPE_NATIVE_WIFI: 1837 ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr, 1838 enctype, status); 1839 break; 1840 case DP_RX_DECAP_TYPE_RAW: 1841 ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status, 1842 decrypted); 1843 break; 1844 case DP_RX_DECAP_TYPE_ETHERNET2_DIX: 1845 ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr, 1846 enctype, status); 1847 break; 1848 case DP_RX_DECAP_TYPE_8023: 1849 /* TODO: Handle undecap for these formats */ 1850 break; 1851 } 1852 } 1853 1854 static void ath11k_dp_rx_h_mpdu(struct ath11k *ar, 1855 struct sk_buff_head *amsdu_list, 1856 struct hal_rx_desc *rx_desc, 1857 struct ieee80211_rx_status *rx_status) 1858 { 1859 struct ieee80211_hdr *hdr; 1860 enum hal_encrypt_type enctype; 1861 struct sk_buff *last_msdu; 1862 struct sk_buff *msdu; 1863 struct ath11k_skb_rxcb *last_rxcb; 1864 bool is_decrypted; 1865 u32 err_bitmap; 1866 u8 *qos; 1867 1868 if (skb_queue_empty(amsdu_list)) 1869 return; 1870 1871 hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(rx_desc); 1872 1873 /* Each A-MSDU subframe will use the original header as the base and be 1874 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl. 
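	 * The per-subframe RX_FLAG_AMSDU_MORE / RX_FLAG_ALLOW_SAME_PN flags
	 * are applied later, in ath11k_dp_rx_pre_deliver_amsdu().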
1875 */ 1876 if (ieee80211_is_data_qos(hdr->frame_control)) { 1877 qos = ieee80211_get_qos_ctl(hdr); 1878 qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; 1879 } 1880 1881 is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc); 1882 enctype = ath11k_dp_rx_h_mpdu_start_enctype(rx_desc); 1883 1884 /* Some attention flags are valid only in the last MSDU. */ 1885 last_msdu = skb_peek_tail(amsdu_list); 1886 last_rxcb = ATH11K_SKB_RXCB(last_msdu); 1887 1888 err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(last_rxcb->rx_desc); 1889 1890 /* Clear per-MPDU flags while leaving per-PPDU flags intact. */ 1891 rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC | 1892 RX_FLAG_MMIC_ERROR | 1893 RX_FLAG_DECRYPTED | 1894 RX_FLAG_IV_STRIPPED | 1895 RX_FLAG_MMIC_STRIPPED); 1896 1897 if (err_bitmap & DP_RX_MPDU_ERR_FCS) 1898 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; 1899 1900 if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC) 1901 rx_status->flag |= RX_FLAG_MMIC_ERROR; 1902 1903 if (is_decrypted) 1904 rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED | 1905 RX_FLAG_MIC_STRIPPED | RX_FLAG_ICV_STRIPPED; 1906 1907 skb_queue_walk(amsdu_list, msdu) { 1908 ath11k_dp_rx_h_csum_offload(msdu); 1909 ath11k_dp_rx_h_undecap(ar, msdu, rx_desc, 1910 enctype, rx_status, is_decrypted); 1911 } 1912 } 1913 1914 static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc, 1915 struct ieee80211_rx_status *rx_status) 1916 { 1917 struct ieee80211_supported_band *sband; 1918 enum rx_msdu_start_pkt_type pkt_type; 1919 u8 bw; 1920 u8 rate_mcs, nss; 1921 u8 sgi; 1922 bool is_cck; 1923 1924 pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(rx_desc); 1925 bw = ath11k_dp_rx_h_msdu_start_rx_bw(rx_desc); 1926 rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(rx_desc); 1927 nss = ath11k_dp_rx_h_msdu_start_nss(rx_desc); 1928 sgi = ath11k_dp_rx_h_msdu_start_sgi(rx_desc); 1929 1930 switch (pkt_type) { 1931 case RX_MSDU_START_PKT_TYPE_11A: 1932 case RX_MSDU_START_PKT_TYPE_11B: 1933 is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B); 1934 sband = &ar->mac.sbands[rx_status->band]; 1935 rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs, 1936 is_cck); 1937 break; 1938 case RX_MSDU_START_PKT_TYPE_11N: 1939 rx_status->encoding = RX_ENC_HT; 1940 if (rate_mcs > ATH11K_HT_MCS_MAX) { 1941 ath11k_warn(ar->ab, 1942 "Received with invalid mcs in HT mode %d\n", 1943 rate_mcs); 1944 break; 1945 } 1946 rx_status->rate_idx = rate_mcs + (8 * (nss - 1)); 1947 if (sgi) 1948 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 1949 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); 1950 break; 1951 case RX_MSDU_START_PKT_TYPE_11AC: 1952 rx_status->encoding = RX_ENC_VHT; 1953 rx_status->rate_idx = rate_mcs; 1954 if (rate_mcs > ATH11K_VHT_MCS_MAX) { 1955 ath11k_warn(ar->ab, 1956 "Received with invalid mcs in VHT mode %d\n", 1957 rate_mcs); 1958 break; 1959 } 1960 rx_status->nss = nss; 1961 if (sgi) 1962 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 1963 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); 1964 break; 1965 case RX_MSDU_START_PKT_TYPE_11AX: 1966 rx_status->rate_idx = rate_mcs; 1967 if (rate_mcs > ATH11K_HE_MCS_MAX) { 1968 ath11k_warn(ar->ab, 1969 "Received with invalid mcs in HE mode %d\n", 1970 rate_mcs); 1971 break; 1972 } 1973 rx_status->encoding = RX_ENC_HE; 1974 rx_status->nss = nss; 1975 rx_status->he_gi = ath11k_he_gi_to_nl80211_he_gi(sgi); 1976 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); 1977 break; 1978 } 1979 } 1980 1981 static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc, 1982 struct ieee80211_rx_status 
*rx_status) 1983 { 1984 u8 channel_num; 1985 1986 rx_status->freq = 0; 1987 rx_status->rate_idx = 0; 1988 rx_status->nss = 0; 1989 rx_status->encoding = RX_ENC_LEGACY; 1990 rx_status->bw = RATE_INFO_BW_20; 1991 1992 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; 1993 1994 channel_num = ath11k_dp_rx_h_msdu_start_freq(rx_desc); 1995 1996 if (channel_num >= 1 && channel_num <= 14) { 1997 rx_status->band = NL80211_BAND_2GHZ; 1998 } else if (channel_num >= 36 && channel_num <= 173) { 1999 rx_status->band = NL80211_BAND_5GHZ; 2000 } else { 2001 ath11k_warn(ar->ab, "Unsupported Channel info received %d\n", 2002 channel_num); 2003 return; 2004 } 2005 2006 rx_status->freq = ieee80211_channel_to_frequency(channel_num, 2007 rx_status->band); 2008 2009 ath11k_dp_rx_h_rate(ar, rx_desc, rx_status); 2010 } 2011 2012 static void ath11k_dp_rx_process_amsdu(struct ath11k *ar, 2013 struct sk_buff_head *amsdu_list, 2014 struct ieee80211_rx_status *rx_status) 2015 { 2016 struct sk_buff *first; 2017 struct ath11k_skb_rxcb *rxcb; 2018 struct hal_rx_desc *rx_desc; 2019 bool first_mpdu; 2020 2021 if (skb_queue_empty(amsdu_list)) 2022 return; 2023 2024 first = skb_peek(amsdu_list); 2025 rxcb = ATH11K_SKB_RXCB(first); 2026 rx_desc = rxcb->rx_desc; 2027 2028 first_mpdu = ath11k_dp_rx_h_attn_first_mpdu(rx_desc); 2029 if (first_mpdu) 2030 ath11k_dp_rx_h_ppdu(ar, rx_desc, rx_status); 2031 2032 ath11k_dp_rx_h_mpdu(ar, amsdu_list, rx_desc, rx_status); 2033 } 2034 2035 static char *ath11k_print_get_tid(struct ieee80211_hdr *hdr, char *out, 2036 size_t size) 2037 { 2038 u8 *qc; 2039 int tid; 2040 2041 if (!ieee80211_is_data_qos(hdr->frame_control)) 2042 return ""; 2043 2044 qc = ieee80211_get_qos_ctl(hdr); 2045 tid = *qc & IEEE80211_QOS_CTL_TID_MASK; 2046 snprintf(out, size, "tid %d", tid); 2047 2048 return out; 2049 } 2050 2051 static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi, 2052 struct sk_buff *msdu) 2053 { 2054 static const struct ieee80211_radiotap_he known = { 2055 .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN | 2056 IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN), 2057 .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN), 2058 }; 2059 struct ieee80211_rx_status *status; 2060 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; 2061 struct ieee80211_radiotap_he *he = NULL; 2062 char tid[32]; 2063 2064 status = IEEE80211_SKB_RXCB(msdu); 2065 if (status->encoding == RX_ENC_HE) { 2066 he = skb_push(msdu, sizeof(known)); 2067 memcpy(he, &known, sizeof(known)); 2068 status->flag |= RX_FLAG_RADIOTAP_HE; 2069 } 2070 2071 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 2072 "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", 2073 msdu, 2074 msdu->len, 2075 ieee80211_get_SA(hdr), 2076 ath11k_print_get_tid(hdr, tid, sizeof(tid)), 2077 is_multicast_ether_addr(ieee80211_get_DA(hdr)) ? 2078 "mcast" : "ucast", 2079 (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4, 2080 (status->encoding == RX_ENC_LEGACY) ? "legacy" : "", 2081 (status->encoding == RX_ENC_HT) ? "ht" : "", 2082 (status->encoding == RX_ENC_VHT) ? "vht" : "", 2083 (status->encoding == RX_ENC_HE) ? "he" : "", 2084 (status->bw == RATE_INFO_BW_40) ? "40" : "", 2085 (status->bw == RATE_INFO_BW_80) ? "80" : "", 2086 (status->bw == RATE_INFO_BW_160) ? "160" : "", 2087 status->enc_flags & RX_ENC_FLAG_SHORT_GI ? 
"sgi " : "", 2088 status->rate_idx, 2089 status->nss, 2090 status->freq, 2091 status->band, status->flag, 2092 !!(status->flag & RX_FLAG_FAILED_FCS_CRC), 2093 !!(status->flag & RX_FLAG_MMIC_ERROR), 2094 !!(status->flag & RX_FLAG_AMSDU_MORE)); 2095 2096 /* TODO: trace rx packet */ 2097 2098 ieee80211_rx_napi(ar->hw, NULL, msdu, napi); 2099 } 2100 2101 static void ath11k_dp_rx_pre_deliver_amsdu(struct ath11k *ar, 2102 struct sk_buff_head *amsdu_list, 2103 struct ieee80211_rx_status *rxs) 2104 { 2105 struct sk_buff *msdu; 2106 struct sk_buff *first_subframe; 2107 struct ieee80211_rx_status *status; 2108 2109 first_subframe = skb_peek(amsdu_list); 2110 2111 skb_queue_walk(amsdu_list, msdu) { 2112 /* Setup per-MSDU flags */ 2113 if (skb_queue_empty(amsdu_list)) 2114 rxs->flag &= ~RX_FLAG_AMSDU_MORE; 2115 else 2116 rxs->flag |= RX_FLAG_AMSDU_MORE; 2117 2118 if (msdu == first_subframe) { 2119 first_subframe = NULL; 2120 rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN; 2121 } else { 2122 rxs->flag |= RX_FLAG_ALLOW_SAME_PN; 2123 } 2124 rxs->flag |= RX_FLAG_SKIP_MONITOR; 2125 2126 status = IEEE80211_SKB_RXCB(msdu); 2127 *status = *rxs; 2128 } 2129 } 2130 2131 static void ath11k_dp_rx_process_pending_packets(struct ath11k_base *ab, 2132 struct napi_struct *napi, 2133 struct sk_buff_head *pending_q, 2134 int *quota, u8 mac_id) 2135 { 2136 struct ath11k *ar; 2137 struct sk_buff *msdu; 2138 struct ath11k_pdev *pdev; 2139 2140 if (skb_queue_empty(pending_q)) 2141 return; 2142 2143 ar = ab->pdevs[mac_id].ar; 2144 2145 rcu_read_lock(); 2146 pdev = rcu_dereference(ab->pdevs_active[mac_id]); 2147 2148 while (*quota && (msdu = __skb_dequeue(pending_q))) { 2149 if (!pdev) { 2150 dev_kfree_skb_any(msdu); 2151 continue; 2152 } 2153 2154 ath11k_dp_rx_deliver_msdu(ar, napi, msdu); 2155 (*quota)--; 2156 } 2157 rcu_read_unlock(); 2158 } 2159 2160 int ath11k_dp_process_rx(struct ath11k_base *ab, int mac_id, 2161 struct napi_struct *napi, struct sk_buff_head *pending_q, 2162 int budget) 2163 { 2164 struct ath11k *ar = ab->pdevs[mac_id].ar; 2165 struct ath11k_pdev_dp *dp = &ar->dp; 2166 struct ieee80211_rx_status *rx_status = &dp->rx_status; 2167 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; 2168 struct hal_srng *srng; 2169 struct sk_buff *msdu; 2170 struct sk_buff_head msdu_list; 2171 struct sk_buff_head amsdu_list; 2172 struct ath11k_skb_rxcb *rxcb; 2173 u32 *rx_desc; 2174 int buf_id; 2175 int num_buffs_reaped = 0; 2176 int quota = budget; 2177 int ret; 2178 bool done = false; 2179 2180 /* Process any pending packets from the previous napi poll. 2181 * Note: All msdu's in this pending_q corresponds to the same mac id 2182 * due to pdev based reo dest mapping and also since each irq group id 2183 * maps to specific reo dest ring. 
2184 */ 2185 ath11k_dp_rx_process_pending_packets(ab, napi, pending_q, &quota, 2186 mac_id); 2187 2188 /* If all quota is exhausted by processing the pending_q, 2189 * wait for the next napi poll to reap the new info 2190 */ 2191 if (!quota) 2192 goto exit; 2193 2194 __skb_queue_head_init(&msdu_list); 2195 2196 srng = &ab->hal.srng_list[dp->reo_dst_ring.ring_id]; 2197 2198 spin_lock_bh(&srng->lock); 2199 2200 ath11k_hal_srng_access_begin(ab, srng); 2201 2202 try_again: 2203 while ((rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 2204 struct hal_reo_dest_ring *desc = (struct hal_reo_dest_ring *)rx_desc; 2205 enum hal_reo_dest_ring_push_reason push_reason; 2206 u32 cookie; 2207 2208 cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, 2209 desc->buf_addr_info.info1); 2210 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 2211 cookie); 2212 spin_lock_bh(&rx_ring->idr_lock); 2213 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 2214 if (!msdu) { 2215 ath11k_warn(ab, "frame rx with invalid buf_id %d\n", 2216 buf_id); 2217 spin_unlock_bh(&rx_ring->idr_lock); 2218 continue; 2219 } 2220 2221 idr_remove(&rx_ring->bufs_idr, buf_id); 2222 spin_unlock_bh(&rx_ring->idr_lock); 2223 2224 rxcb = ATH11K_SKB_RXCB(msdu); 2225 dma_unmap_single(ab->dev, rxcb->paddr, 2226 msdu->len + skb_tailroom(msdu), 2227 DMA_FROM_DEVICE); 2228 2229 num_buffs_reaped++; 2230 2231 push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON, 2232 desc->info0); 2233 if (push_reason != 2234 HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) { 2235 /* TODO: Check if the msdu can be sent up for processing */ 2236 dev_kfree_skb_any(msdu); 2237 ab->soc_stats.hal_reo_error[dp->reo_dst_ring.ring_id]++; 2238 continue; 2239 } 2240 2241 rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 & 2242 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU); 2243 rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 & 2244 RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU); 2245 rxcb->is_continuation = !!(desc->rx_msdu_info.info0 & 2246 RX_MSDU_DESC_INFO0_MSDU_CONTINUATION); 2247 rxcb->mac_id = mac_id; 2248 __skb_queue_tail(&msdu_list, msdu); 2249 2250 /* Stop reaping from the ring once quota is exhausted 2251 * and we've received all msdu's in the AMSDU. The 2252 * additional msdu's reaped in excess of quota here would 2253 * be pushed into the pending queue to be processed during 2254 * the next napi poll. 2255 * Note: More profiling can be done to see the impact on 2256 * pending_q and throughput during various traffic & density 2257 * and how use of budget instead of remaining quota affects it. 2258 */ 2259 if (num_buffs_reaped >= quota && rxcb->is_last_msdu && 2260 !rxcb->is_continuation) { 2261 done = true; 2262 break; 2263 } 2264 } 2265 2266 /* Hw might have updated the head pointer after we cached it. 2267 * In this case, even though there are entries in the ring we'll 2268 * get rx_desc NULL. Give the read another try with updated cached 2269 * head pointer so that we can reap the complete MPDU in the current 2270 * rx processing. 2271 */ 2272 if (!done && ath11k_hal_srng_dst_num_free(ab, srng, true)) { 2273 ath11k_hal_srng_access_end(ab, srng); 2274 goto try_again; 2275 } 2276 2277 ath11k_hal_srng_access_end(ab, srng); 2278 2279 spin_unlock_bh(&srng->lock); 2280 2281 if (!num_buffs_reaped) 2282 goto exit; 2283 2284 /* Should we reschedule it later if we are not able to replenish all 2285 * the buffers?
2286 */ 2287 ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buffs_reaped, 2288 HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC); 2289 2290 rcu_read_lock(); 2291 if (!rcu_dereference(ab->pdevs_active[mac_id])) { 2292 __skb_queue_purge(&msdu_list); 2293 goto rcu_unlock; 2294 } 2295 2296 if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { 2297 __skb_queue_purge(&msdu_list); 2298 goto rcu_unlock; 2299 } 2300 2301 while (!skb_queue_empty(&msdu_list)) { 2302 __skb_queue_head_init(&amsdu_list); 2303 ret = ath11k_dp_rx_retrieve_amsdu(ar, &msdu_list, &amsdu_list); 2304 if (ret) { 2305 if (ret == -EIO) { 2306 ath11k_err(ab, "rx ring got corrupted %d\n", ret); 2307 __skb_queue_purge(&msdu_list); 2308 /* Should stop processing any more rx in 2309 * future from this ring? 2310 */ 2311 goto rcu_unlock; 2312 } 2313 2314 /* A-MSDU retrieval got failed due to non-fatal condition, 2315 * continue processing with the next msdu. 2316 */ 2317 continue; 2318 } 2319 2320 ath11k_dp_rx_process_amsdu(ar, &amsdu_list, rx_status); 2321 2322 ath11k_dp_rx_pre_deliver_amsdu(ar, &amsdu_list, rx_status); 2323 skb_queue_splice_tail(&amsdu_list, pending_q); 2324 } 2325 2326 while (quota && (msdu = __skb_dequeue(pending_q))) { 2327 ath11k_dp_rx_deliver_msdu(ar, napi, msdu); 2328 quota--; 2329 } 2330 2331 rcu_unlock: 2332 rcu_read_unlock(); 2333 exit: 2334 return budget - quota; 2335 } 2336 2337 static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta, 2338 struct hal_rx_mon_ppdu_info *ppdu_info) 2339 { 2340 struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats; 2341 u32 num_msdu; 2342 2343 if (!rx_stats) 2344 return; 2345 2346 num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count + 2347 ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count; 2348 2349 rx_stats->num_msdu += num_msdu; 2350 rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count + 2351 ppdu_info->tcp_ack_msdu_count; 2352 rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count; 2353 rx_stats->other_msdu_count += ppdu_info->other_msdu_count; 2354 2355 if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A || 2356 ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) { 2357 ppdu_info->nss = 1; 2358 ppdu_info->mcs = HAL_RX_MAX_MCS; 2359 ppdu_info->tid = IEEE80211_NUM_TIDS; 2360 } 2361 2362 if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS) 2363 rx_stats->nss_count[ppdu_info->nss - 1] += num_msdu; 2364 2365 if (ppdu_info->mcs <= HAL_RX_MAX_MCS) 2366 rx_stats->mcs_count[ppdu_info->mcs] += num_msdu; 2367 2368 if (ppdu_info->gi < HAL_RX_GI_MAX) 2369 rx_stats->gi_count[ppdu_info->gi] += num_msdu; 2370 2371 if (ppdu_info->bw < HAL_RX_BW_MAX) 2372 rx_stats->bw_count[ppdu_info->bw] += num_msdu; 2373 2374 if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX) 2375 rx_stats->coding_count[ppdu_info->ldpc] += num_msdu; 2376 2377 if (ppdu_info->tid <= IEEE80211_NUM_TIDS) 2378 rx_stats->tid_count[ppdu_info->tid] += num_msdu; 2379 2380 if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX) 2381 rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu; 2382 2383 if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX) 2384 rx_stats->reception_type[ppdu_info->reception_type] += num_msdu; 2385 2386 if (ppdu_info->is_stbc) 2387 rx_stats->stbc_count += num_msdu; 2388 2389 if (ppdu_info->beamformed) 2390 rx_stats->beamformed_count += num_msdu; 2391 2392 if (ppdu_info->num_mpdu_fcs_ok > 1) 2393 rx_stats->ampdu_msdu_count += num_msdu; 2394 else 2395 rx_stats->non_ampdu_msdu_count += num_msdu; 2396 2397 rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok; 2398 
rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err; 2399 rx_stats->dcm_count += ppdu_info->dcm; 2400 rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu; 2401 2402 arsta->rssi_comb = ppdu_info->rssi_comb; 2403 rx_stats->rx_duration += ppdu_info->rx_duration; 2404 arsta->rx_duration = rx_stats->rx_duration; 2405 } 2406 2407 static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab, 2408 struct dp_rxdma_ring *rx_ring, 2409 int *buf_id, gfp_t gfp) 2410 { 2411 struct sk_buff *skb; 2412 dma_addr_t paddr; 2413 2414 skb = dev_alloc_skb(DP_RX_BUFFER_SIZE + 2415 DP_RX_BUFFER_ALIGN_SIZE); 2416 2417 if (!skb) 2418 goto fail_alloc_skb; 2419 2420 if (!IS_ALIGNED((unsigned long)skb->data, 2421 DP_RX_BUFFER_ALIGN_SIZE)) { 2422 skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) - 2423 skb->data); 2424 } 2425 2426 paddr = dma_map_single(ab->dev, skb->data, 2427 skb->len + skb_tailroom(skb), 2428 DMA_BIDIRECTIONAL); 2429 if (unlikely(dma_mapping_error(ab->dev, paddr))) 2430 goto fail_free_skb; 2431 2432 spin_lock_bh(&rx_ring->idr_lock); 2433 *buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0, 2434 rx_ring->bufs_max, gfp); 2435 spin_unlock_bh(&rx_ring->idr_lock); 2436 if (*buf_id < 0) 2437 goto fail_dma_unmap; 2438 2439 ATH11K_SKB_RXCB(skb)->paddr = paddr; 2440 return skb; 2441 2442 fail_dma_unmap: 2443 dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), 2444 DMA_BIDIRECTIONAL); 2445 fail_free_skb: 2446 dev_kfree_skb_any(skb); 2447 fail_alloc_skb: 2448 return NULL; 2449 } 2450 2451 int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id, 2452 struct dp_rxdma_ring *rx_ring, 2453 int req_entries, 2454 enum hal_rx_buf_return_buf_manager mgr, 2455 gfp_t gfp) 2456 { 2457 struct hal_srng *srng; 2458 u32 *desc; 2459 struct sk_buff *skb; 2460 int num_free; 2461 int num_remain; 2462 int buf_id; 2463 u32 cookie; 2464 dma_addr_t paddr; 2465 2466 req_entries = min(req_entries, rx_ring->bufs_max); 2467 2468 srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; 2469 2470 spin_lock_bh(&srng->lock); 2471 2472 ath11k_hal_srng_access_begin(ab, srng); 2473 2474 num_free = ath11k_hal_srng_src_num_free(ab, srng, true); 2475 2476 req_entries = min(num_free, req_entries); 2477 num_remain = req_entries; 2478 2479 while (num_remain > 0) { 2480 skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring, 2481 &buf_id, gfp); 2482 if (!skb) 2483 break; 2484 paddr = ATH11K_SKB_RXCB(skb)->paddr; 2485 2486 desc = ath11k_hal_srng_src_get_next_entry(ab, srng); 2487 if (!desc) 2488 goto fail_desc_get; 2489 2490 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) | 2491 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 2492 2493 num_remain--; 2494 2495 ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr); 2496 } 2497 2498 ath11k_hal_srng_access_end(ab, srng); 2499 2500 spin_unlock_bh(&srng->lock); 2501 2502 return req_entries - num_remain; 2503 2504 fail_desc_get: 2505 spin_lock_bh(&rx_ring->idr_lock); 2506 idr_remove(&rx_ring->bufs_idr, buf_id); 2507 spin_unlock_bh(&rx_ring->idr_lock); 2508 dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), 2509 DMA_BIDIRECTIONAL); 2510 dev_kfree_skb_any(skb); 2511 ath11k_hal_srng_access_end(ab, srng); 2512 spin_unlock_bh(&srng->lock); 2513 2514 return req_entries - num_remain; 2515 } 2516 2517 static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id, 2518 int *budget, struct sk_buff_head *skb_list) 2519 { 2520 struct ath11k *ar = ab->pdevs[mac_id].ar; 2521 struct ath11k_pdev_dp *dp = &ar->dp; 
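/* Reap status buffers from the monitor status refill ring: buffers whose first TLV is HAL_RX_STATUS_BUFFER_DONE are collected on skb_list, and each reaped ring entry is re-armed with a freshly allocated replacement buffer. */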
2522 struct dp_rxdma_ring *rx_ring = &dp->rx_mon_status_refill_ring; 2523 struct hal_srng *srng; 2524 void *rx_mon_status_desc; 2525 struct sk_buff *skb; 2526 struct ath11k_skb_rxcb *rxcb; 2527 struct hal_tlv_hdr *tlv; 2528 u32 cookie; 2529 int buf_id; 2530 dma_addr_t paddr; 2531 u8 rbm; 2532 int num_buffs_reaped = 0; 2533 2534 srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; 2535 2536 spin_lock_bh(&srng->lock); 2537 2538 ath11k_hal_srng_access_begin(ab, srng); 2539 while (*budget) { 2540 *budget -= 1; 2541 rx_mon_status_desc = 2542 ath11k_hal_srng_src_peek(ab, srng); 2543 if (!rx_mon_status_desc) 2544 break; 2545 2546 ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr, 2547 &cookie, &rbm); 2548 if (paddr) { 2549 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie); 2550 2551 spin_lock_bh(&rx_ring->idr_lock); 2552 skb = idr_find(&rx_ring->bufs_idr, buf_id); 2553 if (!skb) { 2554 ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n", 2555 buf_id); 2556 spin_unlock_bh(&rx_ring->idr_lock); 2557 continue; 2558 } 2559 2560 idr_remove(&rx_ring->bufs_idr, buf_id); 2561 spin_unlock_bh(&rx_ring->idr_lock); 2562 2563 rxcb = ATH11K_SKB_RXCB(skb); 2564 2565 dma_sync_single_for_cpu(ab->dev, rxcb->paddr, 2566 skb->len + skb_tailroom(skb), 2567 DMA_FROM_DEVICE); 2568 2569 dma_unmap_single(ab->dev, rxcb->paddr, 2570 skb->len + skb_tailroom(skb), 2571 DMA_BIDIRECTIONAL); 2572 2573 tlv = (struct hal_tlv_hdr *)skb->data; 2574 if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != 2575 HAL_RX_STATUS_BUFFER_DONE) { 2576 ath11k_hal_srng_src_get_next_entry(ab, srng); 2577 continue; 2578 } 2579 2580 __skb_queue_tail(skb_list, skb); 2581 } 2582 2583 skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring, 2584 &buf_id, GFP_ATOMIC); 2585 2586 if (!skb) { 2587 ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0, 2588 HAL_RX_BUF_RBM_SW3_BM); 2589 num_buffs_reaped++; 2590 break; 2591 } 2592 rxcb = ATH11K_SKB_RXCB(skb); 2593 2594 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) | 2595 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 2596 2597 ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr, 2598 cookie, HAL_RX_BUF_RBM_SW3_BM); 2599 ath11k_hal_srng_src_get_next_entry(ab, srng); 2600 num_buffs_reaped++; 2601 } 2602 ath11k_hal_srng_access_end(ab, srng); 2603 spin_unlock_bh(&srng->lock); 2604 2605 return num_buffs_reaped; 2606 } 2607 2608 int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id, 2609 struct napi_struct *napi, int budget) 2610 { 2611 struct ath11k *ar = ab->pdevs[mac_id].ar; 2612 enum hal_rx_mon_status hal_status; 2613 struct sk_buff *skb; 2614 struct sk_buff_head skb_list; 2615 struct hal_rx_mon_ppdu_info ppdu_info; 2616 struct ath11k_peer *peer; 2617 struct ath11k_sta *arsta; 2618 int num_buffs_reaped = 0; 2619 2620 __skb_queue_head_init(&skb_list); 2621 2622 num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget, 2623 &skb_list); 2624 if (!num_buffs_reaped) 2625 goto exit; 2626 2627 while ((skb = __skb_dequeue(&skb_list))) { 2628 memset(&ppdu_info, 0, sizeof(ppdu_info)); 2629 ppdu_info.peer_id = HAL_INVALID_PEERID; 2630 2631 if (ath11k_debug_is_pktlog_rx_stats_enabled(ar)) 2632 trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE); 2633 2634 hal_status = ath11k_hal_rx_parse_mon_status(ab, &ppdu_info, skb); 2635 2636 if (ppdu_info.peer_id == HAL_INVALID_PEERID || 2637 hal_status != HAL_RX_MON_STATUS_PPDU_DONE) { 2638 dev_kfree_skb_any(skb); 2639 continue; 2640 } 2641 2642 rcu_read_lock(); 2643 spin_lock_bh(&ab->base_lock); 2644 
peer = ath11k_peer_find_by_id(ab, ppdu_info.peer_id); 2645 2646 if (!peer || !peer->sta) { 2647 ath11k_dbg(ab, ATH11K_DBG_DATA, 2648 "failed to find the peer with peer_id %d\n", 2649 ppdu_info.peer_id); 2650 spin_unlock_bh(&ab->base_lock); 2651 rcu_read_unlock(); 2652 dev_kfree_skb_any(skb); 2653 continue; 2654 } 2655 2656 arsta = (struct ath11k_sta *)peer->sta->drv_priv; 2657 ath11k_dp_rx_update_peer_stats(arsta, &ppdu_info); 2658 2659 if (ath11k_debug_is_pktlog_peer_valid(ar, peer->addr)) 2660 trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE); 2661 2662 spin_unlock_bh(&ab->base_lock); 2663 rcu_read_unlock(); 2664 2665 dev_kfree_skb_any(skb); 2666 } 2667 exit: 2668 return num_buffs_reaped; 2669 } 2670 2671 static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab, 2672 u32 *link_desc, 2673 enum hal_wbm_rel_bm_act action) 2674 { 2675 struct ath11k_dp *dp = &ab->dp; 2676 struct hal_srng *srng; 2677 u32 *desc; 2678 int ret = 0; 2679 2680 srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id]; 2681 2682 spin_lock_bh(&srng->lock); 2683 2684 ath11k_hal_srng_access_begin(ab, srng); 2685 2686 desc = ath11k_hal_srng_src_get_next_entry(ab, srng); 2687 if (!desc) { 2688 ret = -ENOBUFS; 2689 goto exit; 2690 } 2691 2692 ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc, 2693 action); 2694 2695 exit: 2696 ath11k_hal_srng_access_end(ab, srng); 2697 2698 spin_unlock_bh(&srng->lock); 2699 2700 return ret; 2701 } 2702 2703 static void ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar, 2704 struct sk_buff *msdu, 2705 struct hal_rx_desc *rx_desc, 2706 struct ieee80211_rx_status *rx_status) 2707 { 2708 u8 rx_channel; 2709 enum hal_encrypt_type enctype; 2710 bool is_decrypted; 2711 u32 err_bitmap; 2712 2713 is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc); 2714 enctype = ath11k_dp_rx_h_mpdu_start_enctype(rx_desc); 2715 err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_desc); 2716 2717 if (err_bitmap & DP_RX_MPDU_ERR_FCS) 2718 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; 2719 2720 if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC) 2721 rx_status->flag |= RX_FLAG_MMIC_ERROR; 2722 2723 rx_status->encoding = RX_ENC_LEGACY; 2724 rx_status->bw = RATE_INFO_BW_20; 2725 2726 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; 2727 2728 rx_channel = ath11k_dp_rx_h_msdu_start_freq(rx_desc); 2729 2730 if (rx_channel >= 1 && rx_channel <= 14) { 2731 rx_status->band = NL80211_BAND_2GHZ; 2732 } else if (rx_channel >= 36 && rx_channel <= 173) { 2733 rx_status->band = NL80211_BAND_5GHZ; 2734 } else { 2735 ath11k_warn(ar->ab, "Unsupported Channel info received %d\n", 2736 rx_channel); 2737 return; 2738 } 2739 2740 rx_status->freq = ieee80211_channel_to_frequency(rx_channel, 2741 rx_status->band); 2742 ath11k_dp_rx_h_rate(ar, rx_desc, rx_status); 2743 2744 /* Rx fragments are received in raw mode */ 2745 skb_trim(msdu, msdu->len - FCS_LEN); 2746 2747 if (is_decrypted) { 2748 rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MIC_STRIPPED; 2749 skb_trim(msdu, msdu->len - 2750 ath11k_dp_rx_crypto_mic_len(ar, enctype)); 2751 } 2752 } 2753 2754 static int 2755 ath11k_dp_process_rx_err_buf(struct ath11k *ar, struct napi_struct *napi, 2756 int buf_id, bool frag) 2757 { 2758 struct ath11k_pdev_dp *dp = &ar->dp; 2759 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; 2760 struct ieee80211_rx_status rx_status = {0}; 2761 struct sk_buff *msdu; 2762 struct ath11k_skb_rxcb *rxcb; 2763 struct ieee80211_rx_status *status; 2764 struct hal_rx_desc *rx_desc; 2765 u16 msdu_len; 2766 2767 spin_lock_bh(&rx_ring->idr_lock); 2768 msdu = 
idr_find(&rx_ring->bufs_idr, buf_id); 2769 if (!msdu) { 2770 ath11k_warn(ar->ab, "rx err buf with invalid buf_id %d\n", 2771 buf_id); 2772 spin_unlock_bh(&rx_ring->idr_lock); 2773 return -EINVAL; 2774 } 2775 2776 idr_remove(&rx_ring->bufs_idr, buf_id); 2777 spin_unlock_bh(&rx_ring->idr_lock); 2778 2779 rxcb = ATH11K_SKB_RXCB(msdu); 2780 dma_unmap_single(ar->ab->dev, rxcb->paddr, 2781 msdu->len + skb_tailroom(msdu), 2782 DMA_FROM_DEVICE); 2783 2784 if (!frag) { 2785 /* Process only rx fragments below, and drop 2786 * msdu's indicated due to error reasons. 2787 */ 2788 dev_kfree_skb_any(msdu); 2789 return 0; 2790 } 2791 2792 rcu_read_lock(); 2793 if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) { 2794 dev_kfree_skb_any(msdu); 2795 goto exit; 2796 } 2797 2798 if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { 2799 dev_kfree_skb_any(msdu); 2800 goto exit; 2801 } 2802 2803 rx_desc = (struct hal_rx_desc *)msdu->data; 2804 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc); 2805 skb_put(msdu, HAL_RX_DESC_SIZE + msdu_len); 2806 skb_pull(msdu, HAL_RX_DESC_SIZE); 2807 2808 ath11k_dp_rx_frag_h_mpdu(ar, msdu, rx_desc, &rx_status); 2809 2810 status = IEEE80211_SKB_RXCB(msdu); 2811 2812 *status = rx_status; 2813 2814 ath11k_dp_rx_deliver_msdu(ar, napi, msdu); 2815 2816 exit: 2817 rcu_read_unlock(); 2818 return 0; 2819 } 2820 2821 int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi, 2822 int budget) 2823 { 2824 u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC]; 2825 struct dp_link_desc_bank *link_desc_banks; 2826 enum hal_rx_buf_return_buf_manager rbm; 2827 int tot_n_bufs_reaped, quota, ret, i; 2828 int n_bufs_reaped[MAX_RADIOS] = {0}; 2829 struct dp_rxdma_ring *rx_ring; 2830 struct dp_srng *reo_except; 2831 u32 desc_bank, num_msdus; 2832 struct hal_srng *srng; 2833 struct ath11k_dp *dp; 2834 void *link_desc_va; 2835 int buf_id, mac_id; 2836 struct ath11k *ar; 2837 dma_addr_t paddr; 2838 u32 *desc; 2839 bool is_frag; 2840 2841 tot_n_bufs_reaped = 0; 2842 quota = budget; 2843 2844 dp = &ab->dp; 2845 reo_except = &dp->reo_except_ring; 2846 link_desc_banks = dp->link_desc_banks; 2847 2848 srng = &ab->hal.srng_list[reo_except->ring_id]; 2849 2850 spin_lock_bh(&srng->lock); 2851 2852 ath11k_hal_srng_access_begin(ab, srng); 2853 2854 while (budget && 2855 (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 2856 struct hal_reo_dest_ring *reo_desc = (struct hal_reo_dest_ring *)desc; 2857 2858 ab->soc_stats.err_ring_pkts++; 2859 ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr, 2860 &desc_bank); 2861 if (ret) { 2862 ath11k_warn(ab, "failed to parse error reo desc %d\n", 2863 ret); 2864 continue; 2865 } 2866 link_desc_va = link_desc_banks[desc_bank].vaddr + 2867 (paddr - link_desc_banks[desc_bank].paddr); 2868 ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies, 2869 &rbm); 2870 if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST && 2871 rbm != HAL_RX_BUF_RBM_SW3_BM) { 2872 ab->soc_stats.invalid_rbm++; 2873 ath11k_warn(ab, "invalid return buffer manager %d\n", rbm); 2874 ath11k_dp_rx_link_desc_return(ab, desc, 2875 HAL_WBM_REL_BM_ACT_REL_MSDU); 2876 continue; 2877 } 2878 2879 is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG); 2880 2881 /* Return the link desc back to wbm idle list */ 2882 ath11k_dp_rx_link_desc_return(ab, desc, 2883 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 2884 2885 for (i = 0; i < num_msdus; i++) { 2886 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 2887 msdu_cookies[i]); 2888 2889 mac_id = 
FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, 2890 msdu_cookies[i]); 2891 2892 ar = ab->pdevs[mac_id].ar; 2893 2894 if (!ath11k_dp_process_rx_err_buf(ar, napi, buf_id, 2895 is_frag)) { 2896 n_bufs_reaped[mac_id]++; 2897 tot_n_bufs_reaped++; 2898 } 2899 } 2900 2901 if (tot_n_bufs_reaped >= quota) { 2902 tot_n_bufs_reaped = quota; 2903 goto exit; 2904 } 2905 2906 budget = quota - tot_n_bufs_reaped; 2907 } 2908 2909 exit: 2910 ath11k_hal_srng_access_end(ab, srng); 2911 2912 spin_unlock_bh(&srng->lock); 2913 2914 for (i = 0; i < ab->num_radios; i++) { 2915 if (!n_bufs_reaped[i]) 2916 continue; 2917 2918 ar = ab->pdevs[i].ar; 2919 rx_ring = &ar->dp.rx_refill_buf_ring; 2920 2921 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i], 2922 HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC); 2923 } 2924 2925 return tot_n_bufs_reaped; 2926 } 2927 2928 static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar, 2929 int msdu_len, 2930 struct sk_buff_head *msdu_list) 2931 { 2932 struct sk_buff *skb, *tmp; 2933 struct ath11k_skb_rxcb *rxcb; 2934 int n_buffs; 2935 2936 n_buffs = DIV_ROUND_UP(msdu_len, 2937 (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE)); 2938 2939 skb_queue_walk_safe(msdu_list, skb, tmp) { 2940 rxcb = ATH11K_SKB_RXCB(skb); 2941 if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO && 2942 rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) { 2943 if (!n_buffs) 2944 break; 2945 __skb_unlink(skb, msdu_list); 2946 dev_kfree_skb_any(skb); 2947 n_buffs--; 2948 } 2949 } 2950 } 2951 2952 static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu, 2953 struct ieee80211_rx_status *status, 2954 struct sk_buff_head *msdu_list) 2955 { 2956 struct sk_buff_head amsdu_list; 2957 u16 msdu_len; 2958 struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; 2959 u8 l3pad_bytes; 2960 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 2961 2962 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc); 2963 2964 if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) { 2965 /* First buffer will be freed by the caller, so deduct its length */ 2966 msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE); 2967 ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list); 2968 return -EINVAL; 2969 } 2970 2971 if (!ath11k_dp_rx_h_attn_msdu_done(desc)) { 2972 ath11k_warn(ar->ab, 2973 "msdu_done bit not set in null_q_des processing\n"); 2974 __skb_queue_purge(msdu_list); 2975 return -EIO; 2976 } 2977 2978 /* Handle NULL queue descriptor violations arising out of a missing 2979 * REO queue for a given peer or a given TID. This typically 2980 * happens if a packet is received on a QoS enabled TID before the 2981 * ADDBA negotiation for that TID has set up the TID queue. It 2982 * may also happen for MC/BC frames if they are not routed to the 2983 * non-QoS TID queue, in the absence of any other default TID queue. 2984 * This error can show up in either a REO destination ring or the WBM release ring.
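* The frame itself is still intact when this happens, so rather than dropping it the code below trims the rx descriptor and L3 padding off the msdu and feeds it through the regular ppdu/mpdu handlers.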
2985 */ 2986 2987 __skb_queue_head_init(&amsdu_list); 2988 2989 rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc); 2990 rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc); 2991 2992 l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc); 2993 2994 if ((HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE) 2995 return -EINVAL; 2996 2997 skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len); 2998 skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes); 2999 3000 ath11k_dp_rx_h_ppdu(ar, desc, status); 3001 3002 __skb_queue_tail(&amsdu_list, msdu); 3003 3004 ath11k_dp_rx_h_mpdu(ar, &amsdu_list, desc, status); 3005 3006 /* Please note that the caller will have access to the msdu and will 3007 * complete rx processing with mac80211; no need to clean up amsdu_list here. 3008 */ 3009 3010 return 0; 3011 } 3012 3013 static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu, 3014 struct ieee80211_rx_status *status, 3015 struct sk_buff_head *msdu_list) 3016 { 3017 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 3018 bool drop = false; 3019 3020 ar->ab->soc_stats.reo_error[rxcb->err_code]++; 3021 3022 switch (rxcb->err_code) { 3023 case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO: 3024 if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list)) 3025 drop = true; 3026 break; 3027 default: 3028 /* TODO: Review other errors and report them to mac80211 3029 * as appropriate. 3030 */ 3031 drop = true; 3032 break; 3033 } 3034 3035 return drop; 3036 } 3037 3038 static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu, 3039 struct ieee80211_rx_status *status) 3040 { 3041 u16 msdu_len; 3042 struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; 3043 u8 l3pad_bytes; 3044 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 3045 3046 rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc); 3047 rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc); 3048 3049 l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc); 3050 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc); 3051 skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len); 3052 skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes); 3053 3054 ath11k_dp_rx_h_ppdu(ar, desc, status); 3055 3056 status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR | 3057 RX_FLAG_DECRYPTED); 3058 3059 ath11k_dp_rx_h_undecap(ar, msdu, desc, 3060 HAL_ENCRYPT_TYPE_TKIP_MIC, status, false); 3061 } 3062 3063 static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar, struct sk_buff *msdu, 3064 struct ieee80211_rx_status *status) 3065 { 3066 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 3067 bool drop = false; 3068 3069 ar->ab->soc_stats.rxdma_error[rxcb->err_code]++; 3070 3071 switch (rxcb->err_code) { 3072 case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR: 3073 ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status); 3074 break; 3075 default: 3076 /* TODO: Review other rxdma error codes to check if anything is 3077 * worth reporting to mac80211 3078 */ 3079 drop = true; 3080 break; 3081 } 3082 3083 return drop; 3084 } 3085 3086 static void ath11k_dp_rx_wbm_err(struct ath11k *ar, 3087 struct napi_struct *napi, 3088 struct sk_buff *msdu, 3089 struct sk_buff_head *msdu_list) 3090 { 3091 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 3092 struct ieee80211_rx_status rxs = {0}; 3093 struct ieee80211_rx_status *status; 3094 bool drop = true; 3095 3096 switch (rxcb->err_rel_src) { 3097 case HAL_WBM_REL_SRC_MODULE_REO: 3098 drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list); 3099 break; 3100 case
HAL_WBM_REL_SRC_MODULE_RXDMA: 3101 drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs); 3102 break; 3103 default: 3104 /* msdu will get freed */ 3105 break; 3106 } 3107 3108 if (drop) { 3109 dev_kfree_skb_any(msdu); 3110 return; 3111 } 3112 3113 status = IEEE80211_SKB_RXCB(msdu); 3114 *status = rxs; 3115 3116 ath11k_dp_rx_deliver_msdu(ar, napi, msdu); 3117 } 3118 3119 int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab, 3120 struct napi_struct *napi, int budget) 3121 { 3122 struct ath11k *ar; 3123 struct ath11k_dp *dp = &ab->dp; 3124 struct dp_rxdma_ring *rx_ring; 3125 struct hal_rx_wbm_rel_info err_info; 3126 struct hal_srng *srng; 3127 struct sk_buff *msdu; 3128 struct sk_buff_head msdu_list[MAX_RADIOS]; 3129 struct ath11k_skb_rxcb *rxcb; 3130 u32 *rx_desc; 3131 int buf_id, mac_id; 3132 int num_buffs_reaped[MAX_RADIOS] = {0}; 3133 int total_num_buffs_reaped = 0; 3134 int ret, i; 3135 3136 for (i = 0; i < MAX_RADIOS; i++) 3137 __skb_queue_head_init(&msdu_list[i]); 3138 3139 srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id]; 3140 3141 spin_lock_bh(&srng->lock); 3142 3143 ath11k_hal_srng_access_begin(ab, srng); 3144 3145 while (budget) { 3146 rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng); 3147 if (!rx_desc) 3148 break; 3149 3150 ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info); 3151 if (ret) { 3152 ath11k_warn(ab, 3153 "failed to parse rx error in wbm_rel ring desc %d\n", 3154 ret); 3155 continue; 3156 } 3157 3158 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie); 3159 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie); 3160 3161 ar = ab->pdevs[mac_id].ar; 3162 rx_ring = &ar->dp.rx_refill_buf_ring; 3163 3164 spin_lock_bh(&rx_ring->idr_lock); 3165 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 3166 if (!msdu) { 3167 ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d\n", 3168 buf_id, mac_id); 3169 spin_unlock_bh(&rx_ring->idr_lock); 3170 continue; 3171 } 3172 3173 idr_remove(&rx_ring->bufs_idr, buf_id); 3174 spin_unlock_bh(&rx_ring->idr_lock); 3175 3176 rxcb = ATH11K_SKB_RXCB(msdu); 3177 dma_unmap_single(ab->dev, rxcb->paddr, 3178 msdu->len + skb_tailroom(msdu), 3179 DMA_FROM_DEVICE); 3180 3181 num_buffs_reaped[mac_id]++; 3182 total_num_buffs_reaped++; 3183 budget--; 3184 3185 if (err_info.push_reason != 3186 HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) { 3187 dev_kfree_skb_any(msdu); 3188 continue; 3189 } 3190 3191 rxcb->err_rel_src = err_info.err_rel_src; 3192 rxcb->err_code = err_info.err_code; 3193 rxcb->rx_desc = (struct hal_rx_desc *)msdu->data; 3194 __skb_queue_tail(&msdu_list[mac_id], msdu); 3195 } 3196 3197 ath11k_hal_srng_access_end(ab, srng); 3198 3199 spin_unlock_bh(&srng->lock); 3200 3201 if (!total_num_buffs_reaped) 3202 goto done; 3203 3204 for (i = 0; i < ab->num_radios; i++) { 3205 if (!num_buffs_reaped[i]) 3206 continue; 3207 3208 ar = ab->pdevs[i].ar; 3209 rx_ring = &ar->dp.rx_refill_buf_ring; 3210 3211 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i], 3212 HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC); 3213 } 3214 3215 rcu_read_lock(); 3216 for (i = 0; i < ab->num_radios; i++) { 3217 if (!rcu_dereference(ab->pdevs_active[i])) { 3218 __skb_queue_purge(&msdu_list[i]); 3219 continue; 3220 } 3221 3222 ar = ab->pdevs[i].ar; 3223 3224 if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { 3225 __skb_queue_purge(&msdu_list[i]); 3226 continue; 3227 } 3228 3229 while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL) 3230 ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]); 3231 } 3232 rcu_read_unlock(); 3233 done: 3234 return 
total_num_buffs_reaped; 3235 } 3236 3237 int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget) 3238 { 3239 struct ath11k *ar = ab->pdevs[mac_id].ar; 3240 struct dp_srng *err_ring = &ar->dp.rxdma_err_dst_ring; 3241 struct dp_rxdma_ring *rx_ring = &ar->dp.rx_refill_buf_ring; 3242 struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks; 3243 struct hal_srng *srng; 3244 u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC]; 3245 enum hal_rx_buf_return_buf_manager rbm; 3246 enum hal_reo_entr_rxdma_ecode rxdma_err_code; 3247 struct ath11k_skb_rxcb *rxcb; 3248 struct sk_buff *skb; 3249 struct hal_reo_entrance_ring *entr_ring; 3250 void *desc; 3251 int num_buf_freed = 0; 3252 int quota = budget; 3253 dma_addr_t paddr; 3254 u32 desc_bank; 3255 void *link_desc_va; 3256 int num_msdus; 3257 int i; 3258 int buf_id; 3259 3260 srng = &ab->hal.srng_list[err_ring->ring_id]; 3261 3262 spin_lock_bh(&srng->lock); 3263 3264 ath11k_hal_srng_access_begin(ab, srng); 3265 3266 while (quota-- && 3267 (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 3268 ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank); 3269 3270 entr_ring = (struct hal_reo_entrance_ring *)desc; 3271 rxdma_err_code = 3272 FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE, 3273 entr_ring->info1); 3274 ab->soc_stats.rxdma_error[rxdma_err_code]++; 3275 3276 link_desc_va = link_desc_banks[desc_bank].vaddr + 3277 (paddr - link_desc_banks[desc_bank].paddr); 3278 ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, 3279 msdu_cookies, &rbm); 3280 3281 for (i = 0; i < num_msdus; i++) { 3282 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 3283 msdu_cookies[i]); 3284 3285 spin_lock_bh(&rx_ring->idr_lock); 3286 skb = idr_find(&rx_ring->bufs_idr, buf_id); 3287 if (!skb) { 3288 ath11k_warn(ab, "rxdma error with invalid buf_id %d\n", 3289 buf_id); 3290 spin_unlock_bh(&rx_ring->idr_lock); 3291 continue; 3292 } 3293 3294 idr_remove(&rx_ring->bufs_idr, buf_id); 3295 spin_unlock_bh(&rx_ring->idr_lock); 3296 3297 rxcb = ATH11K_SKB_RXCB(skb); 3298 dma_unmap_single(ab->dev, rxcb->paddr, 3299 skb->len + skb_tailroom(skb), 3300 DMA_FROM_DEVICE); 3301 dev_kfree_skb_any(skb); 3302 3303 num_buf_freed++; 3304 } 3305 3306 ath11k_dp_rx_link_desc_return(ab, desc, 3307 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 3308 } 3309 3310 ath11k_hal_srng_access_end(ab, srng); 3311 3312 spin_unlock_bh(&srng->lock); 3313 3314 if (num_buf_freed) 3315 ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed, 3316 HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC); 3317 3318 return budget - quota; 3319 } 3320 3321 void ath11k_dp_process_reo_status(struct ath11k_base *ab) 3322 { 3323 struct ath11k_dp *dp = &ab->dp; 3324 struct hal_srng *srng; 3325 struct dp_reo_cmd *cmd, *tmp; 3326 bool found = false; 3327 u32 *reo_desc; 3328 u16 tag; 3329 struct hal_reo_status reo_status; 3330 3331 srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id]; 3332 3333 memset(&reo_status, 0, sizeof(reo_status)); 3334 3335 spin_lock_bh(&srng->lock); 3336 3337 ath11k_hal_srng_access_begin(ab, srng); 3338 3339 while ((reo_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 3340 tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc); 3341 3342 switch (tag) { 3343 case HAL_REO_GET_QUEUE_STATS_STATUS: 3344 ath11k_hal_reo_status_queue_stats(ab, reo_desc, 3345 &reo_status); 3346 break; 3347 case HAL_REO_FLUSH_QUEUE_STATUS: 3348 ath11k_hal_reo_flush_queue_status(ab, reo_desc, 3349 &reo_status); 3350 break; 3351 case HAL_REO_FLUSH_CACHE_STATUS: 3352 ath11k_hal_reo_flush_cache_status(ab, 
reo_desc, 3353 &reo_status); 3354 break; 3355 case HAL_REO_UNBLOCK_CACHE_STATUS: 3356 ath11k_hal_reo_unblk_cache_status(ab, reo_desc, 3357 &reo_status); 3358 break; 3359 case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS: 3360 ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc, 3361 &reo_status); 3362 break; 3363 case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS: 3364 ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc, 3365 &reo_status); 3366 break; 3367 case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS: 3368 ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc, 3369 &reo_status); 3370 break; 3371 default: 3372 ath11k_warn(ab, "Unknown reo status type %d\n", tag); 3373 continue; 3374 } 3375 3376 spin_lock_bh(&dp->reo_cmd_lock); 3377 list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) { 3378 if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) { 3379 found = true; 3380 list_del(&cmd->list); 3381 break; 3382 } 3383 } 3384 spin_unlock_bh(&dp->reo_cmd_lock); 3385 3386 if (found) { 3387 cmd->handler(dp, (void *)&cmd->data, 3388 reo_status.uniform_hdr.cmd_status); 3389 kfree(cmd); 3390 } 3391 3392 found = false; 3393 } 3394 3395 ath11k_hal_srng_access_end(ab, srng); 3396 3397 spin_unlock_bh(&srng->lock); 3398 } 3399 3400 void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id) 3401 { 3402 struct ath11k *ar = ab->pdevs[mac_id].ar; 3403 3404 ath11k_dp_rx_pdev_srng_free(ar); 3405 ath11k_dp_rxdma_pdev_buf_free(ar); 3406 } 3407 3408 int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id) 3409 { 3410 struct ath11k *ar = ab->pdevs[mac_id].ar; 3411 struct ath11k_pdev_dp *dp = &ar->dp; 3412 u32 ring_id; 3413 int ret; 3414 3415 ret = ath11k_dp_rx_pdev_srng_alloc(ar); 3416 if (ret) { 3417 ath11k_warn(ab, "failed to setup rx srngs\n"); 3418 return ret; 3419 } 3420 3421 ret = ath11k_dp_rxdma_pdev_buf_setup(ar); 3422 if (ret) { 3423 ath11k_warn(ab, "failed to setup rxdma ring\n"); 3424 return ret; 3425 } 3426 3427 ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id; 3428 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF); 3429 if (ret) { 3430 ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n", 3431 ret); 3432 return ret; 3433 } 3434 3435 ring_id = dp->rxdma_err_dst_ring.ring_id; 3436 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_DST); 3437 if (ret) { 3438 ath11k_warn(ab, "failed to configure rxdma_err_dest_ring %d\n", 3439 ret); 3440 return ret; 3441 } 3442 3443 ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id; 3444 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, 3445 mac_id, HAL_RXDMA_MONITOR_BUF); 3446 if (ret) { 3447 ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n", 3448 ret); 3449 return ret; 3450 } 3451 ret = ath11k_dp_tx_htt_srng_setup(ab, 3452 dp->rxdma_mon_dst_ring.ring_id, 3453 mac_id, HAL_RXDMA_MONITOR_DST); 3454 if (ret) { 3455 ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n", 3456 ret); 3457 return ret; 3458 } 3459 ret = ath11k_dp_tx_htt_srng_setup(ab, 3460 dp->rxdma_mon_desc_ring.ring_id, 3461 mac_id, HAL_RXDMA_MONITOR_DESC); 3462 if (ret) { 3463 ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n", 3464 ret); 3465 return ret; 3466 } 3467 ring_id = dp->rx_mon_status_refill_ring.refill_buf_ring.ring_id; 3468 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, 3469 HAL_RXDMA_MONITOR_STATUS); 3470 if (ret) { 3471 ath11k_warn(ab, 3472 "failed to configure mon_status_refill_ring %d\n", 3473 ret); 3474 return ret; 3475 } 3476 return 0; 3477 } 3478 3479 static void ath11k_dp_mon_set_frag_len(u32 
*total_len, u32 *frag_len) 3480 { 3481 if (*total_len >= (DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc))) { 3482 *frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc); 3483 *total_len -= *frag_len; 3484 } else { 3485 *frag_len = *total_len; 3486 *total_len = 0; 3487 } 3488 } 3489 3490 static 3491 int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar, 3492 void *p_last_buf_addr_info, 3493 u8 mac_id) 3494 { 3495 struct ath11k_pdev_dp *dp = &ar->dp; 3496 struct dp_srng *dp_srng; 3497 void *hal_srng; 3498 void *src_srng_desc; 3499 int ret = 0; 3500 3501 dp_srng = &dp->rxdma_mon_desc_ring; 3502 hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id]; 3503 3504 ath11k_hal_srng_access_begin(ar->ab, hal_srng); 3505 3506 src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng); 3507 3508 if (src_srng_desc) { 3509 struct ath11k_buffer_addr *src_desc = 3510 (struct ath11k_buffer_addr *)src_srng_desc; 3511 3512 *src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info); 3513 } else { 3514 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 3515 "Monitor Link Desc Ring %d Full", mac_id); 3516 ret = -ENOMEM; 3517 } 3518 3519 ath11k_hal_srng_access_end(ar->ab, hal_srng); 3520 return ret; 3521 } 3522 3523 static 3524 void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc, 3525 dma_addr_t *paddr, u32 *sw_cookie, 3526 void **pp_buf_addr_info) 3527 { 3528 struct hal_rx_msdu_link *msdu_link = 3529 (struct hal_rx_msdu_link *)rx_msdu_link_desc; 3530 struct ath11k_buffer_addr *buf_addr_info; 3531 u8 rbm = 0; 3532 3533 buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info; 3534 3535 ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, &rbm); 3536 3537 *pp_buf_addr_info = (void *)buf_addr_info; 3538 } 3539 3540 static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len) 3541 { 3542 if (skb->len > len) { 3543 skb_trim(skb, len); 3544 } else { 3545 if (skb_tailroom(skb) < len - skb->len) { 3546 if ((pskb_expand_head(skb, 0, 3547 len - skb->len - skb_tailroom(skb), 3548 GFP_ATOMIC))) { 3549 dev_kfree_skb_any(skb); 3550 return -ENOMEM; 3551 } 3552 } 3553 skb_put(skb, (len - skb->len)); 3554 } 3555 return 0; 3556 } 3557 3558 static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar, 3559 void *msdu_link_desc, 3560 struct hal_rx_msdu_list *msdu_list, 3561 u16 *num_msdus) 3562 { 3563 struct hal_rx_msdu_details *msdu_details = NULL; 3564 struct rx_msdu_desc *msdu_desc_info = NULL; 3565 struct hal_rx_msdu_link *msdu_link = NULL; 3566 int i; 3567 u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1); 3568 u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1); 3569 u8 tmp = 0; 3570 3571 msdu_link = (struct hal_rx_msdu_link *)msdu_link_desc; 3572 msdu_details = &msdu_link->msdu_link[0]; 3573 3574 for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) { 3575 if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR, 3576 msdu_details[i].buf_addr_info.info0) == 0) { 3577 msdu_desc_info = &msdu_details[i - 1].rx_msdu_info; 3578 msdu_desc_info->info0 |= last; 3579 ; 3580 break; 3581 } 3582 msdu_desc_info = &msdu_details[i].rx_msdu_info; 3583 3584 if (!i) 3585 msdu_desc_info->info0 |= first; 3586 else if (i == (HAL_RX_NUM_MSDU_DESC - 1)) 3587 msdu_desc_info->info0 |= last; 3588 msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0; 3589 msdu_list->msdu_info[i].msdu_len = 3590 HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0); 3591 msdu_list->sw_cookie[i] = 3592 FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, 3593 msdu_details[i].buf_addr_info.info1); 3594 tmp = 
FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR, 3595 msdu_details[i].buf_addr_info.info1); 3596 msdu_list->rbm[i] = tmp; 3597 } 3598 *num_msdus = i; 3599 } 3600 3601 static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id, 3602 u32 *rx_bufs_used) 3603 { 3604 u32 ret = 0; 3605 3606 if ((*ppdu_id < msdu_ppdu_id) && 3607 ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) { 3608 *ppdu_id = msdu_ppdu_id; 3609 ret = msdu_ppdu_id; 3610 } else if ((*ppdu_id > msdu_ppdu_id) && 3611 ((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) { 3612 /* mon_dst is behind than mon_status 3613 * skip dst_ring and free it 3614 */ 3615 *rx_bufs_used += 1; 3616 *ppdu_id = msdu_ppdu_id; 3617 ret = msdu_ppdu_id; 3618 } 3619 return ret; 3620 } 3621 3622 static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info, 3623 bool *is_frag, u32 *total_len, 3624 u32 *frag_len, u32 *msdu_cnt) 3625 { 3626 if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) { 3627 if (!*is_frag) { 3628 *total_len = info->msdu_len; 3629 *is_frag = true; 3630 } 3631 ath11k_dp_mon_set_frag_len(total_len, 3632 frag_len); 3633 } else { 3634 if (*is_frag) { 3635 ath11k_dp_mon_set_frag_len(total_len, 3636 frag_len); 3637 } else { 3638 *frag_len = info->msdu_len; 3639 } 3640 *is_frag = false; 3641 *msdu_cnt -= 1; 3642 } 3643 } 3644 3645 static u32 3646 ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, 3647 void *ring_entry, struct sk_buff **head_msdu, 3648 struct sk_buff **tail_msdu, u32 *npackets, 3649 u32 *ppdu_id) 3650 { 3651 struct ath11k_pdev_dp *dp = &ar->dp; 3652 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 3653 struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring; 3654 struct sk_buff *msdu = NULL, *last = NULL; 3655 struct hal_rx_msdu_list msdu_list; 3656 void *p_buf_addr_info, *p_last_buf_addr_info; 3657 struct hal_rx_desc *rx_desc; 3658 void *rx_msdu_link_desc; 3659 dma_addr_t paddr; 3660 u16 num_msdus = 0; 3661 u32 rx_buf_size, rx_pkt_offset, sw_cookie; 3662 u32 rx_bufs_used = 0, i = 0; 3663 u32 msdu_ppdu_id = 0, msdu_cnt = 0; 3664 u32 total_len = 0, frag_len = 0; 3665 bool is_frag, is_first_msdu; 3666 bool drop_mpdu = false; 3667 struct ath11k_skb_rxcb *rxcb; 3668 struct hal_reo_entrance_ring *ent_desc = 3669 (struct hal_reo_entrance_ring *)ring_entry; 3670 int buf_id; 3671 3672 ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr, 3673 &sw_cookie, &p_last_buf_addr_info, 3674 &msdu_cnt); 3675 3676 if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON, 3677 ent_desc->info1) == 3678 HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) { 3679 u8 rxdma_err = 3680 FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE, 3681 ent_desc->info1); 3682 if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR || 3683 rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR || 3684 rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) { 3685 drop_mpdu = true; 3686 pmon->rx_mon_stats.dest_mpdu_drop++; 3687 } 3688 } 3689 3690 is_frag = false; 3691 is_first_msdu = true; 3692 3693 do { 3694 if (pmon->mon_last_linkdesc_paddr == paddr) { 3695 pmon->rx_mon_stats.dup_mon_linkdesc_cnt++; 3696 return rx_bufs_used; 3697 } 3698 3699 rx_msdu_link_desc = 3700 (void *)pmon->link_desc_banks[sw_cookie].vaddr + 3701 (paddr - pmon->link_desc_banks[sw_cookie].paddr); 3702 3703 ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list, 3704 &num_msdus); 3705 3706 for (i = 0; i < num_msdus; i++) { 3707 u32 l2_hdr_offset; 3708 3709 if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) { 3710 
ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 3711 "i %d last_cookie %d is same\n", 3712 i, pmon->mon_last_buf_cookie); 3713 drop_mpdu = true; 3714 pmon->rx_mon_stats.dup_mon_buf_cnt++; 3715 continue; 3716 } 3717 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 3718 msdu_list.sw_cookie[i]); 3719 3720 spin_lock_bh(&rx_ring->idr_lock); 3721 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 3722 spin_unlock_bh(&rx_ring->idr_lock); 3723 if (!msdu) { 3724 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 3725 "msdu_pop: invalid buf_id %d\n", buf_id); 3726 break; 3727 } 3728 rxcb = ATH11K_SKB_RXCB(msdu); 3729 if (!rxcb->unmapped) { 3730 dma_unmap_single(ar->ab->dev, rxcb->paddr, 3731 msdu->len + 3732 skb_tailroom(msdu), 3733 DMA_FROM_DEVICE); 3734 rxcb->unmapped = 1; 3735 } 3736 if (drop_mpdu) { 3737 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 3738 "i %d drop msdu %p *ppdu_id %x\n", 3739 i, msdu, *ppdu_id); 3740 dev_kfree_skb_any(msdu); 3741 msdu = NULL; 3742 goto next_msdu; 3743 } 3744 3745 rx_desc = (struct hal_rx_desc *)msdu->data; 3746 3747 rx_pkt_offset = sizeof(struct hal_rx_desc); 3748 l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(rx_desc); 3749 3750 if (is_first_msdu) { 3751 if (!ath11k_dp_rxdesc_mpdu_valid(rx_desc)) { 3752 drop_mpdu = true; 3753 dev_kfree_skb_any(msdu); 3754 msdu = NULL; 3755 pmon->mon_last_linkdesc_paddr = paddr; 3756 goto next_msdu; 3757 } 3758 3759 msdu_ppdu_id = 3760 ath11k_dp_rxdesc_get_ppduid(rx_desc); 3761 3762 if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id, 3763 ppdu_id, 3764 &rx_bufs_used)) { 3765 if (rx_bufs_used) { 3766 drop_mpdu = true; 3767 dev_kfree_skb_any(msdu); 3768 msdu = NULL; 3769 goto next_msdu; 3770 } 3771 return rx_bufs_used; 3772 } 3773 pmon->mon_last_linkdesc_paddr = paddr; 3774 is_first_msdu = false; 3775 } 3776 ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i], 3777 &is_frag, &total_len, 3778 &frag_len, &msdu_cnt); 3779 rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len; 3780 3781 ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size); 3782 3783 if (!(*head_msdu)) 3784 *head_msdu = msdu; 3785 else if (last) 3786 last->next = msdu; 3787 3788 last = msdu; 3789 next_msdu: 3790 pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i]; 3791 rx_bufs_used++; 3792 spin_lock_bh(&rx_ring->idr_lock); 3793 idr_remove(&rx_ring->bufs_idr, buf_id); 3794 spin_unlock_bh(&rx_ring->idr_lock); 3795 } 3796 3797 ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr, 3798 &sw_cookie, 3799 &p_buf_addr_info); 3800 3801 if (ath11k_dp_rx_monitor_link_desc_return(ar, 3802 p_last_buf_addr_info, 3803 dp->mac_id)) 3804 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 3805 "dp_rx_monitor_link_desc_return failed"); 3806 3807 p_last_buf_addr_info = p_buf_addr_info; 3808 3809 } while (paddr && msdu_cnt); 3810 3811 if (last) 3812 last->next = NULL; 3813 3814 *tail_msdu = msdu; 3815 3816 if (msdu_cnt == 0) 3817 *npackets = 1; 3818 3819 return rx_bufs_used; 3820 } 3821 3822 static void ath11k_dp_rx_msdus_set_payload(struct sk_buff *msdu) 3823 { 3824 u32 rx_pkt_offset, l2_hdr_offset; 3825 3826 rx_pkt_offset = sizeof(struct hal_rx_desc); 3827 l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad((struct hal_rx_desc *)msdu->data); 3828 skb_pull(msdu, rx_pkt_offset + l2_hdr_offset); 3829 } 3830 3831 static struct sk_buff * 3832 ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar, 3833 u32 mac_id, struct sk_buff *head_msdu, 3834 struct sk_buff *last_msdu, 3835 struct ieee80211_rx_status *rxs) 3836 { 3837 struct sk_buff *msdu, *mpdu_buf, *prev_buf; 3838 u32 decap_format, wifi_hdr_len; 3839 struct hal_rx_desc *rx_desc; 3840 char *hdr_desc; 3841 u8 *dest; 
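/* Rebuild one monitor frame from the popped msdu chain: for raw decap the per-buffer rx descriptors are stripped and the trailing FCS trimmed, while for native wifi decap a QoS 802.11 header (using the QoS control field captured from the first subframe) is restored in front of each subframe's payload. */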
3842 struct ieee80211_hdr_3addr *wh; 3843 3844 mpdu_buf = NULL; 3845 3846 if (!head_msdu) 3847 goto err_merge_fail; 3848 3849 rx_desc = (struct hal_rx_desc *)head_msdu->data; 3850 3851 if (ath11k_dp_rxdesc_get_mpdulen_err(rx_desc)) 3852 return NULL; 3853 3854 decap_format = ath11k_dp_rxdesc_get_decap_format(rx_desc); 3855 3856 ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs); 3857 3858 if (decap_format == DP_RX_DECAP_TYPE_RAW) { 3859 ath11k_dp_rx_msdus_set_payload(head_msdu); 3860 3861 prev_buf = head_msdu; 3862 msdu = head_msdu->next; 3863 3864 while (msdu) { 3865 ath11k_dp_rx_msdus_set_payload(msdu); 3866 3867 prev_buf = msdu; 3868 msdu = msdu->next; 3869 } 3870 3871 prev_buf->next = NULL; 3872 3873 skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN); 3874 } else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) { 3875 __le16 qos_field; 3876 u8 qos_pkt = 0; 3877 3878 rx_desc = (struct hal_rx_desc *)head_msdu->data; 3879 hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc); 3880 3881 /* Base size */ 3882 wifi_hdr_len = sizeof(struct ieee80211_hdr_3addr); 3883 wh = (struct ieee80211_hdr_3addr *)hdr_desc; 3884 3885 if (ieee80211_is_data_qos(wh->frame_control)) { 3886 struct ieee80211_qos_hdr *qwh = 3887 (struct ieee80211_qos_hdr *)hdr_desc; 3888 3889 qos_field = qwh->qos_ctrl; 3890 qos_pkt = 1; 3891 } 3892 msdu = head_msdu; 3893 3894 while (msdu) { 3895 rx_desc = (struct hal_rx_desc *)msdu->data; 3896 hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc); 3897 3898 if (qos_pkt) { 3899 dest = skb_push(msdu, sizeof(__le16)); 3900 if (!dest) 3901 goto err_merge_fail; 3902 memcpy(dest, hdr_desc, wifi_hdr_len); 3903 memcpy(dest + wifi_hdr_len, 3904 (u8 *)&qos_field, sizeof(__le16)); 3905 } 3906 ath11k_dp_rx_msdus_set_payload(msdu); 3907 prev_buf = msdu; 3908 msdu = msdu->next; 3909 } 3910 dest = skb_put(prev_buf, HAL_RX_FCS_LEN); 3911 if (!dest) 3912 goto err_merge_fail; 3913 3914 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 3915 "mpdu_buf %pK mpdu_buf->len %u", 3916 prev_buf, prev_buf->len); 3917 } else { 3918 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 3919 "decap format %d is not supported!\n", 3920 decap_format); 3921 goto err_merge_fail; 3922 } 3923 3924 return head_msdu; 3925 3926 err_merge_fail: 3927 if (mpdu_buf && decap_format != DP_RX_DECAP_TYPE_RAW) { 3928 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 3929 "err_merge_fail mpdu_buf %pK", mpdu_buf); 3930 /* Free the head buffer */ 3931 dev_kfree_skb_any(mpdu_buf); 3932 } 3933 return NULL; 3934 } 3935 3936 static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id, 3937 struct sk_buff *head_msdu, 3938 struct sk_buff *tail_msdu, 3939 struct napi_struct *napi) 3940 { 3941 struct ath11k_pdev_dp *dp = &ar->dp; 3942 struct sk_buff *mon_skb, *skb_next, *header; 3943 struct ieee80211_rx_status *rxs = &dp->rx_status, *status; 3944 3945 mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu, 3946 tail_msdu, rxs); 3947 3948 if (!mon_skb) 3949 goto mon_deliver_fail; 3950 3951 header = mon_skb; 3952 3953 rxs->flag = 0; 3954 do { 3955 skb_next = mon_skb->next; 3956 if (!skb_next) 3957 rxs->flag &= ~RX_FLAG_AMSDU_MORE; 3958 else 3959 rxs->flag |= RX_FLAG_AMSDU_MORE; 3960 3961 if (mon_skb == header) { 3962 header = NULL; 3963 rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN; 3964 } else { 3965 rxs->flag |= RX_FLAG_ALLOW_SAME_PN; 3966 } 3967 rxs->flag |= RX_FLAG_ONLY_MONITOR; 3968 3969 status = IEEE80211_SKB_RXCB(mon_skb); 3970 *status = *rxs; 3971 3972 ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb); 3973 mon_skb = skb_next; 3974 } while (mon_skb); 3975 rxs->flag = 0; 3976 3977 return 0; 3978 3979 
mon_deliver_fail:
	mon_skb = head_msdu;
	while (mon_skb) {
		skb_next = mon_skb->next;
		dev_kfree_skb_any(mon_skb);
		mon_skb = skb_next;
	}
	return -EINVAL;
}

static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, u32 quota,
					  struct napi_struct *napi)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
	void *ring_entry;
	void *mon_dst_srng;
	u32 ppdu_id;
	u32 rx_bufs_used;
	struct ath11k_pdev_mon_stats *rx_mon_stats;
	u32 npackets = 0;

	mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];

	if (!mon_dst_srng) {
		ath11k_warn(ar->ab,
			    "HAL Monitor Destination Ring Init Failed -- %pK",
			    mon_dst_srng);
		return;
	}

	spin_lock_bh(&pmon->mon_lock);

	ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);

	ppdu_id = pmon->mon_ppdu_info.ppdu_id;
	rx_bufs_used = 0;
	rx_mon_stats = &pmon->rx_mon_stats;

	/* Reap the monitor destination ring, popping and delivering one MPDU
	 * per entry until an entry belonging to a new PPDU is seen.
	 */
	while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
		struct sk_buff *head_msdu, *tail_msdu;

		head_msdu = NULL;
		tail_msdu = NULL;

		rx_bufs_used += ath11k_dp_rx_mon_mpdu_pop(ar, ring_entry,
							  &head_msdu,
							  &tail_msdu,
							  &npackets, &ppdu_id);

		if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
			ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
				   "dest_rx: new ppdu_id %x != status ppdu_id %x",
				   ppdu_id, pmon->mon_ppdu_info.ppdu_id);
			break;
		}
		if (head_msdu && tail_msdu) {
			ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu,
						 tail_msdu, napi);
			rx_mon_stats->dest_mpdu_done++;
		}

		ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
								mon_dst_srng);
	}
	ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);

	spin_unlock_bh(&pmon->mon_lock);

	if (rx_bufs_used) {
		rx_mon_stats->dest_ppdu_done++;
		ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
					   &dp->rxdma_mon_buf_ring,
					   rx_bufs_used,
					   HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
	}
}

static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar,
						u32 quota,
						struct napi_struct *napi)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
	struct hal_rx_mon_ppdu_info *ppdu_info;
	struct sk_buff *status_skb;
	u32 tlv_status = HAL_TLV_STATUS_BUF_DONE;
	struct ath11k_pdev_mon_stats *rx_mon_stats;

	ppdu_info = &pmon->mon_ppdu_info;
	rx_mon_stats = &pmon->rx_mon_stats;

	if (pmon->mon_ppdu_status != DP_PPDU_STATUS_START)
		return;

	/* Parse queued status buffers; once a complete PPDU has been parsed,
	 * process the corresponding entries in the destination ring.
	 */
	while (!skb_queue_empty(&pmon->rx_status_q)) {
		status_skb = skb_dequeue(&pmon->rx_status_q);

		tlv_status = ath11k_hal_rx_parse_mon_status(ar->ab, ppdu_info,
							    status_skb);
		if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) {
			rx_mon_stats->status_ppdu_done++;
			pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
			ath11k_dp_rx_mon_dest_process(ar, quota, napi);
			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
		}
		dev_kfree_skb_any(status_skb);
	}
}

static int ath11k_dp_mon_process_rx(struct ath11k_base *ab, int mac_id,
				    struct napi_struct *napi, int budget)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
	int num_buffs_reaped = 0;

	num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ar->ab, dp->mac_id, &budget,
							     &pmon->rx_status_q);
	if (num_buffs_reaped)
		ath11k_dp_rx_mon_status_process_tlv(ar, budget, napi);

	return num_buffs_reaped;
}

int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
				   struct napi_struct *napi, int budget)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;
	int ret = 0;

	if (test_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags))
		ret = ath11k_dp_mon_process_rx(ab, mac_id, napi, budget);
	else
		ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);
	return ret;
}

static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;

	skb_queue_head_init(&pmon->rx_status_q);

	pmon->mon_ppdu_status = DP_PPDU_STATUS_START;

	memset(&pmon->rx_mon_stats, 0,
	       sizeof(pmon->rx_mon_stats));
	return 0;
}

int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = &dp->mon_data;
	struct hal_srng *mon_desc_srng = NULL;
	struct dp_srng *dp_srng;
	int ret = 0;
	u32 n_link_desc = 0;

	ret = ath11k_dp_rx_pdev_mon_status_attach(ar);
	if (ret) {
		ath11k_warn(ar->ab, "pdev_mon_status_attach() failed");
		return ret;
	}

	dp_srng = &dp->rxdma_mon_desc_ring;
	n_link_desc = dp_srng->size /
		ath11k_hal_srng_get_entrysize(HAL_RXDMA_MONITOR_DESC);
	mon_desc_srng =
		&ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id];

	ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks,
					HAL_RXDMA_MONITOR_DESC, mon_desc_srng,
					n_link_desc);
	if (ret) {
		ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed");
		return ret;
	}
	pmon->mon_last_linkdesc_paddr = 0;
	pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
	spin_lock_init(&pmon->mon_lock);
	return 0;
}

static int ath11k_dp_mon_link_free(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = &dp->mon_data;

	ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks,
				    HAL_RXDMA_MONITOR_DESC,
				    &dp->rxdma_mon_desc_ring);
	return 0;
}

int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar)
{
	ath11k_dp_mon_link_free(ar);
	return 0;
}