1 // SPDX-License-Identifier: ISC 2 /* 3 * Copyright (c) 2005-2011 Atheros Communications Inc. 4 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. 5 * Copyright (c) 2018, The Linux Foundation. All rights reserved. 6 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 7 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 8 */ 9 10 #include <linux/export.h> 11 12 #include "core.h" 13 #include "htc.h" 14 #include "htt.h" 15 #include "txrx.h" 16 #include "debug.h" 17 #include "trace.h" 18 #include "mac.h" 19 20 #include <linux/log2.h> 21 #include <linux/bitfield.h> 22 23 /* when under memory pressure rx ring refill may fail and needs a retry */ 24 #define HTT_RX_RING_REFILL_RETRY_MS 50 25 26 #define HTT_RX_RING_REFILL_RESCHED_MS 5 27 28 /* shortcut to interpret a raw memory buffer as a rx descriptor */ 29 #define HTT_RX_BUF_TO_RX_DESC(hw, buf) ath10k_htt_rx_desc_from_raw_buffer(hw, buf) 30 31 static int ath10k_htt_rx_get_csum_state(struct ath10k_hw_params *hw, struct sk_buff *skb); 32 33 static struct sk_buff * 34 ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr) 35 { 36 struct ath10k_skb_rxcb *rxcb; 37 38 hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr) 39 if (rxcb->paddr == paddr) 40 return ATH10K_RXCB_SKB(rxcb); 41 42 WARN_ON_ONCE(1); 43 return NULL; 44 } 45 46 static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt) 47 { 48 struct sk_buff *skb; 49 struct ath10k_skb_rxcb *rxcb; 50 struct hlist_node *n; 51 int i; 52 53 if (htt->rx_ring.in_ord_rx) { 54 hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) { 55 skb = ATH10K_RXCB_SKB(rxcb); 56 dma_unmap_single(htt->ar->dev, rxcb->paddr, 57 skb->len + skb_tailroom(skb), 58 DMA_FROM_DEVICE); 59 hash_del(&rxcb->hlist); 60 dev_kfree_skb_any(skb); 61 } 62 } else { 63 for (i = 0; i < htt->rx_ring.size; i++) { 64 skb = htt->rx_ring.netbufs_ring[i]; 65 if (!skb) 66 continue; 67 68 rxcb = ATH10K_SKB_RXCB(skb); 69 dma_unmap_single(htt->ar->dev, rxcb->paddr, 70 skb->len + skb_tailroom(skb), 71 DMA_FROM_DEVICE); 72 dev_kfree_skb_any(skb); 73 } 74 } 75 76 htt->rx_ring.fill_cnt = 0; 77 hash_init(htt->rx_ring.skb_table); 78 memset(htt->rx_ring.netbufs_ring, 0, 79 htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0])); 80 } 81 82 static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt) 83 { 84 return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_32); 85 } 86 87 static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt) 88 { 89 return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_64); 90 } 91 92 static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt, 93 void *vaddr) 94 { 95 htt->rx_ring.paddrs_ring_32 = vaddr; 96 } 97 98 static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt, 99 void *vaddr) 100 { 101 htt->rx_ring.paddrs_ring_64 = vaddr; 102 } 103 104 static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt, 105 dma_addr_t paddr, int idx) 106 { 107 htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr); 108 } 109 110 static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt, 111 dma_addr_t paddr, int idx) 112 { 113 htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr); 114 } 115 116 static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx) 117 { 118 htt->rx_ring.paddrs_ring_32[idx] = 0; 119 } 120 121 static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx) 122 { 123 htt->rx_ring.paddrs_ring_64[idx] = 0; 124 } 125 126 static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt) 127 { 128 return (void *)htt->rx_ring.paddrs_ring_32; 129 } 130 131 static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt) 132 { 133 return (void *)htt->rx_ring.paddrs_ring_64; 134 } 135 136 static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num) 137 { 138 struct ath10k_hw_params *hw = &htt->ar->hw_params; 139 struct htt_rx_desc *rx_desc; 140 struct ath10k_skb_rxcb *rxcb; 141 struct sk_buff *skb; 142 dma_addr_t paddr; 143 int ret = 0, idx; 144 145 /* The Full Rx Reorder firmware has no way of telling the host 146 * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring. 147 * To keep things simple make sure ring is always half empty. This 148 * guarantees there'll be no replenishment overruns possible. 149 */ 150 BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2); 151 152 idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr); 153 154 if (idx < 0 || idx >= htt->rx_ring.size) { 155 ath10k_err(htt->ar, "rx ring index is not valid, firmware malfunctioning?\n"); 156 idx &= htt->rx_ring.size_mask; 157 ret = -ENOMEM; 158 goto fail; 159 } 160 161 while (num > 0) { 162 skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN); 163 if (!skb) { 164 ret = -ENOMEM; 165 goto fail; 166 } 167 168 if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN)) 169 skb_pull(skb, 170 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) - 171 skb->data); 172 173 /* Clear rx_desc attention word before posting to Rx ring */ 174 rx_desc = HTT_RX_BUF_TO_RX_DESC(hw, skb->data); 175 ath10k_htt_rx_desc_get_attention(hw, rx_desc)->flags = __cpu_to_le32(0); 176 177 paddr = dma_map_single(htt->ar->dev, skb->data, 178 skb->len + skb_tailroom(skb), 179 DMA_FROM_DEVICE); 180 181 if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) { 182 dev_kfree_skb_any(skb); 183 ret = -ENOMEM; 184 goto fail; 185 } 186 187 rxcb = ATH10K_SKB_RXCB(skb); 188 rxcb->paddr = paddr; 189 htt->rx_ring.netbufs_ring[idx] = skb; 190 ath10k_htt_set_paddrs_ring(htt, paddr, idx); 191 htt->rx_ring.fill_cnt++; 192 193 if (htt->rx_ring.in_ord_rx) { 194 hash_add(htt->rx_ring.skb_table, 195 &ATH10K_SKB_RXCB(skb)->hlist, 196 paddr); 197 } 198 199 num--; 200 idx++; 201 idx &= htt->rx_ring.size_mask; 202 } 203 204 fail: 205 /* 206 * Make sure the rx buffer is updated before available buffer 207 * index to avoid any potential rx ring corruption. 208 */ 209 mb(); 210 *htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx); 211 return ret; 212 } 213 214 static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num) 215 { 216 lockdep_assert_held(&htt->rx_ring.lock); 217 return __ath10k_htt_rx_ring_fill_n(htt, num); 218 } 219 220 static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt) 221 { 222 int ret, num_deficit, num_to_fill; 223 224 /* Refilling the whole RX ring buffer proves to be a bad idea. The 225 * reason is RX may take up significant amount of CPU cycles and starve 226 * other tasks, e.g. TX on an ethernet device while acting as a bridge 227 * with ath10k wlan interface. This ended up with very poor performance 228 * once CPU the host system was overwhelmed with RX on ath10k. 229 * 230 * By limiting the number of refills the replenishing occurs 231 * progressively. This in turns makes use of the fact tasklets are 232 * processed in FIFO order. This means actual RX processing can starve 233 * out refilling. If there's not enough buffers on RX ring FW will not 234 * report RX until it is refilled with enough buffers. This 235 * automatically balances load wrt to CPU power. 236 * 237 * This probably comes at a cost of lower maximum throughput but 238 * improves the average and stability. 239 */ 240 spin_lock_bh(&htt->rx_ring.lock); 241 num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt; 242 num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit); 243 num_deficit -= num_to_fill; 244 ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill); 245 if (ret == -ENOMEM) { 246 /* 247 * Failed to fill it to the desired level - 248 * we'll start a timer and try again next time. 249 * As long as enough buffers are left in the ring for 250 * another A-MPDU rx, no special recovery is needed. 251 */ 252 mod_timer(&htt->rx_ring.refill_retry_timer, jiffies + 253 msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS)); 254 } else if (num_deficit > 0) { 255 mod_timer(&htt->rx_ring.refill_retry_timer, jiffies + 256 msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS)); 257 } 258 spin_unlock_bh(&htt->rx_ring.lock); 259 } 260 261 static void ath10k_htt_rx_ring_refill_retry(struct timer_list *t) 262 { 263 struct ath10k_htt *htt = timer_container_of(htt, t, 264 rx_ring.refill_retry_timer); 265 266 ath10k_htt_rx_msdu_buff_replenish(htt); 267 } 268 269 int ath10k_htt_rx_ring_refill(struct ath10k *ar) 270 { 271 struct ath10k_htt *htt = &ar->htt; 272 int ret; 273 274 if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) 275 return 0; 276 277 spin_lock_bh(&htt->rx_ring.lock); 278 ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level - 279 htt->rx_ring.fill_cnt)); 280 281 if (ret) 282 ath10k_htt_rx_ring_free(htt); 283 284 spin_unlock_bh(&htt->rx_ring.lock); 285 286 return ret; 287 } 288 289 void ath10k_htt_rx_free(struct ath10k_htt *htt) 290 { 291 if (htt->ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) 292 return; 293 294 timer_delete_sync(&htt->rx_ring.refill_retry_timer); 295 296 skb_queue_purge(&htt->rx_msdus_q); 297 skb_queue_purge(&htt->rx_in_ord_compl_q); 298 skb_queue_purge(&htt->tx_fetch_ind_q); 299 300 spin_lock_bh(&htt->rx_ring.lock); 301 ath10k_htt_rx_ring_free(htt); 302 spin_unlock_bh(&htt->rx_ring.lock); 303 304 dma_free_coherent(htt->ar->dev, 305 ath10k_htt_get_rx_ring_size(htt), 306 ath10k_htt_get_vaddr_ring(htt), 307 htt->rx_ring.base_paddr); 308 309 ath10k_htt_config_paddrs_ring(htt, NULL); 310 311 dma_free_coherent(htt->ar->dev, 312 sizeof(*htt->rx_ring.alloc_idx.vaddr), 313 htt->rx_ring.alloc_idx.vaddr, 314 htt->rx_ring.alloc_idx.paddr); 315 htt->rx_ring.alloc_idx.vaddr = NULL; 316 317 kfree(htt->rx_ring.netbufs_ring); 318 htt->rx_ring.netbufs_ring = NULL; 319 } 320 321 static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt) 322 { 323 struct ath10k *ar = htt->ar; 324 int idx; 325 struct sk_buff *msdu; 326 327 lockdep_assert_held(&htt->rx_ring.lock); 328 329 if (htt->rx_ring.fill_cnt == 0) { 330 ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n"); 331 return NULL; 332 } 333 334 idx = htt->rx_ring.sw_rd_idx.msdu_payld; 335 msdu = htt->rx_ring.netbufs_ring[idx]; 336 htt->rx_ring.netbufs_ring[idx] = NULL; 337 ath10k_htt_reset_paddrs_ring(htt, idx); 338 339 idx++; 340 idx &= htt->rx_ring.size_mask; 341 htt->rx_ring.sw_rd_idx.msdu_payld = idx; 342 htt->rx_ring.fill_cnt--; 343 344 dma_unmap_single(htt->ar->dev, 345 ATH10K_SKB_RXCB(msdu)->paddr, 346 msdu->len + skb_tailroom(msdu), 347 DMA_FROM_DEVICE); 348 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ", 349 msdu->data, msdu->len + skb_tailroom(msdu)); 350 351 return msdu; 352 } 353 354 /* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */ 355 static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, 356 struct sk_buff_head *amsdu) 357 { 358 struct ath10k *ar = htt->ar; 359 struct ath10k_hw_params *hw = &ar->hw_params; 360 int msdu_len, msdu_chaining = 0; 361 struct sk_buff *msdu; 362 struct htt_rx_desc *rx_desc; 363 struct rx_attention *rx_desc_attention; 364 struct rx_frag_info_common *rx_desc_frag_info_common; 365 struct rx_msdu_start_common *rx_desc_msdu_start_common; 366 struct rx_msdu_end_common *rx_desc_msdu_end_common; 367 368 lockdep_assert_held(&htt->rx_ring.lock); 369 370 for (;;) { 371 int last_msdu, msdu_len_invalid, msdu_chained; 372 373 msdu = ath10k_htt_rx_netbuf_pop(htt); 374 if (!msdu) { 375 __skb_queue_purge(amsdu); 376 return -ENOENT; 377 } 378 379 __skb_queue_tail(amsdu, msdu); 380 381 rx_desc = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data); 382 rx_desc_attention = ath10k_htt_rx_desc_get_attention(hw, rx_desc); 383 rx_desc_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, 384 rx_desc); 385 rx_desc_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rx_desc); 386 rx_desc_frag_info_common = ath10k_htt_rx_desc_get_frag_info(hw, rx_desc); 387 388 /* FIXME: we must report msdu payload since this is what caller 389 * expects now 390 */ 391 skb_put(msdu, hw->rx_desc_ops->rx_desc_msdu_payload_offset); 392 skb_pull(msdu, hw->rx_desc_ops->rx_desc_msdu_payload_offset); 393 394 /* 395 * Sanity check - confirm the HW is finished filling in the 396 * rx data. 397 * If the HW and SW are working correctly, then it's guaranteed 398 * that the HW's MAC DMA is done before this point in the SW. 399 * To prevent the case that we handle a stale Rx descriptor, 400 * just assert for now until we have a way to recover. 401 */ 402 if (!(__le32_to_cpu(rx_desc_attention->flags) 403 & RX_ATTENTION_FLAGS_MSDU_DONE)) { 404 __skb_queue_purge(amsdu); 405 return -EIO; 406 } 407 408 msdu_len_invalid = !!(__le32_to_cpu(rx_desc_attention->flags) 409 & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR | 410 RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR)); 411 msdu_len = MS(__le32_to_cpu(rx_desc_msdu_start_common->info0), 412 RX_MSDU_START_INFO0_MSDU_LENGTH); 413 msdu_chained = rx_desc_frag_info_common->ring2_more_count; 414 415 if (msdu_len_invalid) 416 msdu_len = 0; 417 418 skb_trim(msdu, 0); 419 skb_put(msdu, min(msdu_len, ath10k_htt_rx_msdu_size(hw))); 420 msdu_len -= msdu->len; 421 422 /* Note: Chained buffers do not contain rx descriptor */ 423 while (msdu_chained--) { 424 msdu = ath10k_htt_rx_netbuf_pop(htt); 425 if (!msdu) { 426 __skb_queue_purge(amsdu); 427 return -ENOENT; 428 } 429 430 __skb_queue_tail(amsdu, msdu); 431 skb_trim(msdu, 0); 432 skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE)); 433 msdu_len -= msdu->len; 434 msdu_chaining = 1; 435 } 436 437 last_msdu = __le32_to_cpu(rx_desc_msdu_end_common->info0) & 438 RX_MSDU_END_INFO0_LAST_MSDU; 439 440 /* FIXME: why are we skipping the first part of the rx_desc? */ 441 trace_ath10k_htt_rx_desc(ar, (void *)rx_desc + sizeof(u32), 442 hw->rx_desc_ops->rx_desc_size - sizeof(u32)); 443 444 if (last_msdu) 445 break; 446 } 447 448 if (skb_queue_empty(amsdu)) 449 msdu_chaining = -1; 450 451 /* 452 * Don't refill the ring yet. 453 * 454 * First, the elements popped here are still in use - it is not 455 * safe to overwrite them until the matching call to 456 * mpdu_desc_list_next. Second, for efficiency it is preferable to 457 * refill the rx ring with 1 PPDU's worth of rx buffers (something 458 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers 459 * (something like 3 buffers). Consequently, we'll rely on the txrx 460 * SW to tell us when it is done pulling all the PPDU's rx buffers 461 * out of the rx ring, and then refill it just once. 462 */ 463 464 return msdu_chaining; 465 } 466 467 static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt, 468 u64 paddr) 469 { 470 struct ath10k *ar = htt->ar; 471 struct ath10k_skb_rxcb *rxcb; 472 struct sk_buff *msdu; 473 474 lockdep_assert_held(&htt->rx_ring.lock); 475 476 msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr); 477 if (!msdu) 478 return NULL; 479 480 rxcb = ATH10K_SKB_RXCB(msdu); 481 hash_del(&rxcb->hlist); 482 htt->rx_ring.fill_cnt--; 483 484 dma_unmap_single(htt->ar->dev, rxcb->paddr, 485 msdu->len + skb_tailroom(msdu), 486 DMA_FROM_DEVICE); 487 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ", 488 msdu->data, msdu->len + skb_tailroom(msdu)); 489 490 return msdu; 491 } 492 493 static inline void ath10k_htt_append_frag_list(struct sk_buff *skb_head, 494 struct sk_buff *frag_list, 495 unsigned int frag_len) 496 { 497 skb_shinfo(skb_head)->frag_list = frag_list; 498 skb_head->data_len = frag_len; 499 skb_head->len += skb_head->data_len; 500 } 501 502 static int ath10k_htt_rx_handle_amsdu_mon_32(struct ath10k_htt *htt, 503 struct sk_buff *msdu, 504 struct htt_rx_in_ord_msdu_desc **msdu_desc) 505 { 506 struct ath10k *ar = htt->ar; 507 struct ath10k_hw_params *hw = &ar->hw_params; 508 u32 paddr; 509 struct sk_buff *frag_buf; 510 struct sk_buff *prev_frag_buf; 511 u8 last_frag; 512 struct htt_rx_in_ord_msdu_desc *ind_desc = *msdu_desc; 513 struct htt_rx_desc *rxd; 514 int amsdu_len = __le16_to_cpu(ind_desc->msdu_len); 515 516 rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data); 517 trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size); 518 519 skb_put(msdu, hw->rx_desc_ops->rx_desc_size); 520 skb_pull(msdu, hw->rx_desc_ops->rx_desc_size); 521 skb_put(msdu, min(amsdu_len, ath10k_htt_rx_msdu_size(hw))); 522 amsdu_len -= msdu->len; 523 524 last_frag = ind_desc->reserved; 525 if (last_frag) { 526 if (amsdu_len) { 527 ath10k_warn(ar, "invalid amsdu len %u, left %d", 528 __le16_to_cpu(ind_desc->msdu_len), 529 amsdu_len); 530 } 531 return 0; 532 } 533 534 ind_desc++; 535 paddr = __le32_to_cpu(ind_desc->msdu_paddr); 536 frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr); 537 if (!frag_buf) { 538 ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%x", paddr); 539 return -ENOENT; 540 } 541 542 skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE)); 543 ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len); 544 545 amsdu_len -= frag_buf->len; 546 prev_frag_buf = frag_buf; 547 last_frag = ind_desc->reserved; 548 while (!last_frag) { 549 ind_desc++; 550 paddr = __le32_to_cpu(ind_desc->msdu_paddr); 551 frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr); 552 if (!frag_buf) { 553 ath10k_warn(ar, "failed to pop frag-n paddr: 0x%x", 554 paddr); 555 prev_frag_buf->next = NULL; 556 return -ENOENT; 557 } 558 559 skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE)); 560 last_frag = ind_desc->reserved; 561 amsdu_len -= frag_buf->len; 562 563 prev_frag_buf->next = frag_buf; 564 prev_frag_buf = frag_buf; 565 } 566 567 if (amsdu_len) { 568 ath10k_warn(ar, "invalid amsdu len %u, left %d", 569 __le16_to_cpu(ind_desc->msdu_len), amsdu_len); 570 } 571 572 *msdu_desc = ind_desc; 573 574 prev_frag_buf->next = NULL; 575 return 0; 576 } 577 578 static int 579 ath10k_htt_rx_handle_amsdu_mon_64(struct ath10k_htt *htt, 580 struct sk_buff *msdu, 581 struct htt_rx_in_ord_msdu_desc_ext **msdu_desc) 582 { 583 struct ath10k *ar = htt->ar; 584 struct ath10k_hw_params *hw = &ar->hw_params; 585 u64 paddr; 586 struct sk_buff *frag_buf; 587 struct sk_buff *prev_frag_buf; 588 u8 last_frag; 589 struct htt_rx_in_ord_msdu_desc_ext *ind_desc = *msdu_desc; 590 struct htt_rx_desc *rxd; 591 int amsdu_len = __le16_to_cpu(ind_desc->msdu_len); 592 593 rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data); 594 trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size); 595 596 skb_put(msdu, hw->rx_desc_ops->rx_desc_size); 597 skb_pull(msdu, hw->rx_desc_ops->rx_desc_size); 598 skb_put(msdu, min(amsdu_len, ath10k_htt_rx_msdu_size(hw))); 599 amsdu_len -= msdu->len; 600 601 last_frag = ind_desc->reserved; 602 if (last_frag) { 603 if (amsdu_len) { 604 ath10k_warn(ar, "invalid amsdu len %u, left %d", 605 __le16_to_cpu(ind_desc->msdu_len), 606 amsdu_len); 607 } 608 return 0; 609 } 610 611 ind_desc++; 612 paddr = __le64_to_cpu(ind_desc->msdu_paddr); 613 frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr); 614 if (!frag_buf) { 615 ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%llx", paddr); 616 return -ENOENT; 617 } 618 619 skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE)); 620 ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len); 621 622 amsdu_len -= frag_buf->len; 623 prev_frag_buf = frag_buf; 624 last_frag = ind_desc->reserved; 625 while (!last_frag) { 626 ind_desc++; 627 paddr = __le64_to_cpu(ind_desc->msdu_paddr); 628 frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr); 629 if (!frag_buf) { 630 ath10k_warn(ar, "failed to pop frag-n paddr: 0x%llx", 631 paddr); 632 prev_frag_buf->next = NULL; 633 return -ENOENT; 634 } 635 636 skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE)); 637 last_frag = ind_desc->reserved; 638 amsdu_len -= frag_buf->len; 639 640 prev_frag_buf->next = frag_buf; 641 prev_frag_buf = frag_buf; 642 } 643 644 if (amsdu_len) { 645 ath10k_warn(ar, "invalid amsdu len %u, left %d", 646 __le16_to_cpu(ind_desc->msdu_len), amsdu_len); 647 } 648 649 *msdu_desc = ind_desc; 650 651 prev_frag_buf->next = NULL; 652 return 0; 653 } 654 655 static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt, 656 struct htt_rx_in_ord_ind *ev, 657 struct sk_buff_head *list) 658 { 659 struct ath10k *ar = htt->ar; 660 struct ath10k_hw_params *hw = &ar->hw_params; 661 struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32; 662 struct htt_rx_desc *rxd; 663 struct rx_attention *rxd_attention; 664 struct sk_buff *msdu; 665 int msdu_count, ret; 666 bool is_offload; 667 u32 paddr; 668 669 lockdep_assert_held(&htt->rx_ring.lock); 670 671 msdu_count = __le16_to_cpu(ev->msdu_count); 672 is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK); 673 674 while (msdu_count--) { 675 paddr = __le32_to_cpu(msdu_desc->msdu_paddr); 676 677 msdu = ath10k_htt_rx_pop_paddr(htt, paddr); 678 if (!msdu) { 679 __skb_queue_purge(list); 680 return -ENOENT; 681 } 682 683 if (!is_offload && ar->monitor_arvif) { 684 ret = ath10k_htt_rx_handle_amsdu_mon_32(htt, msdu, 685 &msdu_desc); 686 if (ret) { 687 __skb_queue_purge(list); 688 return ret; 689 } 690 __skb_queue_tail(list, msdu); 691 msdu_desc++; 692 continue; 693 } 694 695 __skb_queue_tail(list, msdu); 696 697 if (!is_offload) { 698 rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data); 699 rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd); 700 701 trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size); 702 703 skb_put(msdu, hw->rx_desc_ops->rx_desc_size); 704 skb_pull(msdu, hw->rx_desc_ops->rx_desc_size); 705 skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len)); 706 707 if (!(__le32_to_cpu(rxd_attention->flags) & 708 RX_ATTENTION_FLAGS_MSDU_DONE)) { 709 ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n"); 710 return -EIO; 711 } 712 } 713 714 msdu_desc++; 715 } 716 717 return 0; 718 } 719 720 static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt, 721 struct htt_rx_in_ord_ind *ev, 722 struct sk_buff_head *list) 723 { 724 struct ath10k *ar = htt->ar; 725 struct ath10k_hw_params *hw = &ar->hw_params; 726 struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64; 727 struct htt_rx_desc *rxd; 728 struct rx_attention *rxd_attention; 729 struct sk_buff *msdu; 730 int msdu_count, ret; 731 bool is_offload; 732 u64 paddr; 733 734 lockdep_assert_held(&htt->rx_ring.lock); 735 736 msdu_count = __le16_to_cpu(ev->msdu_count); 737 is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK); 738 739 while (msdu_count--) { 740 paddr = __le64_to_cpu(msdu_desc->msdu_paddr); 741 msdu = ath10k_htt_rx_pop_paddr(htt, paddr); 742 if (!msdu) { 743 __skb_queue_purge(list); 744 return -ENOENT; 745 } 746 747 if (!is_offload && ar->monitor_arvif) { 748 ret = ath10k_htt_rx_handle_amsdu_mon_64(htt, msdu, 749 &msdu_desc); 750 if (ret) { 751 __skb_queue_purge(list); 752 return ret; 753 } 754 __skb_queue_tail(list, msdu); 755 msdu_desc++; 756 continue; 757 } 758 759 __skb_queue_tail(list, msdu); 760 761 if (!is_offload) { 762 rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data); 763 rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd); 764 765 trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size); 766 767 skb_put(msdu, hw->rx_desc_ops->rx_desc_size); 768 skb_pull(msdu, hw->rx_desc_ops->rx_desc_size); 769 skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len)); 770 771 if (!(__le32_to_cpu(rxd_attention->flags) & 772 RX_ATTENTION_FLAGS_MSDU_DONE)) { 773 ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n"); 774 return -EIO; 775 } 776 } 777 778 msdu_desc++; 779 } 780 781 return 0; 782 } 783 784 int ath10k_htt_rx_alloc(struct ath10k_htt *htt) 785 { 786 struct ath10k *ar = htt->ar; 787 dma_addr_t paddr; 788 void *vaddr, *vaddr_ring; 789 size_t size; 790 struct timer_list *timer = &htt->rx_ring.refill_retry_timer; 791 792 if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) 793 return 0; 794 795 htt->rx_confused = false; 796 797 /* XXX: The fill level could be changed during runtime in response to 798 * the host processing latency. Is this really worth it? 799 */ 800 htt->rx_ring.size = HTT_RX_RING_SIZE; 801 htt->rx_ring.size_mask = htt->rx_ring.size - 1; 802 htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level; 803 804 if (!is_power_of_2(htt->rx_ring.size)) { 805 ath10k_warn(ar, "htt rx ring size is not power of 2\n"); 806 return -EINVAL; 807 } 808 809 htt->rx_ring.netbufs_ring = 810 kzalloc_objs(struct sk_buff *, htt->rx_ring.size); 811 if (!htt->rx_ring.netbufs_ring) 812 goto err_netbuf; 813 814 size = ath10k_htt_get_rx_ring_size(htt); 815 816 vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL); 817 if (!vaddr_ring) 818 goto err_dma_ring; 819 820 ath10k_htt_config_paddrs_ring(htt, vaddr_ring); 821 htt->rx_ring.base_paddr = paddr; 822 823 vaddr = dma_alloc_coherent(htt->ar->dev, 824 sizeof(*htt->rx_ring.alloc_idx.vaddr), 825 &paddr, GFP_KERNEL); 826 if (!vaddr) 827 goto err_dma_idx; 828 829 htt->rx_ring.alloc_idx.vaddr = vaddr; 830 htt->rx_ring.alloc_idx.paddr = paddr; 831 htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask; 832 *htt->rx_ring.alloc_idx.vaddr = 0; 833 834 /* Initialize the Rx refill retry timer */ 835 timer_setup(timer, ath10k_htt_rx_ring_refill_retry, 0); 836 837 spin_lock_init(&htt->rx_ring.lock); 838 839 htt->rx_ring.fill_cnt = 0; 840 htt->rx_ring.sw_rd_idx.msdu_payld = 0; 841 hash_init(htt->rx_ring.skb_table); 842 843 skb_queue_head_init(&htt->rx_msdus_q); 844 skb_queue_head_init(&htt->rx_in_ord_compl_q); 845 skb_queue_head_init(&htt->tx_fetch_ind_q); 846 atomic_set(&htt->num_mpdus_ready, 0); 847 848 ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n", 849 htt->rx_ring.size, htt->rx_ring.fill_level); 850 return 0; 851 852 err_dma_idx: 853 dma_free_coherent(htt->ar->dev, 854 ath10k_htt_get_rx_ring_size(htt), 855 vaddr_ring, 856 htt->rx_ring.base_paddr); 857 ath10k_htt_config_paddrs_ring(htt, NULL); 858 err_dma_ring: 859 kfree(htt->rx_ring.netbufs_ring); 860 htt->rx_ring.netbufs_ring = NULL; 861 err_netbuf: 862 return -ENOMEM; 863 } 864 865 static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar, 866 enum htt_rx_mpdu_encrypt_type type) 867 { 868 switch (type) { 869 case HTT_RX_MPDU_ENCRYPT_NONE: 870 return 0; 871 case HTT_RX_MPDU_ENCRYPT_WEP40: 872 case HTT_RX_MPDU_ENCRYPT_WEP104: 873 return IEEE80211_WEP_IV_LEN; 874 case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC: 875 case HTT_RX_MPDU_ENCRYPT_TKIP_WPA: 876 return IEEE80211_TKIP_IV_LEN; 877 case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2: 878 return IEEE80211_CCMP_HDR_LEN; 879 case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2: 880 return IEEE80211_CCMP_256_HDR_LEN; 881 case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2: 882 case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2: 883 return IEEE80211_GCMP_HDR_LEN; 884 case HTT_RX_MPDU_ENCRYPT_WEP128: 885 case HTT_RX_MPDU_ENCRYPT_WAPI: 886 break; 887 } 888 889 ath10k_warn(ar, "unsupported encryption type %d\n", type); 890 return 0; 891 } 892 893 #define MICHAEL_MIC_LEN 8 894 895 static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar, 896 enum htt_rx_mpdu_encrypt_type type) 897 { 898 switch (type) { 899 case HTT_RX_MPDU_ENCRYPT_NONE: 900 case HTT_RX_MPDU_ENCRYPT_WEP40: 901 case HTT_RX_MPDU_ENCRYPT_WEP104: 902 case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC: 903 case HTT_RX_MPDU_ENCRYPT_TKIP_WPA: 904 return 0; 905 case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2: 906 return IEEE80211_CCMP_MIC_LEN; 907 case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2: 908 return IEEE80211_CCMP_256_MIC_LEN; 909 case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2: 910 case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2: 911 return IEEE80211_GCMP_MIC_LEN; 912 case HTT_RX_MPDU_ENCRYPT_WEP128: 913 case HTT_RX_MPDU_ENCRYPT_WAPI: 914 break; 915 } 916 917 ath10k_warn(ar, "unsupported encryption type %d\n", type); 918 return 0; 919 } 920 921 static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar, 922 enum htt_rx_mpdu_encrypt_type type) 923 { 924 switch (type) { 925 case HTT_RX_MPDU_ENCRYPT_NONE: 926 case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2: 927 case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2: 928 case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2: 929 case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2: 930 return 0; 931 case HTT_RX_MPDU_ENCRYPT_WEP40: 932 case HTT_RX_MPDU_ENCRYPT_WEP104: 933 return IEEE80211_WEP_ICV_LEN; 934 case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC: 935 case HTT_RX_MPDU_ENCRYPT_TKIP_WPA: 936 return IEEE80211_TKIP_ICV_LEN; 937 case HTT_RX_MPDU_ENCRYPT_WEP128: 938 case HTT_RX_MPDU_ENCRYPT_WAPI: 939 break; 940 } 941 942 ath10k_warn(ar, "unsupported encryption type %d\n", type); 943 return 0; 944 } 945 946 struct amsdu_subframe_hdr { 947 u8 dst[ETH_ALEN]; 948 u8 src[ETH_ALEN]; 949 __be16 len; 950 } __packed; 951 952 #define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63) 953 954 static inline u8 ath10k_bw_to_mac80211_bw(u8 bw) 955 { 956 u8 ret = 0; 957 958 switch (bw) { 959 case 0: 960 ret = RATE_INFO_BW_20; 961 break; 962 case 1: 963 ret = RATE_INFO_BW_40; 964 break; 965 case 2: 966 ret = RATE_INFO_BW_80; 967 break; 968 case 3: 969 ret = RATE_INFO_BW_160; 970 break; 971 } 972 973 return ret; 974 } 975 976 static void ath10k_htt_rx_h_rates(struct ath10k *ar, 977 struct ieee80211_rx_status *status, 978 struct htt_rx_desc *rxd) 979 { 980 struct ath10k_hw_params *hw = &ar->hw_params; 981 struct rx_attention *rxd_attention; 982 struct rx_mpdu_start *rxd_mpdu_start; 983 struct rx_mpdu_end *rxd_mpdu_end; 984 struct rx_msdu_start_common *rxd_msdu_start_common; 985 struct rx_msdu_end_common *rxd_msdu_end_common; 986 struct rx_ppdu_start *rxd_ppdu_start; 987 struct ieee80211_supported_band *sband; 988 u8 cck, rate, bw, sgi, mcs, nss; 989 u8 *rxd_msdu_payload; 990 u8 preamble = 0; 991 u8 group_id; 992 u32 info1, info2, info3; 993 u32 stbc, nsts_su; 994 995 rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd); 996 rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd); 997 rxd_mpdu_end = ath10k_htt_rx_desc_get_mpdu_end(hw, rxd); 998 rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd); 999 rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd); 1000 rxd_ppdu_start = ath10k_htt_rx_desc_get_ppdu_start(hw, rxd); 1001 rxd_msdu_payload = ath10k_htt_rx_desc_get_msdu_payload(hw, rxd); 1002 1003 info1 = __le32_to_cpu(rxd_ppdu_start->info1); 1004 info2 = __le32_to_cpu(rxd_ppdu_start->info2); 1005 info3 = __le32_to_cpu(rxd_ppdu_start->info3); 1006 1007 preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE); 1008 1009 switch (preamble) { 1010 case HTT_RX_LEGACY: 1011 /* To get legacy rate index band is required. Since band can't 1012 * be undefined check if freq is non-zero. 1013 */ 1014 if (!status->freq) 1015 return; 1016 1017 cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT; 1018 rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE); 1019 rate &= ~RX_PPDU_START_RATE_FLAG; 1020 1021 sband = &ar->mac.sbands[status->band]; 1022 status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck); 1023 break; 1024 case HTT_RX_HT: 1025 case HTT_RX_HT_WITH_TXBF: 1026 /* HT-SIG - Table 20-11 in info2 and info3 */ 1027 mcs = info2 & 0x1F; 1028 nss = mcs >> 3; 1029 bw = (info2 >> 7) & 1; 1030 sgi = (info3 >> 7) & 1; 1031 1032 status->rate_idx = mcs; 1033 status->encoding = RX_ENC_HT; 1034 if (sgi) 1035 status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 1036 if (bw) 1037 status->bw = RATE_INFO_BW_40; 1038 break; 1039 case HTT_RX_VHT: 1040 case HTT_RX_VHT_WITH_TXBF: 1041 /* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3 1042 * TODO check this 1043 */ 1044 bw = info2 & 3; 1045 sgi = info3 & 1; 1046 stbc = (info2 >> 3) & 1; 1047 group_id = (info2 >> 4) & 0x3F; 1048 1049 if (GROUP_ID_IS_SU_MIMO(group_id)) { 1050 mcs = (info3 >> 4) & 0x0F; 1051 nsts_su = ((info2 >> 10) & 0x07); 1052 if (stbc) 1053 nss = (nsts_su >> 2) + 1; 1054 else 1055 nss = (nsts_su + 1); 1056 } else { 1057 /* Hardware doesn't decode VHT-SIG-B into Rx descriptor 1058 * so it's impossible to decode MCS. Also since 1059 * firmware consumes Group Id Management frames host 1060 * has no knowledge regarding group/user position 1061 * mapping so it's impossible to pick the correct Nsts 1062 * from VHT-SIG-A1. 1063 * 1064 * Bandwidth and SGI are valid so report the rateinfo 1065 * on best-effort basis. 1066 */ 1067 mcs = 0; 1068 nss = 1; 1069 } 1070 1071 if (mcs > 0x09) { 1072 ath10k_warn(ar, "invalid MCS received %u\n", mcs); 1073 ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n", 1074 __le32_to_cpu(rxd_attention->flags), 1075 __le32_to_cpu(rxd_mpdu_start->info0), 1076 __le32_to_cpu(rxd_mpdu_start->info1), 1077 __le32_to_cpu(rxd_msdu_start_common->info0), 1078 __le32_to_cpu(rxd_msdu_start_common->info1), 1079 rxd_ppdu_start->info0, 1080 __le32_to_cpu(rxd_ppdu_start->info1), 1081 __le32_to_cpu(rxd_ppdu_start->info2), 1082 __le32_to_cpu(rxd_ppdu_start->info3), 1083 __le32_to_cpu(rxd_ppdu_start->info4)); 1084 1085 ath10k_warn(ar, "msdu end %08x mpdu end %08x\n", 1086 __le32_to_cpu(rxd_msdu_end_common->info0), 1087 __le32_to_cpu(rxd_mpdu_end->info0)); 1088 1089 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, 1090 "rx desc msdu payload: ", 1091 rxd_msdu_payload, 50); 1092 } 1093 1094 status->rate_idx = mcs; 1095 status->nss = nss; 1096 1097 if (sgi) 1098 status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 1099 1100 status->bw = ath10k_bw_to_mac80211_bw(bw); 1101 status->encoding = RX_ENC_VHT; 1102 break; 1103 default: 1104 break; 1105 } 1106 } 1107 1108 static struct ieee80211_channel * 1109 ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd) 1110 { 1111 struct ath10k_hw_params *hw = &ar->hw_params; 1112 struct rx_attention *rxd_attention; 1113 struct rx_msdu_end_common *rxd_msdu_end_common; 1114 struct rx_mpdu_start *rxd_mpdu_start; 1115 struct ath10k_peer *peer; 1116 struct ath10k_vif *arvif; 1117 struct cfg80211_chan_def def; 1118 u16 peer_id; 1119 1120 lockdep_assert_held(&ar->data_lock); 1121 1122 if (!rxd) 1123 return NULL; 1124 1125 rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd); 1126 rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd); 1127 rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd); 1128 1129 if (rxd_attention->flags & 1130 __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID)) 1131 return NULL; 1132 1133 if (!(rxd_msdu_end_common->info0 & 1134 __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU))) 1135 return NULL; 1136 1137 peer_id = MS(__le32_to_cpu(rxd_mpdu_start->info0), 1138 RX_MPDU_START_INFO0_PEER_IDX); 1139 1140 peer = ath10k_peer_find_by_id(ar, peer_id); 1141 if (!peer) 1142 return NULL; 1143 1144 arvif = ath10k_get_arvif(ar, peer->vdev_id); 1145 if (WARN_ON_ONCE(!arvif)) 1146 return NULL; 1147 1148 if (ath10k_mac_vif_chan(arvif->vif, &def)) 1149 return NULL; 1150 1151 return def.chan; 1152 } 1153 1154 static struct ieee80211_channel * 1155 ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id) 1156 { 1157 struct ath10k_vif *arvif; 1158 struct cfg80211_chan_def def; 1159 1160 lockdep_assert_held(&ar->data_lock); 1161 1162 list_for_each_entry(arvif, &ar->arvifs, list) { 1163 if (arvif->vdev_id == vdev_id && 1164 ath10k_mac_vif_chan(arvif->vif, &def) == 0) 1165 return def.chan; 1166 } 1167 1168 return NULL; 1169 } 1170 1171 static void 1172 ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw, 1173 struct ieee80211_chanctx_conf *conf, 1174 void *data) 1175 { 1176 struct cfg80211_chan_def *def = data; 1177 1178 *def = conf->def; 1179 } 1180 1181 static struct ieee80211_channel * 1182 ath10k_htt_rx_h_any_channel(struct ath10k *ar) 1183 { 1184 struct cfg80211_chan_def def = {}; 1185 1186 ieee80211_iter_chan_contexts_atomic(ar->hw, 1187 ath10k_htt_rx_h_any_chan_iter, 1188 &def); 1189 1190 return def.chan; 1191 } 1192 1193 static bool ath10k_htt_rx_h_channel(struct ath10k *ar, 1194 struct ieee80211_rx_status *status, 1195 struct htt_rx_desc *rxd, 1196 u32 vdev_id) 1197 { 1198 struct ieee80211_channel *ch; 1199 1200 spin_lock_bh(&ar->data_lock); 1201 ch = ar->scan_channel; 1202 if (!ch) 1203 ch = ar->rx_channel; 1204 if (!ch) 1205 ch = ath10k_htt_rx_h_peer_channel(ar, rxd); 1206 if (!ch) 1207 ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id); 1208 if (!ch) 1209 ch = ath10k_htt_rx_h_any_channel(ar); 1210 if (!ch) 1211 ch = ar->tgt_oper_chan; 1212 spin_unlock_bh(&ar->data_lock); 1213 1214 if (!ch) 1215 return false; 1216 1217 status->band = ch->band; 1218 status->freq = ch->center_freq; 1219 1220 return true; 1221 } 1222 1223 static void ath10k_htt_rx_h_signal(struct ath10k *ar, 1224 struct ieee80211_rx_status *status, 1225 struct htt_rx_desc *rxd) 1226 { 1227 struct ath10k_hw_params *hw = &ar->hw_params; 1228 struct rx_ppdu_start *rxd_ppdu_start = ath10k_htt_rx_desc_get_ppdu_start(hw, rxd); 1229 int i; 1230 1231 for (i = 0; i < IEEE80211_MAX_CHAINS ; i++) { 1232 status->chains &= ~BIT(i); 1233 1234 if (rxd_ppdu_start->rssi_chains[i].pri20_mhz != 0x80) { 1235 status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR + 1236 rxd_ppdu_start->rssi_chains[i].pri20_mhz; 1237 1238 status->chains |= BIT(i); 1239 } 1240 } 1241 1242 /* FIXME: Get real NF */ 1243 status->signal = ATH10K_DEFAULT_NOISE_FLOOR + 1244 rxd_ppdu_start->rssi_comb; 1245 status->flag &= ~RX_FLAG_NO_SIGNAL_VAL; 1246 } 1247 1248 static void ath10k_htt_rx_h_mactime(struct ath10k *ar, 1249 struct ieee80211_rx_status *status, 1250 struct htt_rx_desc *rxd) 1251 { 1252 struct ath10k_hw_params *hw = &ar->hw_params; 1253 struct rx_ppdu_end_common *rxd_ppdu_end_common; 1254 1255 rxd_ppdu_end_common = ath10k_htt_rx_desc_get_ppdu_end(hw, rxd); 1256 1257 /* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This 1258 * means all prior MSDUs in a PPDU are reported to mac80211 without the 1259 * TSF. Is it worth holding frames until end of PPDU is known? 1260 * 1261 * FIXME: Can we get/compute 64bit TSF? 1262 */ 1263 status->mactime = __le32_to_cpu(rxd_ppdu_end_common->tsf_timestamp); 1264 status->flag |= RX_FLAG_MACTIME_END; 1265 } 1266 1267 static void ath10k_htt_rx_h_ppdu(struct ath10k *ar, 1268 struct sk_buff_head *amsdu, 1269 struct ieee80211_rx_status *status, 1270 u32 vdev_id) 1271 { 1272 struct sk_buff *first; 1273 struct ath10k_hw_params *hw = &ar->hw_params; 1274 struct htt_rx_desc *rxd; 1275 struct rx_attention *rxd_attention; 1276 bool is_first_ppdu; 1277 bool is_last_ppdu; 1278 1279 if (skb_queue_empty(amsdu)) 1280 return; 1281 1282 first = skb_peek(amsdu); 1283 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 1284 (void *)first->data - hw->rx_desc_ops->rx_desc_size); 1285 1286 rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd); 1287 1288 is_first_ppdu = !!(rxd_attention->flags & 1289 __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU)); 1290 is_last_ppdu = !!(rxd_attention->flags & 1291 __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU)); 1292 1293 if (is_first_ppdu) { 1294 /* New PPDU starts so clear out the old per-PPDU status. */ 1295 status->freq = 0; 1296 status->rate_idx = 0; 1297 status->nss = 0; 1298 status->encoding = RX_ENC_LEGACY; 1299 status->bw = RATE_INFO_BW_20; 1300 1301 status->flag &= ~RX_FLAG_MACTIME; 1302 status->flag |= RX_FLAG_NO_SIGNAL_VAL; 1303 1304 status->flag &= ~(RX_FLAG_AMPDU_IS_LAST); 1305 status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN; 1306 status->ampdu_reference = ar->ampdu_reference; 1307 1308 ath10k_htt_rx_h_signal(ar, status, rxd); 1309 ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id); 1310 ath10k_htt_rx_h_rates(ar, status, rxd); 1311 } 1312 1313 if (is_last_ppdu) { 1314 ath10k_htt_rx_h_mactime(ar, status, rxd); 1315 1316 /* set ampdu last segment flag */ 1317 status->flag |= RX_FLAG_AMPDU_IS_LAST; 1318 ar->ampdu_reference++; 1319 } 1320 } 1321 1322 static const char * const tid_to_ac[] = { 1323 "BE", 1324 "BK", 1325 "BK", 1326 "BE", 1327 "VI", 1328 "VI", 1329 "VO", 1330 "VO", 1331 }; 1332 1333 static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size) 1334 { 1335 u8 *qc; 1336 int tid; 1337 1338 if (!ieee80211_is_data_qos(hdr->frame_control)) 1339 return ""; 1340 1341 qc = ieee80211_get_qos_ctl(hdr); 1342 tid = *qc & IEEE80211_QOS_CTL_TID_MASK; 1343 if (tid < 8) 1344 snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]); 1345 else 1346 snprintf(out, size, "tid %d", tid); 1347 1348 return out; 1349 } 1350 1351 static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar, 1352 struct ieee80211_rx_status *rx_status, 1353 struct sk_buff *skb) 1354 { 1355 struct ieee80211_rx_status *status; 1356 1357 status = IEEE80211_SKB_RXCB(skb); 1358 *status = *rx_status; 1359 1360 skb_queue_tail(&ar->htt.rx_msdus_q, skb); 1361 } 1362 1363 static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb) 1364 { 1365 struct ieee80211_rx_status *status; 1366 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1367 char tid[32]; 1368 1369 status = IEEE80211_SKB_RXCB(skb); 1370 1371 if (!(ar->filter_flags & FIF_FCSFAIL) && 1372 status->flag & RX_FLAG_FAILED_FCS_CRC) { 1373 ar->stats.rx_crc_err_drop++; 1374 dev_kfree_skb_any(skb); 1375 return; 1376 } 1377 1378 ath10k_dbg(ar, ATH10K_DBG_DATA, 1379 "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", 1380 skb, 1381 skb->len, 1382 ieee80211_get_SA(hdr), 1383 ath10k_get_tid(hdr, tid, sizeof(tid)), 1384 is_multicast_ether_addr(ieee80211_get_DA(hdr)) ? 1385 "mcast" : "ucast", 1386 IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl)), 1387 (status->encoding == RX_ENC_LEGACY) ? "legacy" : "", 1388 (status->encoding == RX_ENC_HT) ? "ht" : "", 1389 (status->encoding == RX_ENC_VHT) ? "vht" : "", 1390 (status->bw == RATE_INFO_BW_40) ? "40" : "", 1391 (status->bw == RATE_INFO_BW_80) ? "80" : "", 1392 (status->bw == RATE_INFO_BW_160) ? "160" : "", 1393 status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "", 1394 status->rate_idx, 1395 status->nss, 1396 status->freq, 1397 status->band, status->flag, 1398 !!(status->flag & RX_FLAG_FAILED_FCS_CRC), 1399 !!(status->flag & RX_FLAG_MMIC_ERROR), 1400 !!(status->flag & RX_FLAG_AMSDU_MORE)); 1401 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ", 1402 skb->data, skb->len); 1403 trace_ath10k_rx_hdr(ar, skb->data, skb->len); 1404 trace_ath10k_rx_payload(ar, skb->data, skb->len); 1405 1406 ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi); 1407 } 1408 1409 static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar, 1410 struct ieee80211_hdr *hdr) 1411 { 1412 int len = ieee80211_hdrlen(hdr->frame_control); 1413 1414 if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING, 1415 ar->running_fw->fw_file.fw_features)) 1416 len = round_up(len, 4); 1417 1418 return len; 1419 } 1420 1421 static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar, 1422 struct sk_buff *msdu, 1423 struct ieee80211_rx_status *status, 1424 enum htt_rx_mpdu_encrypt_type enctype, 1425 bool is_decrypted, 1426 const u8 first_hdr[64]) 1427 { 1428 struct ieee80211_hdr *hdr; 1429 struct ath10k_hw_params *hw = &ar->hw_params; 1430 struct htt_rx_desc *rxd; 1431 struct rx_msdu_end_common *rxd_msdu_end_common; 1432 size_t hdr_len; 1433 size_t crypto_len; 1434 bool is_first; 1435 bool is_last; 1436 bool msdu_limit_err; 1437 int bytes_aligned = ar->hw_params.decap_align_bytes; 1438 u8 *qos; 1439 1440 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 1441 (void *)msdu->data - hw->rx_desc_ops->rx_desc_size); 1442 1443 rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd); 1444 is_first = !!(rxd_msdu_end_common->info0 & 1445 __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)); 1446 is_last = !!(rxd_msdu_end_common->info0 & 1447 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU)); 1448 1449 /* Delivered decapped frame: 1450 * [802.11 header] 1451 * [crypto param] <-- can be trimmed if !fcs_err && 1452 * !decrypt_err && !peer_idx_invalid 1453 * [amsdu header] <-- only if A-MSDU 1454 * [rfc1042/llc] 1455 * [payload] 1456 * [FCS] <-- at end, needs to be trimmed 1457 */ 1458 1459 /* Some hardwares(QCA99x0 variants) limit number of msdus in a-msdu when 1460 * deaggregate, so that unwanted MSDU-deaggregation is avoided for 1461 * error packets. If limit exceeds, hw sends all remaining MSDUs as 1462 * a single last MSDU with this msdu limit error set. 1463 */ 1464 msdu_limit_err = ath10k_htt_rx_desc_msdu_limit_error(hw, rxd); 1465 1466 /* If MSDU limit error happens, then don't warn on, the partial raw MSDU 1467 * without first MSDU is expected in that case, and handled later here. 1468 */ 1469 /* This probably shouldn't happen but warn just in case */ 1470 if (WARN_ON_ONCE(!is_first && !msdu_limit_err)) 1471 return; 1472 1473 /* This probably shouldn't happen but warn just in case */ 1474 if (WARN_ON_ONCE(!(is_first && is_last) && !msdu_limit_err)) 1475 return; 1476 1477 skb_trim(msdu, msdu->len - FCS_LEN); 1478 1479 /* Push original 80211 header */ 1480 if (unlikely(msdu_limit_err)) { 1481 hdr = (struct ieee80211_hdr *)first_hdr; 1482 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1483 crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype); 1484 1485 if (ieee80211_is_data_qos(hdr->frame_control)) { 1486 qos = ieee80211_get_qos_ctl(hdr); 1487 qos[0] |= IEEE80211_QOS_CTL_A_MSDU_PRESENT; 1488 } 1489 1490 if (crypto_len) 1491 memcpy(skb_push(msdu, crypto_len), 1492 (void *)hdr + round_up(hdr_len, bytes_aligned), 1493 crypto_len); 1494 1495 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 1496 } 1497 1498 /* In most cases this will be true for sniffed frames. It makes sense 1499 * to deliver them as-is without stripping the crypto param. This is 1500 * necessary for software based decryption. 1501 * 1502 * If there's no error then the frame is decrypted. At least that is 1503 * the case for frames that come in via fragmented rx indication. 1504 */ 1505 if (!is_decrypted) 1506 return; 1507 1508 /* The payload is decrypted so strip crypto params. Start from tail 1509 * since hdr is used to compute some stuff. 1510 */ 1511 1512 hdr = (void *)msdu->data; 1513 1514 /* Tail */ 1515 if (status->flag & RX_FLAG_IV_STRIPPED) { 1516 skb_trim(msdu, msdu->len - 1517 ath10k_htt_rx_crypto_mic_len(ar, enctype)); 1518 1519 skb_trim(msdu, msdu->len - 1520 ath10k_htt_rx_crypto_icv_len(ar, enctype)); 1521 } else { 1522 /* MIC */ 1523 if (status->flag & RX_FLAG_MIC_STRIPPED) 1524 skb_trim(msdu, msdu->len - 1525 ath10k_htt_rx_crypto_mic_len(ar, enctype)); 1526 1527 /* ICV */ 1528 if (status->flag & RX_FLAG_ICV_STRIPPED) 1529 skb_trim(msdu, msdu->len - 1530 ath10k_htt_rx_crypto_icv_len(ar, enctype)); 1531 } 1532 1533 /* MMIC */ 1534 if ((status->flag & RX_FLAG_MMIC_STRIPPED) && 1535 !ieee80211_has_morefrags(hdr->frame_control) && 1536 enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA) 1537 skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN); 1538 1539 /* Head */ 1540 if (status->flag & RX_FLAG_IV_STRIPPED) { 1541 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1542 crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype); 1543 1544 memmove((void *)msdu->data + crypto_len, 1545 (void *)msdu->data, hdr_len); 1546 skb_pull(msdu, crypto_len); 1547 } 1548 } 1549 1550 static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar, 1551 struct sk_buff *msdu, 1552 struct ieee80211_rx_status *status, 1553 const u8 first_hdr[64], 1554 enum htt_rx_mpdu_encrypt_type enctype) 1555 { 1556 struct ath10k_hw_params *hw = &ar->hw_params; 1557 struct ieee80211_hdr *hdr; 1558 struct htt_rx_desc *rxd; 1559 size_t hdr_len; 1560 u8 da[ETH_ALEN]; 1561 u8 sa[ETH_ALEN]; 1562 int l3_pad_bytes; 1563 int bytes_aligned = ar->hw_params.decap_align_bytes; 1564 1565 /* Delivered decapped frame: 1566 * [nwifi 802.11 header] <-- replaced with 802.11 hdr 1567 * [rfc1042/llc] 1568 * 1569 * Note: The nwifi header doesn't have QoS Control and is 1570 * (always?) a 3addr frame. 1571 * 1572 * Note2: There's no A-MSDU subframe header. Even if it's part 1573 * of an A-MSDU. 1574 */ 1575 1576 /* pull decapped header and copy SA & DA */ 1577 rxd = HTT_RX_BUF_TO_RX_DESC(hw, (void *)msdu->data - 1578 hw->rx_desc_ops->rx_desc_size); 1579 1580 l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd); 1581 skb_put(msdu, l3_pad_bytes); 1582 1583 hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes); 1584 1585 hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr); 1586 ether_addr_copy(da, ieee80211_get_DA(hdr)); 1587 ether_addr_copy(sa, ieee80211_get_SA(hdr)); 1588 skb_pull(msdu, hdr_len); 1589 1590 /* push original 802.11 header */ 1591 hdr = (struct ieee80211_hdr *)first_hdr; 1592 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1593 1594 if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 1595 memcpy(skb_push(msdu, 1596 ath10k_htt_rx_crypto_param_len(ar, enctype)), 1597 (void *)hdr + round_up(hdr_len, bytes_aligned), 1598 ath10k_htt_rx_crypto_param_len(ar, enctype)); 1599 } 1600 1601 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 1602 1603 /* original 802.11 header has a different DA and in 1604 * case of 4addr it may also have different SA 1605 */ 1606 hdr = (struct ieee80211_hdr *)msdu->data; 1607 ether_addr_copy(ieee80211_get_DA(hdr), da); 1608 ether_addr_copy(ieee80211_get_SA(hdr), sa); 1609 } 1610 1611 static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar, 1612 struct sk_buff *msdu, 1613 enum htt_rx_mpdu_encrypt_type enctype) 1614 { 1615 struct ieee80211_hdr *hdr; 1616 struct ath10k_hw_params *hw = &ar->hw_params; 1617 struct htt_rx_desc *rxd; 1618 struct rx_msdu_end_common *rxd_msdu_end_common; 1619 u8 *rxd_rx_hdr_status; 1620 size_t hdr_len, crypto_len; 1621 void *rfc1042; 1622 bool is_first, is_last, is_amsdu; 1623 int bytes_aligned = ar->hw_params.decap_align_bytes; 1624 1625 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 1626 (void *)msdu->data - hw->rx_desc_ops->rx_desc_size); 1627 1628 rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd); 1629 rxd_rx_hdr_status = ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd); 1630 hdr = (void *)rxd_rx_hdr_status; 1631 1632 is_first = !!(rxd_msdu_end_common->info0 & 1633 __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)); 1634 is_last = !!(rxd_msdu_end_common->info0 & 1635 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU)); 1636 is_amsdu = !(is_first && is_last); 1637 1638 rfc1042 = hdr; 1639 1640 if (is_first) { 1641 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1642 crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype); 1643 1644 rfc1042 += round_up(hdr_len, bytes_aligned) + 1645 round_up(crypto_len, bytes_aligned); 1646 } 1647 1648 if (is_amsdu) 1649 rfc1042 += sizeof(struct amsdu_subframe_hdr); 1650 1651 return rfc1042; 1652 } 1653 1654 static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar, 1655 struct sk_buff *msdu, 1656 struct ieee80211_rx_status *status, 1657 const u8 first_hdr[64], 1658 enum htt_rx_mpdu_encrypt_type enctype) 1659 { 1660 struct ath10k_hw_params *hw = &ar->hw_params; 1661 struct ieee80211_hdr *hdr; 1662 struct ethhdr *eth; 1663 size_t hdr_len; 1664 void *rfc1042; 1665 u8 da[ETH_ALEN]; 1666 u8 sa[ETH_ALEN]; 1667 int l3_pad_bytes; 1668 struct htt_rx_desc *rxd; 1669 int bytes_aligned = ar->hw_params.decap_align_bytes; 1670 1671 /* Delivered decapped frame: 1672 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc 1673 * [payload] 1674 */ 1675 1676 rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype); 1677 if (WARN_ON_ONCE(!rfc1042)) 1678 return; 1679 1680 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 1681 (void *)msdu->data - hw->rx_desc_ops->rx_desc_size); 1682 1683 l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd); 1684 skb_put(msdu, l3_pad_bytes); 1685 skb_pull(msdu, l3_pad_bytes); 1686 1687 /* pull decapped header and copy SA & DA */ 1688 eth = (struct ethhdr *)msdu->data; 1689 ether_addr_copy(da, eth->h_dest); 1690 ether_addr_copy(sa, eth->h_source); 1691 skb_pull(msdu, sizeof(struct ethhdr)); 1692 1693 /* push rfc1042/llc/snap */ 1694 memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042, 1695 sizeof(struct rfc1042_hdr)); 1696 1697 /* push original 802.11 header */ 1698 hdr = (struct ieee80211_hdr *)first_hdr; 1699 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1700 1701 if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 1702 memcpy(skb_push(msdu, 1703 ath10k_htt_rx_crypto_param_len(ar, enctype)), 1704 (void *)hdr + round_up(hdr_len, bytes_aligned), 1705 ath10k_htt_rx_crypto_param_len(ar, enctype)); 1706 } 1707 1708 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 1709 1710 /* original 802.11 header has a different DA and in 1711 * case of 4addr it may also have different SA 1712 */ 1713 hdr = (struct ieee80211_hdr *)msdu->data; 1714 ether_addr_copy(ieee80211_get_DA(hdr), da); 1715 ether_addr_copy(ieee80211_get_SA(hdr), sa); 1716 } 1717 1718 static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar, 1719 struct sk_buff *msdu, 1720 struct ieee80211_rx_status *status, 1721 const u8 first_hdr[64], 1722 enum htt_rx_mpdu_encrypt_type enctype) 1723 { 1724 struct ath10k_hw_params *hw = &ar->hw_params; 1725 struct ieee80211_hdr *hdr; 1726 size_t hdr_len; 1727 int l3_pad_bytes; 1728 struct htt_rx_desc *rxd; 1729 int bytes_aligned = ar->hw_params.decap_align_bytes; 1730 1731 /* Delivered decapped frame: 1732 * [amsdu header] <-- replaced with 802.11 hdr 1733 * [rfc1042/llc] 1734 * [payload] 1735 */ 1736 1737 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 1738 (void *)msdu->data - hw->rx_desc_ops->rx_desc_size); 1739 1740 l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd); 1741 1742 skb_put(msdu, l3_pad_bytes); 1743 skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes); 1744 1745 hdr = (struct ieee80211_hdr *)first_hdr; 1746 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1747 1748 if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 1749 memcpy(skb_push(msdu, 1750 ath10k_htt_rx_crypto_param_len(ar, enctype)), 1751 (void *)hdr + round_up(hdr_len, bytes_aligned), 1752 ath10k_htt_rx_crypto_param_len(ar, enctype)); 1753 } 1754 1755 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 1756 } 1757 1758 static void ath10k_htt_rx_h_undecap(struct ath10k *ar, 1759 struct sk_buff *msdu, 1760 struct ieee80211_rx_status *status, 1761 u8 first_hdr[64], 1762 enum htt_rx_mpdu_encrypt_type enctype, 1763 bool is_decrypted) 1764 { 1765 struct ath10k_hw_params *hw = &ar->hw_params; 1766 struct htt_rx_desc *rxd; 1767 struct rx_msdu_start_common *rxd_msdu_start_common; 1768 enum rx_msdu_decap_format decap; 1769 1770 /* First msdu's decapped header: 1771 * [802.11 header] <-- padded to 4 bytes long 1772 * [crypto param] <-- padded to 4 bytes long 1773 * [amsdu header] <-- only if A-MSDU 1774 * [rfc1042/llc] 1775 * 1776 * Other (2nd, 3rd, ..) msdu's decapped header: 1777 * [amsdu header] <-- only if A-MSDU 1778 * [rfc1042/llc] 1779 */ 1780 1781 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 1782 (void *)msdu->data - hw->rx_desc_ops->rx_desc_size); 1783 1784 rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd); 1785 decap = MS(__le32_to_cpu(rxd_msdu_start_common->info1), 1786 RX_MSDU_START_INFO1_DECAP_FORMAT); 1787 1788 switch (decap) { 1789 case RX_MSDU_DECAP_RAW: 1790 ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype, 1791 is_decrypted, first_hdr); 1792 break; 1793 case RX_MSDU_DECAP_NATIVE_WIFI: 1794 ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr, 1795 enctype); 1796 break; 1797 case RX_MSDU_DECAP_ETHERNET2_DIX: 1798 ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype); 1799 break; 1800 case RX_MSDU_DECAP_8023_SNAP_LLC: 1801 ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr, 1802 enctype); 1803 break; 1804 } 1805 } 1806 1807 static int ath10k_htt_rx_get_csum_state(struct ath10k_hw_params *hw, struct sk_buff *skb) 1808 { 1809 struct htt_rx_desc *rxd; 1810 struct rx_attention *rxd_attention; 1811 struct rx_msdu_start_common *rxd_msdu_start_common; 1812 u32 flags, info; 1813 bool is_ip4, is_ip6; 1814 bool is_tcp, is_udp; 1815 bool ip_csum_ok, tcpudp_csum_ok; 1816 1817 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 1818 (void *)skb->data - hw->rx_desc_ops->rx_desc_size); 1819 1820 rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd); 1821 rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd); 1822 flags = __le32_to_cpu(rxd_attention->flags); 1823 info = __le32_to_cpu(rxd_msdu_start_common->info1); 1824 1825 is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO); 1826 is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO); 1827 is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO); 1828 is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO); 1829 ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL); 1830 tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL); 1831 1832 if (!is_ip4 && !is_ip6) 1833 return CHECKSUM_NONE; 1834 if (!is_tcp && !is_udp) 1835 return CHECKSUM_NONE; 1836 if (!ip_csum_ok) 1837 return CHECKSUM_NONE; 1838 if (!tcpudp_csum_ok) 1839 return CHECKSUM_NONE; 1840 1841 return CHECKSUM_UNNECESSARY; 1842 } 1843 1844 static void ath10k_htt_rx_h_csum_offload(struct ath10k_hw_params *hw, 1845 struct sk_buff *msdu) 1846 { 1847 msdu->ip_summed = ath10k_htt_rx_get_csum_state(hw, msdu); 1848 } 1849 1850 static u64 ath10k_htt_rx_h_get_pn(struct ath10k *ar, struct sk_buff *skb, 1851 enum htt_rx_mpdu_encrypt_type enctype) 1852 { 1853 struct ieee80211_hdr *hdr; 1854 u64 pn = 0; 1855 u8 *ehdr; 1856 1857 hdr = (struct ieee80211_hdr *)skb->data; 1858 ehdr = skb->data + ieee80211_hdrlen(hdr->frame_control); 1859 1860 if (enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2) { 1861 pn = ehdr[0]; 1862 pn |= (u64)ehdr[1] << 8; 1863 pn |= (u64)ehdr[4] << 16; 1864 pn |= (u64)ehdr[5] << 24; 1865 pn |= (u64)ehdr[6] << 32; 1866 pn |= (u64)ehdr[7] << 40; 1867 } 1868 return pn; 1869 } 1870 1871 static bool ath10k_htt_rx_h_frag_multicast_check(struct ath10k *ar, 1872 struct sk_buff *skb) 1873 { 1874 struct ieee80211_hdr *hdr; 1875 1876 hdr = (struct ieee80211_hdr *)skb->data; 1877 return !is_multicast_ether_addr(hdr->addr1); 1878 } 1879 1880 static bool ath10k_htt_rx_h_frag_pn_check(struct ath10k *ar, 1881 struct sk_buff *skb, 1882 u16 peer_id, 1883 enum htt_rx_mpdu_encrypt_type enctype) 1884 { 1885 struct ath10k_peer *peer; 1886 union htt_rx_pn_t *last_pn, new_pn = {}; 1887 struct ieee80211_hdr *hdr; 1888 u8 tid, frag_number; 1889 u32 seq; 1890 1891 peer = ath10k_peer_find_by_id(ar, peer_id); 1892 if (!peer) { 1893 ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer for frag pn check\n"); 1894 return false; 1895 } 1896 1897 hdr = (struct ieee80211_hdr *)skb->data; 1898 if (ieee80211_is_data_qos(hdr->frame_control)) 1899 tid = ieee80211_get_tid(hdr); 1900 else 1901 tid = ATH10K_TXRX_NON_QOS_TID; 1902 1903 last_pn = &peer->frag_tids_last_pn[tid]; 1904 new_pn.pn48 = ath10k_htt_rx_h_get_pn(ar, skb, enctype); 1905 frag_number = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; 1906 seq = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl)); 1907 1908 if (frag_number == 0) { 1909 last_pn->pn48 = new_pn.pn48; 1910 peer->frag_tids_seq[tid] = seq; 1911 } else { 1912 if (seq != peer->frag_tids_seq[tid]) 1913 return false; 1914 1915 if (new_pn.pn48 != last_pn->pn48 + 1) 1916 return false; 1917 1918 last_pn->pn48 = new_pn.pn48; 1919 } 1920 1921 return true; 1922 } 1923 1924 static void ath10k_htt_rx_h_mpdu(struct ath10k *ar, 1925 struct sk_buff_head *amsdu, 1926 struct ieee80211_rx_status *status, 1927 bool fill_crypt_header, 1928 u8 *rx_hdr, 1929 enum ath10k_pkt_rx_err *err, 1930 u16 peer_id, 1931 bool frag) 1932 { 1933 struct sk_buff *first; 1934 struct sk_buff *last; 1935 struct sk_buff *msdu, *temp; 1936 struct ath10k_hw_params *hw = &ar->hw_params; 1937 struct htt_rx_desc *rxd; 1938 struct rx_attention *rxd_attention; 1939 struct rx_mpdu_start *rxd_mpdu_start; 1940 1941 struct ieee80211_hdr *hdr; 1942 enum htt_rx_mpdu_encrypt_type enctype; 1943 u8 first_hdr[64]; 1944 u8 *qos; 1945 bool has_fcs_err; 1946 bool has_crypto_err; 1947 bool has_tkip_err; 1948 bool has_peer_idx_invalid; 1949 bool is_decrypted; 1950 bool is_mgmt; 1951 u32 attention; 1952 bool frag_pn_check = true, multicast_check = true; 1953 1954 if (skb_queue_empty(amsdu)) 1955 return; 1956 1957 first = skb_peek(amsdu); 1958 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 1959 (void *)first->data - hw->rx_desc_ops->rx_desc_size); 1960 1961 rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd); 1962 rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd); 1963 1964 is_mgmt = !!(rxd_attention->flags & 1965 __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE)); 1966 1967 enctype = MS(__le32_to_cpu(rxd_mpdu_start->info0), 1968 RX_MPDU_START_INFO0_ENCRYPT_TYPE); 1969 1970 /* First MSDU's Rx descriptor in an A-MSDU contains full 802.11 1971 * decapped header. It'll be used for undecapping of each MSDU. 1972 */ 1973 hdr = (void *)ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd); 1974 memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN); 1975 1976 if (rx_hdr) 1977 memcpy(rx_hdr, hdr, RX_HTT_HDR_STATUS_LEN); 1978 1979 /* Each A-MSDU subframe will use the original header as the base and be 1980 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl. 1981 */ 1982 hdr = (void *)first_hdr; 1983 1984 if (ieee80211_is_data_qos(hdr->frame_control)) { 1985 qos = ieee80211_get_qos_ctl(hdr); 1986 qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; 1987 } 1988 1989 /* Some attention flags are valid only in the last MSDU. */ 1990 last = skb_peek_tail(amsdu); 1991 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 1992 (void *)last->data - hw->rx_desc_ops->rx_desc_size); 1993 1994 rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd); 1995 attention = __le32_to_cpu(rxd_attention->flags); 1996 1997 has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR); 1998 has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR); 1999 has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR); 2000 has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID); 2001 2002 /* Note: If hardware captures an encrypted frame that it can't decrypt, 2003 * e.g. due to fcs error, missing peer or invalid key data it will 2004 * report the frame as raw. 2005 */ 2006 is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE && 2007 !has_fcs_err && 2008 !has_crypto_err && 2009 !has_peer_idx_invalid); 2010 2011 /* Clear per-MPDU flags while leaving per-PPDU flags intact. */ 2012 status->flag &= ~(RX_FLAG_FAILED_FCS_CRC | 2013 RX_FLAG_MMIC_ERROR | 2014 RX_FLAG_DECRYPTED | 2015 RX_FLAG_IV_STRIPPED | 2016 RX_FLAG_ONLY_MONITOR | 2017 RX_FLAG_MMIC_STRIPPED); 2018 2019 if (has_fcs_err) 2020 status->flag |= RX_FLAG_FAILED_FCS_CRC; 2021 2022 if (has_tkip_err) 2023 status->flag |= RX_FLAG_MMIC_ERROR; 2024 2025 if (err) { 2026 if (has_fcs_err) 2027 *err = ATH10K_PKT_RX_ERR_FCS; 2028 else if (has_tkip_err) 2029 *err = ATH10K_PKT_RX_ERR_TKIP; 2030 else if (has_crypto_err) 2031 *err = ATH10K_PKT_RX_ERR_CRYPT; 2032 else if (has_peer_idx_invalid) 2033 *err = ATH10K_PKT_RX_ERR_PEER_IDX_INVAL; 2034 } 2035 2036 /* Firmware reports all necessary management frames via WMI already. 2037 * They are not reported to monitor interfaces at all so pass the ones 2038 * coming via HTT to monitor interfaces instead. This simplifies 2039 * matters a lot. 2040 */ 2041 if (is_mgmt) 2042 status->flag |= RX_FLAG_ONLY_MONITOR; 2043 2044 if (is_decrypted) { 2045 status->flag |= RX_FLAG_DECRYPTED; 2046 2047 if (likely(!is_mgmt)) 2048 status->flag |= RX_FLAG_MMIC_STRIPPED; 2049 2050 if (fill_crypt_header) 2051 status->flag |= RX_FLAG_MIC_STRIPPED | 2052 RX_FLAG_ICV_STRIPPED; 2053 else 2054 status->flag |= RX_FLAG_IV_STRIPPED; 2055 } 2056 2057 skb_queue_walk(amsdu, msdu) { 2058 if (frag && !fill_crypt_header && is_decrypted && 2059 enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2) 2060 frag_pn_check = ath10k_htt_rx_h_frag_pn_check(ar, 2061 msdu, 2062 peer_id, 2063 enctype); 2064 2065 if (frag) 2066 multicast_check = ath10k_htt_rx_h_frag_multicast_check(ar, 2067 msdu); 2068 2069 if (!frag_pn_check || !multicast_check) { 2070 /* Discard the fragment with invalid PN or multicast DA 2071 */ 2072 temp = msdu->prev; 2073 __skb_unlink(msdu, amsdu); 2074 dev_kfree_skb_any(msdu); 2075 msdu = temp; 2076 frag_pn_check = true; 2077 multicast_check = true; 2078 continue; 2079 } 2080 2081 ath10k_htt_rx_h_csum_offload(&ar->hw_params, msdu); 2082 2083 if (frag && !fill_crypt_header && 2084 enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA) 2085 status->flag &= ~RX_FLAG_MMIC_STRIPPED; 2086 2087 ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype, 2088 is_decrypted); 2089 2090 /* Undecapping involves copying the original 802.11 header back 2091 * to sk_buff. If frame is protected and hardware has decrypted 2092 * it then remove the protected bit. 2093 */ 2094 if (!is_decrypted) 2095 continue; 2096 if (is_mgmt) 2097 continue; 2098 2099 if (fill_crypt_header) 2100 continue; 2101 2102 hdr = (void *)msdu->data; 2103 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); 2104 2105 if (frag && !fill_crypt_header && 2106 enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA) 2107 status->flag &= ~RX_FLAG_IV_STRIPPED & 2108 ~RX_FLAG_MMIC_STRIPPED; 2109 } 2110 } 2111 2112 static void ath10k_htt_rx_h_enqueue(struct ath10k *ar, 2113 struct sk_buff_head *amsdu, 2114 struct ieee80211_rx_status *status) 2115 { 2116 struct sk_buff *msdu; 2117 struct sk_buff *first_subframe; 2118 2119 first_subframe = skb_peek(amsdu); 2120 2121 while ((msdu = __skb_dequeue(amsdu))) { 2122 /* Setup per-MSDU flags */ 2123 if (skb_queue_empty(amsdu)) 2124 status->flag &= ~RX_FLAG_AMSDU_MORE; 2125 else 2126 status->flag |= RX_FLAG_AMSDU_MORE; 2127 2128 if (msdu == first_subframe) { 2129 first_subframe = NULL; 2130 status->flag &= ~RX_FLAG_ALLOW_SAME_PN; 2131 } else { 2132 status->flag |= RX_FLAG_ALLOW_SAME_PN; 2133 } 2134 2135 ath10k_htt_rx_h_queue_msdu(ar, status, msdu); 2136 } 2137 } 2138 2139 static int ath10k_unchain_msdu(struct sk_buff_head *amsdu, 2140 unsigned long *unchain_cnt) 2141 { 2142 struct sk_buff *skb, *first; 2143 int space; 2144 int total_len = 0; 2145 int amsdu_len = skb_queue_len(amsdu); 2146 2147 /* TODO: Might could optimize this by using 2148 * skb_try_coalesce or similar method to 2149 * decrease copying, or maybe get mac80211 to 2150 * provide a way to just receive a list of 2151 * skb? 2152 */ 2153 2154 first = __skb_dequeue(amsdu); 2155 2156 /* Allocate total length all at once. */ 2157 skb_queue_walk(amsdu, skb) 2158 total_len += skb->len; 2159 2160 space = total_len - skb_tailroom(first); 2161 if ((space > 0) && 2162 (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) { 2163 /* TODO: bump some rx-oom error stat */ 2164 /* put it back together so we can free the 2165 * whole list at once. 2166 */ 2167 __skb_queue_head(amsdu, first); 2168 return -1; 2169 } 2170 2171 /* Walk list again, copying contents into 2172 * msdu_head 2173 */ 2174 while ((skb = __skb_dequeue(amsdu))) { 2175 skb_copy_from_linear_data(skb, skb_put(first, skb->len), 2176 skb->len); 2177 dev_kfree_skb_any(skb); 2178 } 2179 2180 __skb_queue_head(amsdu, first); 2181 2182 *unchain_cnt += amsdu_len - 1; 2183 2184 return 0; 2185 } 2186 2187 static void ath10k_htt_rx_h_unchain(struct ath10k *ar, 2188 struct sk_buff_head *amsdu, 2189 unsigned long *drop_cnt, 2190 unsigned long *unchain_cnt) 2191 { 2192 struct sk_buff *first; 2193 struct ath10k_hw_params *hw = &ar->hw_params; 2194 struct htt_rx_desc *rxd; 2195 struct rx_msdu_start_common *rxd_msdu_start_common; 2196 struct rx_frag_info_common *rxd_frag_info; 2197 enum rx_msdu_decap_format decap; 2198 2199 first = skb_peek(amsdu); 2200 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 2201 (void *)first->data - hw->rx_desc_ops->rx_desc_size); 2202 2203 rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd); 2204 rxd_frag_info = ath10k_htt_rx_desc_get_frag_info(hw, rxd); 2205 decap = MS(__le32_to_cpu(rxd_msdu_start_common->info1), 2206 RX_MSDU_START_INFO1_DECAP_FORMAT); 2207 2208 /* FIXME: Current unchaining logic can only handle simple case of raw 2209 * msdu chaining. If decapping is other than raw the chaining may be 2210 * more complex and this isn't handled by the current code. Don't even 2211 * try re-constructing such frames - it'll be pretty much garbage. 2212 */ 2213 if (decap != RX_MSDU_DECAP_RAW || 2214 skb_queue_len(amsdu) != 1 + rxd_frag_info->ring2_more_count) { 2215 *drop_cnt += skb_queue_len(amsdu); 2216 __skb_queue_purge(amsdu); 2217 return; 2218 } 2219 2220 ath10k_unchain_msdu(amsdu, unchain_cnt); 2221 } 2222 2223 static bool ath10k_htt_rx_validate_amsdu(struct ath10k *ar, 2224 struct sk_buff_head *amsdu) 2225 { 2226 u8 *subframe_hdr; 2227 struct sk_buff *first; 2228 bool is_first, is_last; 2229 struct ath10k_hw_params *hw = &ar->hw_params; 2230 struct htt_rx_desc *rxd; 2231 struct rx_msdu_end_common *rxd_msdu_end_common; 2232 struct rx_mpdu_start *rxd_mpdu_start; 2233 struct ieee80211_hdr *hdr; 2234 size_t hdr_len, crypto_len; 2235 enum htt_rx_mpdu_encrypt_type enctype; 2236 int bytes_aligned = ar->hw_params.decap_align_bytes; 2237 2238 first = skb_peek(amsdu); 2239 2240 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 2241 (void *)first->data - hw->rx_desc_ops->rx_desc_size); 2242 2243 rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd); 2244 rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd); 2245 hdr = (void *)ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd); 2246 2247 is_first = !!(rxd_msdu_end_common->info0 & 2248 __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)); 2249 is_last = !!(rxd_msdu_end_common->info0 & 2250 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU)); 2251 2252 /* Return in case of non-aggregated msdu */ 2253 if (is_first && is_last) 2254 return true; 2255 2256 /* First msdu flag is not set for the first msdu of the list */ 2257 if (!is_first) 2258 return false; 2259 2260 enctype = MS(__le32_to_cpu(rxd_mpdu_start->info0), 2261 RX_MPDU_START_INFO0_ENCRYPT_TYPE); 2262 2263 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2264 crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype); 2265 2266 subframe_hdr = (u8 *)hdr + round_up(hdr_len, bytes_aligned) + 2267 crypto_len; 2268 2269 /* Validate if the amsdu has a proper first subframe. 2270 * There are chances a single msdu can be received as amsdu when 2271 * the unauthenticated amsdu flag of a QoS header 2272 * gets flipped in non-SPP AMSDU's, in such cases the first 2273 * subframe has llc/snap header in place of a valid da. 2274 * return false if the da matches rfc1042 pattern 2275 */ 2276 if (ether_addr_equal(subframe_hdr, rfc1042_header)) 2277 return false; 2278 2279 return true; 2280 } 2281 2282 static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar, 2283 struct sk_buff_head *amsdu, 2284 struct ieee80211_rx_status *rx_status) 2285 { 2286 if (!rx_status->freq) { 2287 ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n"); 2288 return false; 2289 } 2290 2291 if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) { 2292 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n"); 2293 return false; 2294 } 2295 2296 if (!ath10k_htt_rx_validate_amsdu(ar, amsdu)) { 2297 ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid amsdu received\n"); 2298 return false; 2299 } 2300 2301 return true; 2302 } 2303 2304 static void ath10k_htt_rx_h_filter(struct ath10k *ar, 2305 struct sk_buff_head *amsdu, 2306 struct ieee80211_rx_status *rx_status, 2307 unsigned long *drop_cnt) 2308 { 2309 if (skb_queue_empty(amsdu)) 2310 return; 2311 2312 if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status)) 2313 return; 2314 2315 if (drop_cnt) 2316 *drop_cnt += skb_queue_len(amsdu); 2317 2318 __skb_queue_purge(amsdu); 2319 } 2320 2321 static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt) 2322 { 2323 struct ath10k *ar = htt->ar; 2324 struct ieee80211_rx_status *rx_status = &htt->rx_status; 2325 struct sk_buff_head amsdu; 2326 int ret; 2327 unsigned long drop_cnt = 0; 2328 unsigned long unchain_cnt = 0; 2329 unsigned long drop_cnt_filter = 0; 2330 unsigned long msdus_to_queue, num_msdus; 2331 enum ath10k_pkt_rx_err err = ATH10K_PKT_RX_ERR_MAX; 2332 u8 first_hdr[RX_HTT_HDR_STATUS_LEN]; 2333 2334 __skb_queue_head_init(&amsdu); 2335 2336 spin_lock_bh(&htt->rx_ring.lock); 2337 if (htt->rx_confused) { 2338 spin_unlock_bh(&htt->rx_ring.lock); 2339 return -EIO; 2340 } 2341 ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu); 2342 spin_unlock_bh(&htt->rx_ring.lock); 2343 2344 if (ret < 0) { 2345 ath10k_warn(ar, "rx ring became corrupted: %d\n", ret); 2346 __skb_queue_purge(&amsdu); 2347 /* FIXME: It's probably a good idea to reboot the 2348 * device instead of leaving it inoperable. 2349 */ 2350 htt->rx_confused = true; 2351 return ret; 2352 } 2353 2354 num_msdus = skb_queue_len(&amsdu); 2355 2356 ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff); 2357 2358 /* only for ret = 1 indicates chained msdus */ 2359 if (ret > 0) 2360 ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt); 2361 2362 ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter); 2363 ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err, 0, 2364 false); 2365 msdus_to_queue = skb_queue_len(&amsdu); 2366 ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status); 2367 2368 ath10k_sta_update_rx_tid_stats(ar, first_hdr, num_msdus, err, 2369 unchain_cnt, drop_cnt, drop_cnt_filter, 2370 msdus_to_queue); 2371 2372 return 0; 2373 } 2374 2375 static void ath10k_htt_rx_mpdu_desc_pn_hl(struct htt_hl_rx_desc *rx_desc, 2376 union htt_rx_pn_t *pn, 2377 int pn_len_bits) 2378 { 2379 switch (pn_len_bits) { 2380 case 48: 2381 pn->pn48 = __le32_to_cpu(rx_desc->pn_31_0) + 2382 ((u64)(__le32_to_cpu(rx_desc->u0.pn_63_32) & 0xFFFF) << 32); 2383 break; 2384 case 24: 2385 pn->pn24 = __le32_to_cpu(rx_desc->pn_31_0); 2386 break; 2387 } 2388 } 2389 2390 static bool ath10k_htt_rx_pn_cmp48(union htt_rx_pn_t *new_pn, 2391 union htt_rx_pn_t *old_pn) 2392 { 2393 return ((new_pn->pn48 & 0xffffffffffffULL) <= 2394 (old_pn->pn48 & 0xffffffffffffULL)); 2395 } 2396 2397 static bool ath10k_htt_rx_pn_check_replay_hl(struct ath10k *ar, 2398 struct ath10k_peer *peer, 2399 struct htt_rx_indication_hl *rx) 2400 { 2401 bool last_pn_valid, pn_invalid = false; 2402 enum htt_txrx_sec_cast_type sec_index; 2403 enum htt_security_types sec_type; 2404 union htt_rx_pn_t new_pn = {}; 2405 struct htt_hl_rx_desc *rx_desc; 2406 union htt_rx_pn_t *last_pn; 2407 u32 rx_desc_info, tid; 2408 int num_mpdu_ranges; 2409 2410 lockdep_assert_held(&ar->data_lock); 2411 2412 if (!peer) 2413 return false; 2414 2415 if (!(rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU)) 2416 return false; 2417 2418 num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1), 2419 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); 2420 2421 rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges]; 2422 rx_desc_info = __le32_to_cpu(rx_desc->info); 2423 2424 if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED)) 2425 return false; 2426 2427 tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID); 2428 last_pn_valid = peer->tids_last_pn_valid[tid]; 2429 last_pn = &peer->tids_last_pn[tid]; 2430 2431 if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST)) 2432 sec_index = HTT_TXRX_SEC_MCAST; 2433 else 2434 sec_index = HTT_TXRX_SEC_UCAST; 2435 2436 sec_type = peer->rx_pn[sec_index].sec_type; 2437 ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len); 2438 2439 if (sec_type != HTT_SECURITY_AES_CCMP && 2440 sec_type != HTT_SECURITY_TKIP && 2441 sec_type != HTT_SECURITY_TKIP_NOMIC) 2442 return false; 2443 2444 if (last_pn_valid) 2445 pn_invalid = ath10k_htt_rx_pn_cmp48(&new_pn, last_pn); 2446 else 2447 peer->tids_last_pn_valid[tid] = true; 2448 2449 if (!pn_invalid) 2450 last_pn->pn48 = new_pn.pn48; 2451 2452 return pn_invalid; 2453 } 2454 2455 static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt, 2456 struct htt_rx_indication_hl *rx, 2457 struct sk_buff *skb, 2458 enum htt_rx_pn_check_type check_pn_type, 2459 enum htt_rx_tkip_demic_type tkip_mic_type) 2460 { 2461 struct ath10k *ar = htt->ar; 2462 struct ath10k_peer *peer; 2463 struct htt_rx_indication_mpdu_range *mpdu_ranges; 2464 struct fw_rx_desc_hl *fw_desc; 2465 enum htt_txrx_sec_cast_type sec_index; 2466 enum htt_security_types sec_type; 2467 union htt_rx_pn_t new_pn = {}; 2468 struct htt_hl_rx_desc *rx_desc; 2469 struct ieee80211_hdr *hdr; 2470 struct ieee80211_rx_status *rx_status; 2471 u16 peer_id; 2472 u8 rx_desc_len; 2473 int num_mpdu_ranges; 2474 size_t tot_hdr_len; 2475 struct ieee80211_channel *ch; 2476 bool pn_invalid, qos, first_msdu; 2477 u32 tid, rx_desc_info; 2478 2479 peer_id = __le16_to_cpu(rx->hdr.peer_id); 2480 tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID); 2481 2482 spin_lock_bh(&ar->data_lock); 2483 peer = ath10k_peer_find_by_id(ar, peer_id); 2484 spin_unlock_bh(&ar->data_lock); 2485 if (!peer && peer_id != HTT_INVALID_PEERID) 2486 ath10k_warn(ar, "Got RX ind from invalid peer: %u\n", peer_id); 2487 2488 if (!peer) 2489 return true; 2490 2491 num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1), 2492 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); 2493 mpdu_ranges = htt_rx_ind_get_mpdu_ranges_hl(rx); 2494 fw_desc = &rx->fw_desc; 2495 rx_desc_len = fw_desc->len; 2496 2497 if (fw_desc->u.bits.discard) { 2498 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt discard mpdu\n"); 2499 goto err; 2500 } 2501 2502 /* I have not yet seen any case where num_mpdu_ranges > 1. 2503 * qcacld does not seem handle that case either, so we introduce the 2504 * same limitation here as well. 2505 */ 2506 if (num_mpdu_ranges > 1) 2507 ath10k_warn(ar, 2508 "Unsupported number of MPDU ranges: %d, ignoring all but the first\n", 2509 num_mpdu_ranges); 2510 2511 if (mpdu_ranges->mpdu_range_status != 2512 HTT_RX_IND_MPDU_STATUS_OK && 2513 mpdu_ranges->mpdu_range_status != 2514 HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR) { 2515 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt mpdu_range_status %d\n", 2516 mpdu_ranges->mpdu_range_status); 2517 goto err; 2518 } 2519 2520 rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges]; 2521 rx_desc_info = __le32_to_cpu(rx_desc->info); 2522 2523 if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST)) 2524 sec_index = HTT_TXRX_SEC_MCAST; 2525 else 2526 sec_index = HTT_TXRX_SEC_UCAST; 2527 2528 sec_type = peer->rx_pn[sec_index].sec_type; 2529 first_msdu = rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU; 2530 2531 ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len); 2532 2533 if (check_pn_type == HTT_RX_PN_CHECK && tid >= IEEE80211_NUM_TIDS) { 2534 spin_lock_bh(&ar->data_lock); 2535 pn_invalid = ath10k_htt_rx_pn_check_replay_hl(ar, peer, rx); 2536 spin_unlock_bh(&ar->data_lock); 2537 2538 if (pn_invalid) 2539 goto err; 2540 } 2541 2542 /* Strip off all headers before the MAC header before delivery to 2543 * mac80211 2544 */ 2545 tot_hdr_len = sizeof(struct htt_resp_hdr) + sizeof(rx->hdr) + 2546 sizeof(rx->ppdu) + sizeof(rx->prefix) + 2547 sizeof(rx->fw_desc) + 2548 sizeof(*mpdu_ranges) * num_mpdu_ranges + rx_desc_len; 2549 2550 skb_pull(skb, tot_hdr_len); 2551 2552 hdr = (struct ieee80211_hdr *)skb->data; 2553 qos = ieee80211_is_data_qos(hdr->frame_control); 2554 2555 rx_status = IEEE80211_SKB_RXCB(skb); 2556 memset(rx_status, 0, sizeof(*rx_status)); 2557 2558 if (rx->ppdu.combined_rssi == 0) { 2559 /* SDIO firmware does not provide signal */ 2560 rx_status->signal = 0; 2561 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; 2562 } else { 2563 rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR + 2564 rx->ppdu.combined_rssi; 2565 rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL; 2566 } 2567 2568 spin_lock_bh(&ar->data_lock); 2569 ch = ar->scan_channel; 2570 if (!ch) 2571 ch = ar->rx_channel; 2572 if (!ch) 2573 ch = ath10k_htt_rx_h_any_channel(ar); 2574 if (!ch) 2575 ch = ar->tgt_oper_chan; 2576 spin_unlock_bh(&ar->data_lock); 2577 2578 if (ch) { 2579 rx_status->band = ch->band; 2580 rx_status->freq = ch->center_freq; 2581 } 2582 if (rx->fw_desc.flags & FW_RX_DESC_FLAGS_LAST_MSDU) 2583 rx_status->flag &= ~RX_FLAG_AMSDU_MORE; 2584 else 2585 rx_status->flag |= RX_FLAG_AMSDU_MORE; 2586 2587 /* Not entirely sure about this, but all frames from the chipset has 2588 * the protected flag set even though they have already been decrypted. 2589 * Unmasking this flag is necessary in order for mac80211 not to drop 2590 * the frame. 2591 * TODO: Verify this is always the case or find out a way to check 2592 * if there has been hw decryption. 2593 */ 2594 if (ieee80211_has_protected(hdr->frame_control)) { 2595 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); 2596 rx_status->flag |= RX_FLAG_DECRYPTED | 2597 RX_FLAG_IV_STRIPPED | 2598 RX_FLAG_MMIC_STRIPPED; 2599 2600 if (tid < IEEE80211_NUM_TIDS && 2601 first_msdu && 2602 check_pn_type == HTT_RX_PN_CHECK && 2603 (sec_type == HTT_SECURITY_AES_CCMP || 2604 sec_type == HTT_SECURITY_TKIP || 2605 sec_type == HTT_SECURITY_TKIP_NOMIC)) { 2606 u8 offset, *ivp, i; 2607 s8 keyidx = 0; 2608 __le64 pn48 = cpu_to_le64(new_pn.pn48); 2609 2610 hdr = (struct ieee80211_hdr *)skb->data; 2611 offset = ieee80211_hdrlen(hdr->frame_control); 2612 hdr->frame_control |= __cpu_to_le16(IEEE80211_FCTL_PROTECTED); 2613 rx_status->flag &= ~RX_FLAG_IV_STRIPPED; 2614 2615 memmove(skb->data - IEEE80211_CCMP_HDR_LEN, 2616 skb->data, offset); 2617 skb_push(skb, IEEE80211_CCMP_HDR_LEN); 2618 ivp = skb->data + offset; 2619 memset(skb->data + offset, 0, IEEE80211_CCMP_HDR_LEN); 2620 /* Ext IV */ 2621 ivp[IEEE80211_WEP_IV_LEN - 1] |= ATH10K_IEEE80211_EXTIV; 2622 2623 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) { 2624 if (peer->keys[i] && 2625 peer->keys[i]->flags & IEEE80211_KEY_FLAG_PAIRWISE) 2626 keyidx = peer->keys[i]->keyidx; 2627 } 2628 2629 /* Key ID */ 2630 ivp[IEEE80211_WEP_IV_LEN - 1] |= keyidx << 6; 2631 2632 if (sec_type == HTT_SECURITY_AES_CCMP) { 2633 rx_status->flag |= RX_FLAG_MIC_STRIPPED; 2634 /* pn 0, pn 1 */ 2635 memcpy(skb->data + offset, &pn48, 2); 2636 /* pn 1, pn 3 , pn 34 , pn 5 */ 2637 memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4); 2638 } else { 2639 rx_status->flag |= RX_FLAG_ICV_STRIPPED; 2640 /* TSC 0 */ 2641 memcpy(skb->data + offset + 2, &pn48, 1); 2642 /* TSC 1 */ 2643 memcpy(skb->data + offset, ((u8 *)&pn48) + 1, 1); 2644 /* TSC 2 , TSC 3 , TSC 4 , TSC 5*/ 2645 memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4); 2646 } 2647 } 2648 } 2649 2650 if (tkip_mic_type == HTT_RX_TKIP_MIC) 2651 rx_status->flag &= ~RX_FLAG_IV_STRIPPED & 2652 ~RX_FLAG_MMIC_STRIPPED; 2653 2654 if (mpdu_ranges->mpdu_range_status == HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR) 2655 rx_status->flag |= RX_FLAG_MMIC_ERROR; 2656 2657 if (!qos && tid < IEEE80211_NUM_TIDS) { 2658 u8 offset; 2659 __le16 qos_ctrl = 0; 2660 2661 hdr = (struct ieee80211_hdr *)skb->data; 2662 offset = ieee80211_hdrlen(hdr->frame_control); 2663 2664 hdr->frame_control |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA); 2665 memmove(skb->data - IEEE80211_QOS_CTL_LEN, skb->data, offset); 2666 skb_push(skb, IEEE80211_QOS_CTL_LEN); 2667 qos_ctrl = cpu_to_le16(tid); 2668 memcpy(skb->data + offset, &qos_ctrl, IEEE80211_QOS_CTL_LEN); 2669 } 2670 2671 if (ar->napi.dev) 2672 ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi); 2673 else 2674 ieee80211_rx_ni(ar->hw, skb); 2675 2676 /* We have delivered the skb to the upper layers (mac80211) so we 2677 * must not free it. 2678 */ 2679 return false; 2680 err: 2681 /* Tell the caller that it must free the skb since we have not 2682 * consumed it 2683 */ 2684 return true; 2685 } 2686 2687 static int ath10k_htt_rx_frag_tkip_decap_nomic(struct sk_buff *skb, 2688 u16 head_len, 2689 u16 hdr_len) 2690 { 2691 u8 *ivp, *orig_hdr; 2692 2693 orig_hdr = skb->data; 2694 ivp = orig_hdr + hdr_len + head_len; 2695 2696 /* the ExtIV bit is always set to 1 for TKIP */ 2697 if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV)) 2698 return -EINVAL; 2699 2700 memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len); 2701 skb_pull(skb, IEEE80211_TKIP_IV_LEN); 2702 skb_trim(skb, skb->len - ATH10K_IEEE80211_TKIP_MICLEN); 2703 return 0; 2704 } 2705 2706 static int ath10k_htt_rx_frag_tkip_decap_withmic(struct sk_buff *skb, 2707 u16 head_len, 2708 u16 hdr_len) 2709 { 2710 u8 *ivp, *orig_hdr; 2711 2712 orig_hdr = skb->data; 2713 ivp = orig_hdr + hdr_len + head_len; 2714 2715 /* the ExtIV bit is always set to 1 for TKIP */ 2716 if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV)) 2717 return -EINVAL; 2718 2719 memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len); 2720 skb_pull(skb, IEEE80211_TKIP_IV_LEN); 2721 skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN); 2722 return 0; 2723 } 2724 2725 static int ath10k_htt_rx_frag_ccmp_decap(struct sk_buff *skb, 2726 u16 head_len, 2727 u16 hdr_len) 2728 { 2729 u8 *ivp, *orig_hdr; 2730 2731 orig_hdr = skb->data; 2732 ivp = orig_hdr + hdr_len + head_len; 2733 2734 /* the ExtIV bit is always set to 1 for CCMP */ 2735 if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV)) 2736 return -EINVAL; 2737 2738 skb_trim(skb, skb->len - IEEE80211_CCMP_MIC_LEN); 2739 memmove(orig_hdr + IEEE80211_CCMP_HDR_LEN, orig_hdr, head_len + hdr_len); 2740 skb_pull(skb, IEEE80211_CCMP_HDR_LEN); 2741 return 0; 2742 } 2743 2744 static int ath10k_htt_rx_frag_wep_decap(struct sk_buff *skb, 2745 u16 head_len, 2746 u16 hdr_len) 2747 { 2748 u8 *orig_hdr; 2749 2750 orig_hdr = skb->data; 2751 2752 memmove(orig_hdr + IEEE80211_WEP_IV_LEN, 2753 orig_hdr, head_len + hdr_len); 2754 skb_pull(skb, IEEE80211_WEP_IV_LEN); 2755 skb_trim(skb, skb->len - IEEE80211_WEP_ICV_LEN); 2756 return 0; 2757 } 2758 2759 static bool ath10k_htt_rx_proc_rx_frag_ind_hl(struct ath10k_htt *htt, 2760 struct htt_rx_fragment_indication *rx, 2761 struct sk_buff *skb) 2762 { 2763 struct ath10k *ar = htt->ar; 2764 enum htt_rx_tkip_demic_type tkip_mic = HTT_RX_NON_TKIP_MIC; 2765 enum htt_txrx_sec_cast_type sec_index; 2766 struct htt_rx_indication_hl *rx_hl; 2767 enum htt_security_types sec_type; 2768 u32 tid, frag, seq, rx_desc_info; 2769 union htt_rx_pn_t new_pn = {}; 2770 struct htt_hl_rx_desc *rx_desc; 2771 u16 peer_id, sc, hdr_space; 2772 union htt_rx_pn_t *last_pn; 2773 struct ieee80211_hdr *hdr; 2774 int ret, num_mpdu_ranges; 2775 struct ath10k_peer *peer; 2776 struct htt_resp *resp; 2777 size_t tot_hdr_len; 2778 2779 resp = (struct htt_resp *)(skb->data + HTT_RX_FRAG_IND_INFO0_HEADER_LEN); 2780 skb_pull(skb, HTT_RX_FRAG_IND_INFO0_HEADER_LEN); 2781 skb_trim(skb, skb->len - FCS_LEN); 2782 2783 peer_id = __le16_to_cpu(rx->peer_id); 2784 rx_hl = (struct htt_rx_indication_hl *)(&resp->rx_ind_hl); 2785 2786 spin_lock_bh(&ar->data_lock); 2787 peer = ath10k_peer_find_by_id(ar, peer_id); 2788 if (!peer) { 2789 ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer: %u\n", peer_id); 2790 goto err; 2791 } 2792 2793 num_mpdu_ranges = MS(__le32_to_cpu(rx_hl->hdr.info1), 2794 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); 2795 2796 tot_hdr_len = sizeof(struct htt_resp_hdr) + 2797 sizeof(rx_hl->hdr) + 2798 sizeof(rx_hl->ppdu) + 2799 sizeof(rx_hl->prefix) + 2800 sizeof(rx_hl->fw_desc) + 2801 sizeof(struct htt_rx_indication_mpdu_range) * num_mpdu_ranges; 2802 2803 tid = MS(rx_hl->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID); 2804 rx_desc = (struct htt_hl_rx_desc *)(skb->data + tot_hdr_len); 2805 rx_desc_info = __le32_to_cpu(rx_desc->info); 2806 2807 hdr = (struct ieee80211_hdr *)((u8 *)rx_desc + rx_hl->fw_desc.len); 2808 2809 if (is_multicast_ether_addr(hdr->addr1)) { 2810 /* Discard the fragment with multicast DA */ 2811 goto err; 2812 } 2813 2814 if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED)) { 2815 spin_unlock_bh(&ar->data_lock); 2816 return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb, 2817 HTT_RX_NON_PN_CHECK, 2818 HTT_RX_NON_TKIP_MIC); 2819 } 2820 2821 if (ieee80211_has_retry(hdr->frame_control)) 2822 goto err; 2823 2824 hdr_space = ieee80211_hdrlen(hdr->frame_control); 2825 sc = __le16_to_cpu(hdr->seq_ctrl); 2826 seq = IEEE80211_SEQ_TO_SN(sc); 2827 frag = sc & IEEE80211_SCTL_FRAG; 2828 2829 sec_index = MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST) ? 2830 HTT_TXRX_SEC_MCAST : HTT_TXRX_SEC_UCAST; 2831 sec_type = peer->rx_pn[sec_index].sec_type; 2832 ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len); 2833 2834 switch (sec_type) { 2835 case HTT_SECURITY_TKIP: 2836 tkip_mic = HTT_RX_TKIP_MIC; 2837 ret = ath10k_htt_rx_frag_tkip_decap_withmic(skb, 2838 tot_hdr_len + 2839 rx_hl->fw_desc.len, 2840 hdr_space); 2841 if (ret) 2842 goto err; 2843 break; 2844 case HTT_SECURITY_TKIP_NOMIC: 2845 ret = ath10k_htt_rx_frag_tkip_decap_nomic(skb, 2846 tot_hdr_len + 2847 rx_hl->fw_desc.len, 2848 hdr_space); 2849 if (ret) 2850 goto err; 2851 break; 2852 case HTT_SECURITY_AES_CCMP: 2853 ret = ath10k_htt_rx_frag_ccmp_decap(skb, 2854 tot_hdr_len + rx_hl->fw_desc.len, 2855 hdr_space); 2856 if (ret) 2857 goto err; 2858 break; 2859 case HTT_SECURITY_WEP128: 2860 case HTT_SECURITY_WEP104: 2861 case HTT_SECURITY_WEP40: 2862 ret = ath10k_htt_rx_frag_wep_decap(skb, 2863 tot_hdr_len + rx_hl->fw_desc.len, 2864 hdr_space); 2865 if (ret) 2866 goto err; 2867 break; 2868 default: 2869 break; 2870 } 2871 2872 resp = (struct htt_resp *)(skb->data); 2873 2874 if (sec_type != HTT_SECURITY_AES_CCMP && 2875 sec_type != HTT_SECURITY_TKIP && 2876 sec_type != HTT_SECURITY_TKIP_NOMIC) { 2877 spin_unlock_bh(&ar->data_lock); 2878 return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb, 2879 HTT_RX_NON_PN_CHECK, 2880 HTT_RX_NON_TKIP_MIC); 2881 } 2882 2883 last_pn = &peer->frag_tids_last_pn[tid]; 2884 2885 if (frag == 0) { 2886 if (ath10k_htt_rx_pn_check_replay_hl(ar, peer, &resp->rx_ind_hl)) 2887 goto err; 2888 2889 last_pn->pn48 = new_pn.pn48; 2890 peer->frag_tids_seq[tid] = seq; 2891 } else if (sec_type == HTT_SECURITY_AES_CCMP) { 2892 if (seq != peer->frag_tids_seq[tid]) 2893 goto err; 2894 2895 if (new_pn.pn48 != last_pn->pn48 + 1) 2896 goto err; 2897 2898 last_pn->pn48 = new_pn.pn48; 2899 last_pn = &peer->tids_last_pn[tid]; 2900 last_pn->pn48 = new_pn.pn48; 2901 } 2902 2903 spin_unlock_bh(&ar->data_lock); 2904 2905 return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb, 2906 HTT_RX_NON_PN_CHECK, tkip_mic); 2907 2908 err: 2909 spin_unlock_bh(&ar->data_lock); 2910 2911 /* Tell the caller that it must free the skb since we have not 2912 * consumed it 2913 */ 2914 return true; 2915 } 2916 2917 static void ath10k_htt_rx_proc_rx_ind_ll(struct ath10k_htt *htt, 2918 struct htt_rx_indication *rx) 2919 { 2920 struct ath10k *ar = htt->ar; 2921 struct htt_rx_indication_mpdu_range *mpdu_ranges; 2922 int num_mpdu_ranges; 2923 int i, mpdu_count = 0; 2924 u16 peer_id; 2925 u8 tid; 2926 2927 num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1), 2928 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); 2929 peer_id = __le16_to_cpu(rx->hdr.peer_id); 2930 tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID); 2931 2932 mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx); 2933 2934 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ", 2935 rx, struct_size(rx, mpdu_ranges, num_mpdu_ranges)); 2936 2937 for (i = 0; i < num_mpdu_ranges; i++) 2938 mpdu_count += mpdu_ranges[i].mpdu_count; 2939 2940 atomic_add(mpdu_count, &htt->num_mpdus_ready); 2941 2942 ath10k_sta_update_rx_tid_stats_ampdu(ar, peer_id, tid, mpdu_ranges, 2943 num_mpdu_ranges); 2944 } 2945 2946 static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar, 2947 struct sk_buff *skb) 2948 { 2949 struct ath10k_htt *htt = &ar->htt; 2950 struct htt_resp *resp = (struct htt_resp *)skb->data; 2951 struct htt_tx_done tx_done = {}; 2952 int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS); 2953 __le16 msdu_id, *msdus; 2954 bool rssi_enabled = false; 2955 u8 msdu_count = 0, num_airtime_records, tid; 2956 int i, htt_pad = 0; 2957 struct htt_data_tx_compl_ppdu_dur *ppdu_info; 2958 struct ath10k_peer *peer; 2959 u16 ppdu_info_offset = 0, peer_id; 2960 u32 tx_duration; 2961 2962 switch (status) { 2963 case HTT_DATA_TX_STATUS_NO_ACK: 2964 tx_done.status = HTT_TX_COMPL_STATE_NOACK; 2965 break; 2966 case HTT_DATA_TX_STATUS_OK: 2967 tx_done.status = HTT_TX_COMPL_STATE_ACK; 2968 break; 2969 case HTT_DATA_TX_STATUS_DISCARD: 2970 case HTT_DATA_TX_STATUS_POSTPONE: 2971 tx_done.status = HTT_TX_COMPL_STATE_DISCARD; 2972 break; 2973 default: 2974 ath10k_warn(ar, "unhandled tx completion status %d\n", status); 2975 tx_done.status = HTT_TX_COMPL_STATE_DISCARD; 2976 break; 2977 } 2978 2979 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n", 2980 resp->data_tx_completion.num_msdus); 2981 2982 msdu_count = resp->data_tx_completion.num_msdus; 2983 msdus = resp->data_tx_completion.msdus; 2984 rssi_enabled = ath10k_is_rssi_enable(&ar->hw_params, resp); 2985 2986 if (rssi_enabled) 2987 htt_pad = ath10k_tx_data_rssi_get_pad_bytes(&ar->hw_params, 2988 resp); 2989 2990 for (i = 0; i < msdu_count; i++) { 2991 msdu_id = msdus[i]; 2992 tx_done.msdu_id = __le16_to_cpu(msdu_id); 2993 2994 if (rssi_enabled) { 2995 /* Total no of MSDUs should be even, 2996 * if odd MSDUs are sent firmware fills 2997 * last msdu id with 0xffff 2998 */ 2999 if (msdu_count & 0x01) { 3000 msdu_id = msdus[msdu_count + i + 1 + htt_pad]; 3001 tx_done.ack_rssi = __le16_to_cpu(msdu_id); 3002 } else { 3003 msdu_id = msdus[msdu_count + i + htt_pad]; 3004 tx_done.ack_rssi = __le16_to_cpu(msdu_id); 3005 } 3006 } 3007 3008 /* kfifo_put: In practice firmware shouldn't fire off per-CE 3009 * interrupt and main interrupt (MSI/-X range case) for the same 3010 * HTC service so it should be safe to use kfifo_put w/o lock. 3011 * 3012 * From kfifo_put() documentation: 3013 * Note that with only one concurrent reader and one concurrent 3014 * writer, you don't need extra locking to use these macro. 3015 */ 3016 if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) { 3017 ath10k_txrx_tx_unref(htt, &tx_done); 3018 } else if (!kfifo_put(&htt->txdone_fifo, tx_done)) { 3019 ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n", 3020 tx_done.msdu_id, tx_done.status); 3021 ath10k_txrx_tx_unref(htt, &tx_done); 3022 } 3023 } 3024 3025 if (!(resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_PPDU_DURATION_PRESENT)) 3026 return; 3027 3028 ppdu_info_offset = (msdu_count & 0x01) ? msdu_count + 1 : msdu_count; 3029 3030 if (rssi_enabled) 3031 ppdu_info_offset += ppdu_info_offset; 3032 3033 if (resp->data_tx_completion.flags2 & 3034 (HTT_TX_CMPL_FLAG_PPID_PRESENT | HTT_TX_CMPL_FLAG_PA_PRESENT)) 3035 ppdu_info_offset += 2; 3036 3037 ppdu_info = (struct htt_data_tx_compl_ppdu_dur *)&msdus[ppdu_info_offset]; 3038 num_airtime_records = FIELD_GET(HTT_TX_COMPL_PPDU_DUR_INFO0_NUM_ENTRIES_MASK, 3039 __le32_to_cpu(ppdu_info->info0)); 3040 3041 for (i = 0; i < num_airtime_records; i++) { 3042 struct htt_data_tx_ppdu_dur *ppdu_dur; 3043 u32 info0; 3044 3045 ppdu_dur = &ppdu_info->ppdu_dur[i]; 3046 info0 = __le32_to_cpu(ppdu_dur->info0); 3047 3048 peer_id = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_PEER_ID_MASK, 3049 info0); 3050 rcu_read_lock(); 3051 spin_lock_bh(&ar->data_lock); 3052 3053 peer = ath10k_peer_find_by_id(ar, peer_id); 3054 if (!peer || !peer->sta) { 3055 spin_unlock_bh(&ar->data_lock); 3056 rcu_read_unlock(); 3057 continue; 3058 } 3059 3060 tid = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_TID_MASK, info0) & 3061 IEEE80211_QOS_CTL_TID_MASK; 3062 tx_duration = __le32_to_cpu(ppdu_dur->tx_duration); 3063 3064 ieee80211_sta_register_airtime(peer->sta, tid, tx_duration, 0); 3065 3066 spin_unlock_bh(&ar->data_lock); 3067 rcu_read_unlock(); 3068 } 3069 } 3070 3071 static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp) 3072 { 3073 struct htt_rx_addba *ev = &resp->rx_addba; 3074 struct ath10k_peer *peer; 3075 struct ath10k_vif *arvif; 3076 u16 info0, tid, peer_id; 3077 3078 info0 = __le16_to_cpu(ev->info0); 3079 tid = MS(info0, HTT_RX_BA_INFO0_TID); 3080 peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID); 3081 3082 ath10k_dbg(ar, ATH10K_DBG_HTT, 3083 "htt rx addba tid %u peer_id %u size %u\n", 3084 tid, peer_id, ev->window_size); 3085 3086 spin_lock_bh(&ar->data_lock); 3087 peer = ath10k_peer_find_by_id(ar, peer_id); 3088 if (!peer) { 3089 ath10k_warn(ar, "received addba event for invalid peer_id: %u\n", 3090 peer_id); 3091 spin_unlock_bh(&ar->data_lock); 3092 return; 3093 } 3094 3095 arvif = ath10k_get_arvif(ar, peer->vdev_id); 3096 if (!arvif) { 3097 ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n", 3098 peer->vdev_id); 3099 spin_unlock_bh(&ar->data_lock); 3100 return; 3101 } 3102 3103 ath10k_dbg(ar, ATH10K_DBG_HTT, 3104 "htt rx start rx ba session sta %pM tid %u size %u\n", 3105 peer->addr, tid, ev->window_size); 3106 3107 ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid); 3108 spin_unlock_bh(&ar->data_lock); 3109 } 3110 3111 static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp) 3112 { 3113 struct htt_rx_delba *ev = &resp->rx_delba; 3114 struct ath10k_peer *peer; 3115 struct ath10k_vif *arvif; 3116 u16 info0, tid, peer_id; 3117 3118 info0 = __le16_to_cpu(ev->info0); 3119 tid = MS(info0, HTT_RX_BA_INFO0_TID); 3120 peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID); 3121 3122 ath10k_dbg(ar, ATH10K_DBG_HTT, 3123 "htt rx delba tid %u peer_id %u\n", 3124 tid, peer_id); 3125 3126 spin_lock_bh(&ar->data_lock); 3127 peer = ath10k_peer_find_by_id(ar, peer_id); 3128 if (!peer) { 3129 ath10k_warn(ar, "received addba event for invalid peer_id: %u\n", 3130 peer_id); 3131 spin_unlock_bh(&ar->data_lock); 3132 return; 3133 } 3134 3135 arvif = ath10k_get_arvif(ar, peer->vdev_id); 3136 if (!arvif) { 3137 ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n", 3138 peer->vdev_id); 3139 spin_unlock_bh(&ar->data_lock); 3140 return; 3141 } 3142 3143 ath10k_dbg(ar, ATH10K_DBG_HTT, 3144 "htt rx stop rx ba session sta %pM tid %u\n", 3145 peer->addr, tid); 3146 3147 ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid); 3148 spin_unlock_bh(&ar->data_lock); 3149 } 3150 3151 static int ath10k_htt_rx_extract_amsdu(struct ath10k_hw_params *hw, 3152 struct sk_buff_head *list, 3153 struct sk_buff_head *amsdu) 3154 { 3155 struct sk_buff *msdu; 3156 struct htt_rx_desc *rxd; 3157 struct rx_msdu_end_common *rxd_msdu_end_common; 3158 3159 if (skb_queue_empty(list)) 3160 return -ENOBUFS; 3161 3162 if (WARN_ON(!skb_queue_empty(amsdu))) 3163 return -EINVAL; 3164 3165 while ((msdu = __skb_dequeue(list))) { 3166 __skb_queue_tail(amsdu, msdu); 3167 3168 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 3169 (void *)msdu->data - 3170 hw->rx_desc_ops->rx_desc_size); 3171 3172 rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd); 3173 if (rxd_msdu_end_common->info0 & 3174 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU)) 3175 break; 3176 } 3177 3178 msdu = skb_peek_tail(amsdu); 3179 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 3180 (void *)msdu->data - hw->rx_desc_ops->rx_desc_size); 3181 3182 rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd); 3183 if (!(rxd_msdu_end_common->info0 & 3184 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) { 3185 skb_queue_splice_init(amsdu, list); 3186 return -EAGAIN; 3187 } 3188 3189 return 0; 3190 } 3191 3192 static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status, 3193 struct sk_buff *skb) 3194 { 3195 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 3196 3197 if (!ieee80211_has_protected(hdr->frame_control)) 3198 return; 3199 3200 /* Offloaded frames are already decrypted but firmware insists they are 3201 * protected in the 802.11 header. Strip the flag. Otherwise mac80211 3202 * will drop the frame. 3203 */ 3204 3205 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); 3206 status->flag |= RX_FLAG_DECRYPTED | 3207 RX_FLAG_IV_STRIPPED | 3208 RX_FLAG_MMIC_STRIPPED; 3209 } 3210 3211 static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar, 3212 struct sk_buff_head *list) 3213 { 3214 struct ath10k_htt *htt = &ar->htt; 3215 struct ieee80211_rx_status *status = &htt->rx_status; 3216 struct htt_rx_offload_msdu *rx; 3217 struct sk_buff *msdu; 3218 size_t offset; 3219 3220 while ((msdu = __skb_dequeue(list))) { 3221 /* Offloaded frames don't have Rx descriptor. Instead they have 3222 * a short meta information header. 3223 */ 3224 3225 rx = (void *)msdu->data; 3226 3227 skb_put(msdu, sizeof(*rx)); 3228 skb_pull(msdu, sizeof(*rx)); 3229 3230 if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) { 3231 ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n"); 3232 dev_kfree_skb_any(msdu); 3233 continue; 3234 } 3235 3236 skb_put(msdu, __le16_to_cpu(rx->msdu_len)); 3237 3238 /* Offloaded rx header length isn't multiple of 2 nor 4 so the 3239 * actual payload is unaligned. Align the frame. Otherwise 3240 * mac80211 complains. This shouldn't reduce performance much 3241 * because these offloaded frames are rare. 3242 */ 3243 offset = 4 - ((unsigned long)msdu->data & 3); 3244 skb_put(msdu, offset); 3245 memmove(msdu->data + offset, msdu->data, msdu->len); 3246 skb_pull(msdu, offset); 3247 3248 /* FIXME: The frame is NWifi. Re-construct QoS Control 3249 * if possible later. 3250 */ 3251 3252 memset(status, 0, sizeof(*status)); 3253 status->flag |= RX_FLAG_NO_SIGNAL_VAL; 3254 3255 ath10k_htt_rx_h_rx_offload_prot(status, msdu); 3256 ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id); 3257 ath10k_htt_rx_h_queue_msdu(ar, status, msdu); 3258 } 3259 } 3260 3261 static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb) 3262 { 3263 struct ath10k_htt *htt = &ar->htt; 3264 struct htt_resp *resp = (void *)skb->data; 3265 struct ieee80211_rx_status *status = &htt->rx_status; 3266 struct sk_buff_head list; 3267 struct sk_buff_head amsdu; 3268 u16 peer_id; 3269 u16 msdu_count; 3270 u8 vdev_id; 3271 u8 tid; 3272 bool offload; 3273 bool frag; 3274 int ret; 3275 3276 lockdep_assert_held(&htt->rx_ring.lock); 3277 3278 if (htt->rx_confused) 3279 return -EIO; 3280 3281 skb_pull(skb, sizeof(resp->hdr)); 3282 skb_pull(skb, sizeof(resp->rx_in_ord_ind)); 3283 3284 peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id); 3285 msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count); 3286 vdev_id = resp->rx_in_ord_ind.vdev_id; 3287 tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID); 3288 offload = !!(resp->rx_in_ord_ind.info & 3289 HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK); 3290 frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK); 3291 3292 ath10k_dbg(ar, ATH10K_DBG_HTT, 3293 "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n", 3294 vdev_id, peer_id, tid, offload, frag, msdu_count); 3295 3296 if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) { 3297 ath10k_warn(ar, "dropping invalid in order rx indication\n"); 3298 return -EINVAL; 3299 } 3300 3301 /* The event can deliver more than 1 A-MSDU. Each A-MSDU is later 3302 * extracted and processed. 3303 */ 3304 __skb_queue_head_init(&list); 3305 if (ar->hw_params.target_64bit) 3306 ret = ath10k_htt_rx_pop_paddr64_list(htt, &resp->rx_in_ord_ind, 3307 &list); 3308 else 3309 ret = ath10k_htt_rx_pop_paddr32_list(htt, &resp->rx_in_ord_ind, 3310 &list); 3311 3312 if (ret < 0) { 3313 ath10k_warn(ar, "failed to pop paddr list: %d\n", ret); 3314 htt->rx_confused = true; 3315 return -EIO; 3316 } 3317 3318 /* Offloaded frames are very different and need to be handled 3319 * separately. 3320 */ 3321 if (offload) 3322 ath10k_htt_rx_h_rx_offload(ar, &list); 3323 3324 while (!skb_queue_empty(&list)) { 3325 __skb_queue_head_init(&amsdu); 3326 ret = ath10k_htt_rx_extract_amsdu(&ar->hw_params, &list, &amsdu); 3327 switch (ret) { 3328 case 0: 3329 /* Note: The in-order indication may report interleaved 3330 * frames from different PPDUs meaning reported rx rate 3331 * to mac80211 isn't accurate/reliable. It's still 3332 * better to report something than nothing though. This 3333 * should still give an idea about rx rate to the user. 3334 */ 3335 ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id); 3336 ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL); 3337 ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL, 3338 NULL, peer_id, frag); 3339 ath10k_htt_rx_h_enqueue(ar, &amsdu, status); 3340 break; 3341 case -EAGAIN: 3342 fallthrough; 3343 default: 3344 /* Should not happen. */ 3345 ath10k_warn(ar, "failed to extract amsdu: %d\n", ret); 3346 htt->rx_confused = true; 3347 __skb_queue_purge(&list); 3348 return -EIO; 3349 } 3350 } 3351 return ret; 3352 } 3353 3354 static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar, 3355 const __le32 *resp_ids, 3356 int num_resp_ids) 3357 { 3358 int i; 3359 u32 resp_id; 3360 3361 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n", 3362 num_resp_ids); 3363 3364 for (i = 0; i < num_resp_ids; i++) { 3365 resp_id = le32_to_cpu(resp_ids[i]); 3366 3367 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n", 3368 resp_id); 3369 3370 /* TODO: free resp_id */ 3371 } 3372 } 3373 3374 static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb) 3375 { 3376 struct ieee80211_hw *hw = ar->hw; 3377 struct ieee80211_txq *txq; 3378 struct htt_resp *resp = (struct htt_resp *)skb->data; 3379 struct htt_tx_fetch_record *record; 3380 size_t len; 3381 size_t max_num_bytes; 3382 size_t max_num_msdus; 3383 size_t num_bytes; 3384 size_t num_msdus; 3385 const __le32 *resp_ids; 3386 u16 num_records; 3387 u16 num_resp_ids; 3388 u16 peer_id; 3389 u8 tid; 3390 int ret; 3391 int i; 3392 bool may_tx; 3393 3394 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n"); 3395 3396 len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind); 3397 if (unlikely(skb->len < len)) { 3398 ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n"); 3399 return; 3400 } 3401 3402 num_records = le16_to_cpu(resp->tx_fetch_ind.num_records); 3403 num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids); 3404 3405 len += sizeof(resp->tx_fetch_ind.records[0]) * num_records; 3406 len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids; 3407 3408 if (unlikely(skb->len < len)) { 3409 ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n"); 3410 return; 3411 } 3412 3413 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %u num resps %u seq %u\n", 3414 num_records, num_resp_ids, 3415 le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num)); 3416 3417 if (!ar->htt.tx_q_state.enabled) { 3418 ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n"); 3419 return; 3420 } 3421 3422 if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) { 3423 ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n"); 3424 return; 3425 } 3426 3427 rcu_read_lock(); 3428 3429 for (i = 0; i < num_records; i++) { 3430 record = &resp->tx_fetch_ind.records[i]; 3431 peer_id = MS(le16_to_cpu(record->info), 3432 HTT_TX_FETCH_RECORD_INFO_PEER_ID); 3433 tid = MS(le16_to_cpu(record->info), 3434 HTT_TX_FETCH_RECORD_INFO_TID); 3435 max_num_msdus = le16_to_cpu(record->num_msdus); 3436 max_num_bytes = le32_to_cpu(record->num_bytes); 3437 3438 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %u tid %u msdus %zu bytes %zu\n", 3439 i, peer_id, tid, max_num_msdus, max_num_bytes); 3440 3441 if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) || 3442 unlikely(tid >= ar->htt.tx_q_state.num_tids)) { 3443 ath10k_warn(ar, "received out of range peer_id %u tid %u\n", 3444 peer_id, tid); 3445 continue; 3446 } 3447 3448 spin_lock_bh(&ar->data_lock); 3449 txq = ath10k_mac_txq_lookup(ar, peer_id, tid); 3450 spin_unlock_bh(&ar->data_lock); 3451 3452 /* It is okay to release the lock and use txq because RCU read 3453 * lock is held. 3454 */ 3455 3456 if (unlikely(!txq)) { 3457 ath10k_warn(ar, "failed to lookup txq for peer_id %u tid %u\n", 3458 peer_id, tid); 3459 continue; 3460 } 3461 3462 num_msdus = 0; 3463 num_bytes = 0; 3464 3465 ieee80211_txq_schedule_start(hw, txq->ac); 3466 may_tx = ieee80211_txq_may_transmit(hw, txq); 3467 while (num_msdus < max_num_msdus && 3468 num_bytes < max_num_bytes) { 3469 if (!may_tx) 3470 break; 3471 3472 ret = ath10k_mac_tx_push_txq(hw, txq); 3473 if (ret < 0) 3474 break; 3475 3476 num_msdus++; 3477 num_bytes += ret; 3478 } 3479 ieee80211_return_txq(hw, txq, false); 3480 ieee80211_txq_schedule_end(hw, txq->ac); 3481 3482 record->num_msdus = cpu_to_le16(num_msdus); 3483 record->num_bytes = cpu_to_le32(num_bytes); 3484 3485 ath10k_htt_tx_txq_recalc(hw, txq); 3486 } 3487 3488 rcu_read_unlock(); 3489 3490 resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind); 3491 ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids); 3492 3493 ret = ath10k_htt_tx_fetch_resp(ar, 3494 resp->tx_fetch_ind.token, 3495 resp->tx_fetch_ind.fetch_seq_num, 3496 resp->tx_fetch_ind.records, 3497 num_records); 3498 if (unlikely(ret)) { 3499 ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n", 3500 le32_to_cpu(resp->tx_fetch_ind.token), ret); 3501 /* FIXME: request fw restart */ 3502 } 3503 3504 ath10k_htt_tx_txq_sync(ar); 3505 } 3506 3507 static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar, 3508 struct sk_buff *skb) 3509 { 3510 const struct htt_resp *resp = (void *)skb->data; 3511 size_t len; 3512 int num_resp_ids; 3513 3514 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n"); 3515 3516 len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm); 3517 if (unlikely(skb->len < len)) { 3518 ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n"); 3519 return; 3520 } 3521 3522 num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids); 3523 len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids; 3524 3525 if (unlikely(skb->len < len)) { 3526 ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n"); 3527 return; 3528 } 3529 3530 ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, 3531 resp->tx_fetch_confirm.resp_ids, 3532 num_resp_ids); 3533 } 3534 3535 static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar, 3536 struct sk_buff *skb) 3537 { 3538 const struct htt_resp *resp = (void *)skb->data; 3539 const struct htt_tx_mode_switch_record *record; 3540 struct ieee80211_txq *txq; 3541 struct ath10k_txq *artxq; 3542 size_t len; 3543 size_t num_records; 3544 enum htt_tx_mode_switch_mode mode; 3545 bool enable; 3546 u16 info0; 3547 u16 info1; 3548 u16 threshold; 3549 u16 peer_id; 3550 u8 tid; 3551 int i; 3552 3553 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n"); 3554 3555 len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind); 3556 if (unlikely(skb->len < len)) { 3557 ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n"); 3558 return; 3559 } 3560 3561 info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0); 3562 info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1); 3563 3564 enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE); 3565 num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD); 3566 mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE); 3567 threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD); 3568 3569 ath10k_dbg(ar, ATH10K_DBG_HTT, 3570 "htt rx tx mode switch ind info0 0x%04x info1 0x%04x enable %d num records %zd mode %d threshold %u\n", 3571 info0, info1, enable, num_records, mode, threshold); 3572 3573 len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records; 3574 3575 if (unlikely(skb->len < len)) { 3576 ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n"); 3577 return; 3578 } 3579 3580 switch (mode) { 3581 case HTT_TX_MODE_SWITCH_PUSH: 3582 case HTT_TX_MODE_SWITCH_PUSH_PULL: 3583 break; 3584 default: 3585 ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n", 3586 mode); 3587 return; 3588 } 3589 3590 if (!enable) 3591 return; 3592 3593 ar->htt.tx_q_state.enabled = enable; 3594 ar->htt.tx_q_state.mode = mode; 3595 ar->htt.tx_q_state.num_push_allowed = threshold; 3596 3597 rcu_read_lock(); 3598 3599 for (i = 0; i < num_records; i++) { 3600 record = &resp->tx_mode_switch_ind.records[i]; 3601 info0 = le16_to_cpu(record->info0); 3602 peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID); 3603 tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID); 3604 3605 if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) || 3606 unlikely(tid >= ar->htt.tx_q_state.num_tids)) { 3607 ath10k_warn(ar, "received out of range peer_id %u tid %u\n", 3608 peer_id, tid); 3609 continue; 3610 } 3611 3612 spin_lock_bh(&ar->data_lock); 3613 txq = ath10k_mac_txq_lookup(ar, peer_id, tid); 3614 spin_unlock_bh(&ar->data_lock); 3615 3616 /* It is okay to release the lock and use txq because RCU read 3617 * lock is held. 3618 */ 3619 3620 if (unlikely(!txq)) { 3621 ath10k_warn(ar, "failed to lookup txq for peer_id %u tid %u\n", 3622 peer_id, tid); 3623 continue; 3624 } 3625 3626 spin_lock_bh(&ar->htt.tx_lock); 3627 artxq = (void *)txq->drv_priv; 3628 artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus); 3629 spin_unlock_bh(&ar->htt.tx_lock); 3630 } 3631 3632 rcu_read_unlock(); 3633 3634 ath10k_mac_tx_push_pending(ar); 3635 } 3636 3637 void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) 3638 { 3639 bool release; 3640 3641 release = ath10k_htt_t2h_msg_handler(ar, skb); 3642 3643 /* Free the indication buffer */ 3644 if (release) 3645 dev_kfree_skb_any(skb); 3646 } 3647 3648 static inline s8 ath10k_get_legacy_rate_idx(struct ath10k *ar, u8 rate) 3649 { 3650 static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12, 3651 18, 24, 36, 48, 54}; 3652 int i; 3653 3654 for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) { 3655 if (rate == legacy_rates[i]) 3656 return i; 3657 } 3658 3659 ath10k_warn(ar, "Invalid legacy rate %d peer stats", rate); 3660 return -EINVAL; 3661 } 3662 3663 static void 3664 ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar, 3665 struct ath10k_sta *arsta, 3666 struct ath10k_per_peer_tx_stats *pstats, 3667 s8 legacy_rate_idx) 3668 { 3669 struct rate_info *txrate = &arsta->txrate; 3670 struct ath10k_htt_tx_stats *tx_stats; 3671 int idx, ht_idx, gi, mcs, bw, nss; 3672 unsigned long flags; 3673 3674 if (!arsta->tx_stats) 3675 return; 3676 3677 tx_stats = arsta->tx_stats; 3678 flags = txrate->flags; 3679 gi = test_bit(ATH10K_RATE_INFO_FLAGS_SGI_BIT, &flags); 3680 mcs = ATH10K_HW_MCS_RATE(pstats->ratecode); 3681 bw = txrate->bw; 3682 nss = txrate->nss; 3683 ht_idx = mcs + (nss - 1) * 8; 3684 idx = mcs * 8 + 8 * 10 * (nss - 1); 3685 idx += bw * 2 + gi; 3686 3687 #define STATS_OP_FMT(name) tx_stats->stats[ATH10K_STATS_TYPE_##name] 3688 3689 if (txrate->flags & RATE_INFO_FLAGS_VHT_MCS) { 3690 STATS_OP_FMT(SUCC).vht[0][mcs] += pstats->succ_bytes; 3691 STATS_OP_FMT(SUCC).vht[1][mcs] += pstats->succ_pkts; 3692 STATS_OP_FMT(FAIL).vht[0][mcs] += pstats->failed_bytes; 3693 STATS_OP_FMT(FAIL).vht[1][mcs] += pstats->failed_pkts; 3694 STATS_OP_FMT(RETRY).vht[0][mcs] += pstats->retry_bytes; 3695 STATS_OP_FMT(RETRY).vht[1][mcs] += pstats->retry_pkts; 3696 } else if (txrate->flags & RATE_INFO_FLAGS_MCS) { 3697 STATS_OP_FMT(SUCC).ht[0][ht_idx] += pstats->succ_bytes; 3698 STATS_OP_FMT(SUCC).ht[1][ht_idx] += pstats->succ_pkts; 3699 STATS_OP_FMT(FAIL).ht[0][ht_idx] += pstats->failed_bytes; 3700 STATS_OP_FMT(FAIL).ht[1][ht_idx] += pstats->failed_pkts; 3701 STATS_OP_FMT(RETRY).ht[0][ht_idx] += pstats->retry_bytes; 3702 STATS_OP_FMT(RETRY).ht[1][ht_idx] += pstats->retry_pkts; 3703 } else { 3704 mcs = legacy_rate_idx; 3705 3706 STATS_OP_FMT(SUCC).legacy[0][mcs] += pstats->succ_bytes; 3707 STATS_OP_FMT(SUCC).legacy[1][mcs] += pstats->succ_pkts; 3708 STATS_OP_FMT(FAIL).legacy[0][mcs] += pstats->failed_bytes; 3709 STATS_OP_FMT(FAIL).legacy[1][mcs] += pstats->failed_pkts; 3710 STATS_OP_FMT(RETRY).legacy[0][mcs] += pstats->retry_bytes; 3711 STATS_OP_FMT(RETRY).legacy[1][mcs] += pstats->retry_pkts; 3712 } 3713 3714 if (ATH10K_HW_AMPDU(pstats->flags)) { 3715 tx_stats->ba_fails += ATH10K_HW_BA_FAIL(pstats->flags); 3716 3717 if (txrate->flags & RATE_INFO_FLAGS_MCS) { 3718 STATS_OP_FMT(AMPDU).ht[0][ht_idx] += 3719 pstats->succ_bytes + pstats->retry_bytes; 3720 STATS_OP_FMT(AMPDU).ht[1][ht_idx] += 3721 pstats->succ_pkts + pstats->retry_pkts; 3722 } else { 3723 STATS_OP_FMT(AMPDU).vht[0][mcs] += 3724 pstats->succ_bytes + pstats->retry_bytes; 3725 STATS_OP_FMT(AMPDU).vht[1][mcs] += 3726 pstats->succ_pkts + pstats->retry_pkts; 3727 } 3728 STATS_OP_FMT(AMPDU).bw[0][bw] += 3729 pstats->succ_bytes + pstats->retry_bytes; 3730 STATS_OP_FMT(AMPDU).nss[0][nss - 1] += 3731 pstats->succ_bytes + pstats->retry_bytes; 3732 STATS_OP_FMT(AMPDU).gi[0][gi] += 3733 pstats->succ_bytes + pstats->retry_bytes; 3734 STATS_OP_FMT(AMPDU).rate_table[0][idx] += 3735 pstats->succ_bytes + pstats->retry_bytes; 3736 STATS_OP_FMT(AMPDU).bw[1][bw] += 3737 pstats->succ_pkts + pstats->retry_pkts; 3738 STATS_OP_FMT(AMPDU).nss[1][nss - 1] += 3739 pstats->succ_pkts + pstats->retry_pkts; 3740 STATS_OP_FMT(AMPDU).gi[1][gi] += 3741 pstats->succ_pkts + pstats->retry_pkts; 3742 STATS_OP_FMT(AMPDU).rate_table[1][idx] += 3743 pstats->succ_pkts + pstats->retry_pkts; 3744 } else { 3745 tx_stats->ack_fails += 3746 ATH10K_HW_BA_FAIL(pstats->flags); 3747 } 3748 3749 STATS_OP_FMT(SUCC).bw[0][bw] += pstats->succ_bytes; 3750 STATS_OP_FMT(SUCC).nss[0][nss - 1] += pstats->succ_bytes; 3751 STATS_OP_FMT(SUCC).gi[0][gi] += pstats->succ_bytes; 3752 3753 STATS_OP_FMT(SUCC).bw[1][bw] += pstats->succ_pkts; 3754 STATS_OP_FMT(SUCC).nss[1][nss - 1] += pstats->succ_pkts; 3755 STATS_OP_FMT(SUCC).gi[1][gi] += pstats->succ_pkts; 3756 3757 STATS_OP_FMT(FAIL).bw[0][bw] += pstats->failed_bytes; 3758 STATS_OP_FMT(FAIL).nss[0][nss - 1] += pstats->failed_bytes; 3759 STATS_OP_FMT(FAIL).gi[0][gi] += pstats->failed_bytes; 3760 3761 STATS_OP_FMT(FAIL).bw[1][bw] += pstats->failed_pkts; 3762 STATS_OP_FMT(FAIL).nss[1][nss - 1] += pstats->failed_pkts; 3763 STATS_OP_FMT(FAIL).gi[1][gi] += pstats->failed_pkts; 3764 3765 STATS_OP_FMT(RETRY).bw[0][bw] += pstats->retry_bytes; 3766 STATS_OP_FMT(RETRY).nss[0][nss - 1] += pstats->retry_bytes; 3767 STATS_OP_FMT(RETRY).gi[0][gi] += pstats->retry_bytes; 3768 3769 STATS_OP_FMT(RETRY).bw[1][bw] += pstats->retry_pkts; 3770 STATS_OP_FMT(RETRY).nss[1][nss - 1] += pstats->retry_pkts; 3771 STATS_OP_FMT(RETRY).gi[1][gi] += pstats->retry_pkts; 3772 3773 if (txrate->flags >= RATE_INFO_FLAGS_MCS) { 3774 STATS_OP_FMT(SUCC).rate_table[0][idx] += pstats->succ_bytes; 3775 STATS_OP_FMT(SUCC).rate_table[1][idx] += pstats->succ_pkts; 3776 STATS_OP_FMT(FAIL).rate_table[0][idx] += pstats->failed_bytes; 3777 STATS_OP_FMT(FAIL).rate_table[1][idx] += pstats->failed_pkts; 3778 STATS_OP_FMT(RETRY).rate_table[0][idx] += pstats->retry_bytes; 3779 STATS_OP_FMT(RETRY).rate_table[1][idx] += pstats->retry_pkts; 3780 } 3781 3782 tx_stats->tx_duration += pstats->duration; 3783 } 3784 3785 static void 3786 ath10k_update_per_peer_tx_stats(struct ath10k *ar, 3787 struct ieee80211_sta *sta, 3788 struct ath10k_per_peer_tx_stats *peer_stats) 3789 { 3790 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 3791 struct ieee80211_chanctx_conf *conf = NULL; 3792 u8 rate = 0, sgi; 3793 s8 rate_idx = 0; 3794 bool skip_auto_rate; 3795 struct rate_info txrate; 3796 3797 lockdep_assert_held(&ar->data_lock); 3798 3799 txrate.flags = ATH10K_HW_PREAMBLE(peer_stats->ratecode); 3800 txrate.bw = ATH10K_HW_BW(peer_stats->flags); 3801 txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode); 3802 txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode); 3803 sgi = ATH10K_HW_GI(peer_stats->flags); 3804 skip_auto_rate = ATH10K_FW_SKIPPED_RATE_CTRL(peer_stats->flags); 3805 3806 /* Firmware's rate control skips broadcast/management frames, 3807 * if host has configure fixed rates and in some other special cases. 3808 */ 3809 if (skip_auto_rate) 3810 return; 3811 3812 if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) { 3813 ath10k_warn(ar, "Invalid VHT mcs %d peer stats", txrate.mcs); 3814 return; 3815 } 3816 3817 if (txrate.flags == WMI_RATE_PREAMBLE_HT && 3818 (txrate.mcs > 7 || txrate.nss < 1)) { 3819 ath10k_warn(ar, "Invalid HT mcs %d nss %d peer stats", 3820 txrate.mcs, txrate.nss); 3821 return; 3822 } 3823 3824 memset(&arsta->txrate, 0, sizeof(arsta->txrate)); 3825 memset(&arsta->tx_info.status, 0, sizeof(arsta->tx_info.status)); 3826 if (txrate.flags == WMI_RATE_PREAMBLE_CCK || 3827 txrate.flags == WMI_RATE_PREAMBLE_OFDM) { 3828 rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode); 3829 /* This is hacky, FW sends CCK rate 5.5Mbps as 6 */ 3830 if (rate == 6 && txrate.flags == WMI_RATE_PREAMBLE_CCK) 3831 rate = 5; 3832 rate_idx = ath10k_get_legacy_rate_idx(ar, rate); 3833 if (rate_idx < 0) 3834 return; 3835 arsta->txrate.legacy = rate; 3836 } else if (txrate.flags == WMI_RATE_PREAMBLE_HT) { 3837 arsta->txrate.flags = RATE_INFO_FLAGS_MCS; 3838 arsta->txrate.mcs = txrate.mcs + 8 * (txrate.nss - 1); 3839 } else { 3840 arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS; 3841 arsta->txrate.mcs = txrate.mcs; 3842 } 3843 3844 switch (txrate.flags) { 3845 case WMI_RATE_PREAMBLE_OFDM: 3846 if (arsta->arvif && arsta->arvif->vif) 3847 conf = rcu_dereference(arsta->arvif->vif->bss_conf.chanctx_conf); 3848 if (conf && conf->def.chan->band == NL80211_BAND_5GHZ) 3849 arsta->tx_info.status.rates[0].idx = rate_idx - 4; 3850 break; 3851 case WMI_RATE_PREAMBLE_CCK: 3852 arsta->tx_info.status.rates[0].idx = rate_idx; 3853 if (sgi) 3854 arsta->tx_info.status.rates[0].flags |= 3855 (IEEE80211_TX_RC_USE_SHORT_PREAMBLE | 3856 IEEE80211_TX_RC_SHORT_GI); 3857 break; 3858 case WMI_RATE_PREAMBLE_HT: 3859 arsta->tx_info.status.rates[0].idx = 3860 txrate.mcs + ((txrate.nss - 1) * 8); 3861 if (sgi) 3862 arsta->tx_info.status.rates[0].flags |= 3863 IEEE80211_TX_RC_SHORT_GI; 3864 arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_MCS; 3865 break; 3866 case WMI_RATE_PREAMBLE_VHT: 3867 ieee80211_rate_set_vht(&arsta->tx_info.status.rates[0], 3868 txrate.mcs, txrate.nss); 3869 if (sgi) 3870 arsta->tx_info.status.rates[0].flags |= 3871 IEEE80211_TX_RC_SHORT_GI; 3872 arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_VHT_MCS; 3873 break; 3874 } 3875 3876 arsta->txrate.nss = txrate.nss; 3877 arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw); 3878 arsta->last_tx_bitrate = cfg80211_calculate_bitrate(&arsta->txrate); 3879 if (sgi) 3880 arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; 3881 3882 switch (arsta->txrate.bw) { 3883 case RATE_INFO_BW_40: 3884 arsta->tx_info.status.rates[0].flags |= 3885 IEEE80211_TX_RC_40_MHZ_WIDTH; 3886 break; 3887 case RATE_INFO_BW_80: 3888 arsta->tx_info.status.rates[0].flags |= 3889 IEEE80211_TX_RC_80_MHZ_WIDTH; 3890 break; 3891 case RATE_INFO_BW_160: 3892 arsta->tx_info.status.rates[0].flags |= 3893 IEEE80211_TX_RC_160_MHZ_WIDTH; 3894 break; 3895 } 3896 3897 if (peer_stats->succ_pkts) { 3898 arsta->tx_info.flags = IEEE80211_TX_STAT_ACK; 3899 arsta->tx_info.status.rates[0].count = 1; 3900 ieee80211_tx_rate_update(ar->hw, sta, &arsta->tx_info); 3901 } 3902 3903 if (ar->htt.disable_tx_comp) { 3904 arsta->tx_failed += peer_stats->failed_pkts; 3905 ath10k_dbg(ar, ATH10K_DBG_HTT, "tx failed %d\n", 3906 arsta->tx_failed); 3907 } 3908 3909 arsta->tx_retries += peer_stats->retry_pkts; 3910 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx retries %d", arsta->tx_retries); 3911 3912 if (ath10k_debug_is_extd_tx_stats_enabled(ar)) 3913 ath10k_accumulate_per_peer_tx_stats(ar, arsta, peer_stats, 3914 rate_idx); 3915 } 3916 3917 static void ath10k_htt_fetch_peer_stats(struct ath10k *ar, 3918 struct sk_buff *skb) 3919 { 3920 struct htt_resp *resp = (struct htt_resp *)skb->data; 3921 struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats; 3922 struct htt_per_peer_tx_stats_ind *tx_stats; 3923 struct ieee80211_sta *sta; 3924 struct ath10k_peer *peer; 3925 int peer_id, i; 3926 u8 ppdu_len, num_ppdu; 3927 3928 num_ppdu = resp->peer_tx_stats.num_ppdu; 3929 ppdu_len = resp->peer_tx_stats.ppdu_len * sizeof(__le32); 3930 3931 if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) { 3932 ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len); 3933 return; 3934 } 3935 3936 tx_stats = (struct htt_per_peer_tx_stats_ind *) 3937 (resp->peer_tx_stats.payload); 3938 peer_id = __le16_to_cpu(tx_stats->peer_id); 3939 3940 rcu_read_lock(); 3941 spin_lock_bh(&ar->data_lock); 3942 peer = ath10k_peer_find_by_id(ar, peer_id); 3943 if (!peer || !peer->sta) { 3944 ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n", 3945 peer_id); 3946 goto out; 3947 } 3948 3949 sta = peer->sta; 3950 for (i = 0; i < num_ppdu; i++) { 3951 tx_stats = (struct htt_per_peer_tx_stats_ind *) 3952 (resp->peer_tx_stats.payload + i * ppdu_len); 3953 3954 p_tx_stats->succ_bytes = __le32_to_cpu(tx_stats->succ_bytes); 3955 p_tx_stats->retry_bytes = __le32_to_cpu(tx_stats->retry_bytes); 3956 p_tx_stats->failed_bytes = 3957 __le32_to_cpu(tx_stats->failed_bytes); 3958 p_tx_stats->ratecode = tx_stats->ratecode; 3959 p_tx_stats->flags = tx_stats->flags; 3960 p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts); 3961 p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts); 3962 p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts); 3963 p_tx_stats->duration = __le16_to_cpu(tx_stats->tx_duration); 3964 3965 ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats); 3966 } 3967 3968 out: 3969 spin_unlock_bh(&ar->data_lock); 3970 rcu_read_unlock(); 3971 } 3972 3973 static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data) 3974 { 3975 struct ath10k_pktlog_hdr *hdr = (struct ath10k_pktlog_hdr *)data; 3976 struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats; 3977 struct ath10k_10_2_peer_tx_stats *tx_stats; 3978 struct ieee80211_sta *sta; 3979 struct ath10k_peer *peer; 3980 u16 log_type = __le16_to_cpu(hdr->log_type); 3981 u32 peer_id = 0, i; 3982 3983 if (log_type != ATH_PKTLOG_TYPE_TX_STAT) 3984 return; 3985 3986 tx_stats = (struct ath10k_10_2_peer_tx_stats *)((hdr->payload) + 3987 ATH10K_10_2_TX_STATS_OFFSET); 3988 3989 if (!tx_stats->tx_ppdu_cnt) 3990 return; 3991 3992 peer_id = tx_stats->peer_id; 3993 3994 rcu_read_lock(); 3995 spin_lock_bh(&ar->data_lock); 3996 peer = ath10k_peer_find_by_id(ar, peer_id); 3997 if (!peer || !peer->sta) { 3998 ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n", 3999 peer_id); 4000 goto out; 4001 } 4002 4003 sta = peer->sta; 4004 for (i = 0; i < tx_stats->tx_ppdu_cnt; i++) { 4005 p_tx_stats->succ_bytes = 4006 __le16_to_cpu(tx_stats->success_bytes[i]); 4007 p_tx_stats->retry_bytes = 4008 __le16_to_cpu(tx_stats->retry_bytes[i]); 4009 p_tx_stats->failed_bytes = 4010 __le16_to_cpu(tx_stats->failed_bytes[i]); 4011 p_tx_stats->ratecode = tx_stats->ratecode[i]; 4012 p_tx_stats->flags = tx_stats->flags[i]; 4013 p_tx_stats->succ_pkts = tx_stats->success_pkts[i]; 4014 p_tx_stats->retry_pkts = tx_stats->retry_pkts[i]; 4015 p_tx_stats->failed_pkts = tx_stats->failed_pkts[i]; 4016 4017 ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats); 4018 } 4019 spin_unlock_bh(&ar->data_lock); 4020 rcu_read_unlock(); 4021 4022 return; 4023 4024 out: 4025 spin_unlock_bh(&ar->data_lock); 4026 rcu_read_unlock(); 4027 } 4028 4029 static int ath10k_htt_rx_pn_len(enum htt_security_types sec_type) 4030 { 4031 switch (sec_type) { 4032 case HTT_SECURITY_TKIP: 4033 case HTT_SECURITY_TKIP_NOMIC: 4034 case HTT_SECURITY_AES_CCMP: 4035 return 48; 4036 default: 4037 return 0; 4038 } 4039 } 4040 4041 static void ath10k_htt_rx_sec_ind_handler(struct ath10k *ar, 4042 struct htt_security_indication *ev) 4043 { 4044 enum htt_txrx_sec_cast_type sec_index; 4045 enum htt_security_types sec_type; 4046 struct ath10k_peer *peer; 4047 4048 spin_lock_bh(&ar->data_lock); 4049 4050 peer = ath10k_peer_find_by_id(ar, __le16_to_cpu(ev->peer_id)); 4051 if (!peer) { 4052 ath10k_warn(ar, "failed to find peer id %d for security indication", 4053 __le16_to_cpu(ev->peer_id)); 4054 goto out; 4055 } 4056 4057 sec_type = MS(ev->flags, HTT_SECURITY_TYPE); 4058 4059 if (ev->flags & HTT_SECURITY_IS_UNICAST) 4060 sec_index = HTT_TXRX_SEC_UCAST; 4061 else 4062 sec_index = HTT_TXRX_SEC_MCAST; 4063 4064 peer->rx_pn[sec_index].sec_type = sec_type; 4065 peer->rx_pn[sec_index].pn_len = ath10k_htt_rx_pn_len(sec_type); 4066 4067 memset(peer->tids_last_pn_valid, 0, sizeof(peer->tids_last_pn_valid)); 4068 memset(peer->tids_last_pn, 0, sizeof(peer->tids_last_pn)); 4069 4070 out: 4071 spin_unlock_bh(&ar->data_lock); 4072 } 4073 4074 bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) 4075 { 4076 struct ath10k_htt *htt = &ar->htt; 4077 struct htt_resp *resp = (struct htt_resp *)skb->data; 4078 enum htt_t2h_msg_type type; 4079 4080 /* confirm alignment */ 4081 if (!IS_ALIGNED((unsigned long)skb->data, 4)) 4082 ath10k_warn(ar, "unaligned htt message, expect trouble\n"); 4083 4084 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n", 4085 resp->hdr.msg_type); 4086 4087 if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) { 4088 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X", 4089 resp->hdr.msg_type, ar->htt.t2h_msg_types_max); 4090 return true; 4091 } 4092 type = ar->htt.t2h_msg_types[resp->hdr.msg_type]; 4093 4094 switch (type) { 4095 case HTT_T2H_MSG_TYPE_VERSION_CONF: { 4096 htt->target_version_major = resp->ver_resp.major; 4097 htt->target_version_minor = resp->ver_resp.minor; 4098 complete(&htt->target_version_received); 4099 break; 4100 } 4101 case HTT_T2H_MSG_TYPE_RX_IND: 4102 if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) { 4103 ath10k_htt_rx_proc_rx_ind_ll(htt, &resp->rx_ind); 4104 } else { 4105 skb_queue_tail(&htt->rx_indication_head, skb); 4106 return false; 4107 } 4108 break; 4109 case HTT_T2H_MSG_TYPE_PEER_MAP: { 4110 struct htt_peer_map_event ev = { 4111 .vdev_id = resp->peer_map.vdev_id, 4112 .peer_id = __le16_to_cpu(resp->peer_map.peer_id), 4113 }; 4114 memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr)); 4115 ath10k_peer_map_event(htt, &ev); 4116 break; 4117 } 4118 case HTT_T2H_MSG_TYPE_PEER_UNMAP: { 4119 struct htt_peer_unmap_event ev = { 4120 .peer_id = __le16_to_cpu(resp->peer_unmap.peer_id), 4121 }; 4122 ath10k_peer_unmap_event(htt, &ev); 4123 break; 4124 } 4125 case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: { 4126 struct htt_tx_done tx_done = {}; 4127 struct ath10k_htt *htt = &ar->htt; 4128 struct ath10k_htc *htc = &ar->htc; 4129 struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid]; 4130 int status = __le32_to_cpu(resp->mgmt_tx_completion.status); 4131 int info = __le32_to_cpu(resp->mgmt_tx_completion.info); 4132 4133 tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id); 4134 4135 switch (status) { 4136 case HTT_MGMT_TX_STATUS_OK: 4137 tx_done.status = HTT_TX_COMPL_STATE_ACK; 4138 if (test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, 4139 ar->wmi.svc_map) && 4140 (resp->mgmt_tx_completion.flags & 4141 HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI)) { 4142 tx_done.ack_rssi = 4143 FIELD_GET(HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK, 4144 info); 4145 } 4146 break; 4147 case HTT_MGMT_TX_STATUS_RETRY: 4148 tx_done.status = HTT_TX_COMPL_STATE_NOACK; 4149 break; 4150 case HTT_MGMT_TX_STATUS_DROP: 4151 tx_done.status = HTT_TX_COMPL_STATE_DISCARD; 4152 break; 4153 } 4154 4155 if (htt->disable_tx_comp) { 4156 spin_lock_bh(&htc->tx_lock); 4157 ep->tx_credits++; 4158 spin_unlock_bh(&htc->tx_lock); 4159 } 4160 4161 status = ath10k_txrx_tx_unref(htt, &tx_done); 4162 if (!status) { 4163 spin_lock_bh(&htt->tx_lock); 4164 ath10k_htt_tx_mgmt_dec_pending(htt); 4165 spin_unlock_bh(&htt->tx_lock); 4166 } 4167 break; 4168 } 4169 case HTT_T2H_MSG_TYPE_TX_COMPL_IND: 4170 ath10k_htt_rx_tx_compl_ind(htt->ar, skb); 4171 break; 4172 case HTT_T2H_MSG_TYPE_SEC_IND: { 4173 struct ath10k *ar = htt->ar; 4174 struct htt_security_indication *ev = &resp->security_indication; 4175 4176 ath10k_htt_rx_sec_ind_handler(ar, ev); 4177 ath10k_dbg(ar, ATH10K_DBG_HTT, 4178 "sec ind peer_id %d unicast %d type %d\n", 4179 __le16_to_cpu(ev->peer_id), 4180 !!(ev->flags & HTT_SECURITY_IS_UNICAST), 4181 MS(ev->flags, HTT_SECURITY_TYPE)); 4182 complete(&ar->install_key_done); 4183 break; 4184 } 4185 case HTT_T2H_MSG_TYPE_RX_FRAG_IND: { 4186 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ", 4187 skb->data, skb->len); 4188 atomic_inc(&htt->num_mpdus_ready); 4189 4190 return ath10k_htt_rx_proc_rx_frag_ind(htt, 4191 &resp->rx_frag_ind, 4192 skb); 4193 } 4194 case HTT_T2H_MSG_TYPE_TEST: 4195 break; 4196 case HTT_T2H_MSG_TYPE_STATS_CONF: 4197 trace_ath10k_htt_stats(ar, skb->data, skb->len); 4198 break; 4199 case HTT_T2H_MSG_TYPE_TX_INSPECT_IND: 4200 /* Firmware can return tx frames if it's unable to fully 4201 * process them and suspects host may be able to fix it. ath10k 4202 * sends all tx frames as already inspected so this shouldn't 4203 * happen unless fw has a bug. 4204 */ 4205 ath10k_warn(ar, "received an unexpected htt tx inspect event\n"); 4206 break; 4207 case HTT_T2H_MSG_TYPE_RX_ADDBA: 4208 ath10k_htt_rx_addba(ar, resp); 4209 break; 4210 case HTT_T2H_MSG_TYPE_RX_DELBA: 4211 ath10k_htt_rx_delba(ar, resp); 4212 break; 4213 case HTT_T2H_MSG_TYPE_PKTLOG: { 4214 trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload, 4215 skb->len - 4216 offsetof(struct htt_resp, 4217 pktlog_msg.payload)); 4218 4219 if (ath10k_peer_stats_enabled(ar)) 4220 ath10k_fetch_10_2_tx_stats(ar, 4221 resp->pktlog_msg.payload); 4222 break; 4223 } 4224 case HTT_T2H_MSG_TYPE_RX_FLUSH: { 4225 /* Ignore this event because mac80211 takes care of Rx 4226 * aggregation reordering. 4227 */ 4228 break; 4229 } 4230 case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: { 4231 skb_queue_tail(&htt->rx_in_ord_compl_q, skb); 4232 return false; 4233 } 4234 case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND: { 4235 struct ath10k_htt *htt = &ar->htt; 4236 struct ath10k_htc *htc = &ar->htc; 4237 struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid]; 4238 u32 msg_word = __le32_to_cpu(*(__le32 *)resp); 4239 int htt_credit_delta; 4240 4241 htt_credit_delta = HTT_TX_CREDIT_DELTA_ABS_GET(msg_word); 4242 if (HTT_TX_CREDIT_SIGN_BIT_GET(msg_word)) 4243 htt_credit_delta = -htt_credit_delta; 4244 4245 ath10k_dbg(ar, ATH10K_DBG_HTT, 4246 "htt credit update delta %d\n", 4247 htt_credit_delta); 4248 4249 if (htt->disable_tx_comp) { 4250 spin_lock_bh(&htc->tx_lock); 4251 ep->tx_credits += htt_credit_delta; 4252 spin_unlock_bh(&htc->tx_lock); 4253 ath10k_dbg(ar, ATH10K_DBG_HTT, 4254 "htt credit total %d\n", 4255 ep->tx_credits); 4256 ep->ep_ops.ep_tx_credits(htc->ar); 4257 } 4258 break; 4259 } 4260 case HTT_T2H_MSG_TYPE_CHAN_CHANGE: { 4261 u32 phymode = __le32_to_cpu(resp->chan_change.phymode); 4262 u32 freq = __le32_to_cpu(resp->chan_change.freq); 4263 4264 ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq); 4265 ath10k_dbg(ar, ATH10K_DBG_HTT, 4266 "htt chan change freq %u phymode %s\n", 4267 freq, ath10k_wmi_phymode_str(phymode)); 4268 break; 4269 } 4270 case HTT_T2H_MSG_TYPE_AGGR_CONF: 4271 break; 4272 case HTT_T2H_MSG_TYPE_TX_FETCH_IND: { 4273 struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC); 4274 4275 if (!tx_fetch_ind) { 4276 ath10k_warn(ar, "failed to copy htt tx fetch ind\n"); 4277 break; 4278 } 4279 skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind); 4280 break; 4281 } 4282 case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM: 4283 ath10k_htt_rx_tx_fetch_confirm(ar, skb); 4284 break; 4285 case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND: 4286 ath10k_htt_rx_tx_mode_switch_ind(ar, skb); 4287 break; 4288 case HTT_T2H_MSG_TYPE_PEER_STATS: 4289 ath10k_htt_fetch_peer_stats(ar, skb); 4290 break; 4291 case HTT_T2H_MSG_TYPE_EN_STATS: 4292 default: 4293 ath10k_warn(ar, "htt event (%d) not handled\n", 4294 resp->hdr.msg_type); 4295 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ", 4296 skb->data, skb->len); 4297 break; 4298 } 4299 return true; 4300 } 4301 EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler); 4302 4303 void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar, 4304 struct sk_buff *skb) 4305 { 4306 trace_ath10k_htt_pktlog(ar, skb->data, skb->len); 4307 dev_kfree_skb_any(skb); 4308 } 4309 EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler); 4310 4311 static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget) 4312 { 4313 struct sk_buff *skb; 4314 4315 while (quota < budget) { 4316 if (skb_queue_empty(&ar->htt.rx_msdus_q)) 4317 break; 4318 4319 skb = skb_dequeue(&ar->htt.rx_msdus_q); 4320 if (!skb) 4321 break; 4322 ath10k_process_rx(ar, skb); 4323 quota++; 4324 } 4325 4326 return quota; 4327 } 4328 4329 int ath10k_htt_rx_hl_indication(struct ath10k *ar, int budget) 4330 { 4331 struct htt_resp *resp; 4332 struct ath10k_htt *htt = &ar->htt; 4333 struct sk_buff *skb; 4334 bool release; 4335 int quota; 4336 4337 for (quota = 0; quota < budget; quota++) { 4338 skb = skb_dequeue(&htt->rx_indication_head); 4339 if (!skb) 4340 break; 4341 4342 resp = (struct htt_resp *)skb->data; 4343 4344 release = ath10k_htt_rx_proc_rx_ind_hl(htt, 4345 &resp->rx_ind_hl, 4346 skb, 4347 HTT_RX_PN_CHECK, 4348 HTT_RX_NON_TKIP_MIC); 4349 4350 if (release) 4351 dev_kfree_skb_any(skb); 4352 4353 ath10k_dbg(ar, ATH10K_DBG_HTT, "rx indication poll pending count:%d\n", 4354 skb_queue_len(&htt->rx_indication_head)); 4355 } 4356 return quota; 4357 } 4358 EXPORT_SYMBOL(ath10k_htt_rx_hl_indication); 4359 4360 int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget) 4361 { 4362 struct ath10k_htt *htt = &ar->htt; 4363 struct htt_tx_done tx_done = {}; 4364 struct sk_buff_head tx_ind_q; 4365 struct sk_buff *skb; 4366 unsigned long flags; 4367 int quota = 0, done, ret; 4368 bool resched_napi = false; 4369 4370 __skb_queue_head_init(&tx_ind_q); 4371 4372 /* Process pending frames before dequeuing more data 4373 * from hardware. 4374 */ 4375 quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget); 4376 if (quota == budget) { 4377 resched_napi = true; 4378 goto exit; 4379 } 4380 4381 while ((skb = skb_dequeue(&htt->rx_in_ord_compl_q))) { 4382 spin_lock_bh(&htt->rx_ring.lock); 4383 ret = ath10k_htt_rx_in_ord_ind(ar, skb); 4384 spin_unlock_bh(&htt->rx_ring.lock); 4385 4386 dev_kfree_skb_any(skb); 4387 if (ret == -EIO) { 4388 resched_napi = true; 4389 goto exit; 4390 } 4391 } 4392 4393 while (atomic_read(&htt->num_mpdus_ready)) { 4394 ret = ath10k_htt_rx_handle_amsdu(htt); 4395 if (ret == -EIO) { 4396 resched_napi = true; 4397 goto exit; 4398 } 4399 atomic_dec(&htt->num_mpdus_ready); 4400 } 4401 4402 /* Deliver received data after processing data from hardware */ 4403 quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget); 4404 4405 /* From NAPI documentation: 4406 * The napi poll() function may also process TX completions, in which 4407 * case if it processes the entire TX ring then it should count that 4408 * work as the rest of the budget. 4409 */ 4410 if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo)) 4411 quota = budget; 4412 4413 /* kfifo_get: called only within txrx_tasklet so it's neatly serialized. 4414 * From kfifo_get() documentation: 4415 * Note that with only one concurrent reader and one concurrent writer, 4416 * you don't need extra locking to use these macro. 4417 */ 4418 while (kfifo_get(&htt->txdone_fifo, &tx_done)) 4419 ath10k_txrx_tx_unref(htt, &tx_done); 4420 4421 ath10k_mac_tx_push_pending(ar); 4422 4423 spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags); 4424 skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q); 4425 spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags); 4426 4427 while ((skb = __skb_dequeue(&tx_ind_q))) { 4428 ath10k_htt_rx_tx_fetch_ind(ar, skb); 4429 dev_kfree_skb_any(skb); 4430 } 4431 4432 exit: 4433 ath10k_htt_rx_msdu_buff_replenish(htt); 4434 /* In case of rx failure or more data to read, report budget 4435 * to reschedule NAPI poll 4436 */ 4437 done = resched_napi ? budget : quota; 4438 4439 return done; 4440 } 4441 EXPORT_SYMBOL(ath10k_htt_txrx_compl_task); 4442 4443 static const struct ath10k_htt_rx_ops htt_rx_ops_32 = { 4444 .htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32, 4445 .htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32, 4446 .htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32, 4447 .htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32, 4448 .htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32, 4449 }; 4450 4451 static const struct ath10k_htt_rx_ops htt_rx_ops_64 = { 4452 .htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64, 4453 .htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64, 4454 .htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64, 4455 .htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64, 4456 .htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64, 4457 }; 4458 4459 static const struct ath10k_htt_rx_ops htt_rx_ops_hl = { 4460 .htt_rx_proc_rx_frag_ind = ath10k_htt_rx_proc_rx_frag_ind_hl, 4461 }; 4462 4463 void ath10k_htt_set_rx_ops(struct ath10k_htt *htt) 4464 { 4465 struct ath10k *ar = htt->ar; 4466 4467 if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) 4468 htt->rx_ops = &htt_rx_ops_hl; 4469 else if (ar->hw_params.target_64bit) 4470 htt->rx_ops = &htt_rx_ops_64; 4471 else 4472 htt->rx_ops = &htt_rx_ops_32; 4473 } 4474