1 // SPDX-License-Identifier: ISC 2 /* 3 * Copyright (c) 2005-2011 Atheros Communications Inc. 4 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. 5 * Copyright (c) 2018, The Linux Foundation. All rights reserved. 6 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 7 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 8 */ 9 10 #include <linux/export.h> 11 12 #include "core.h" 13 #include "htc.h" 14 #include "htt.h" 15 #include "txrx.h" 16 #include "debug.h" 17 #include "trace.h" 18 #include "mac.h" 19 20 #include <linux/log2.h> 21 #include <linux/bitfield.h> 22 23 /* when under memory pressure rx ring refill may fail and needs a retry */ 24 #define HTT_RX_RING_REFILL_RETRY_MS 50 25 26 #define HTT_RX_RING_REFILL_RESCHED_MS 5 27 28 /* shortcut to interpret a raw memory buffer as a rx descriptor */ 29 #define HTT_RX_BUF_TO_RX_DESC(hw, buf) ath10k_htt_rx_desc_from_raw_buffer(hw, buf) 30 31 static int ath10k_htt_rx_get_csum_state(struct ath10k_hw_params *hw, struct sk_buff *skb); 32 33 static struct sk_buff * 34 ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr) 35 { 36 struct ath10k_skb_rxcb *rxcb; 37 38 hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr) 39 if (rxcb->paddr == paddr) 40 return ATH10K_RXCB_SKB(rxcb); 41 42 WARN_ON_ONCE(1); 43 return NULL; 44 } 45 46 static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt) 47 { 48 struct sk_buff *skb; 49 struct ath10k_skb_rxcb *rxcb; 50 struct hlist_node *n; 51 int i; 52 53 if (htt->rx_ring.in_ord_rx) { 54 hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) { 55 skb = ATH10K_RXCB_SKB(rxcb); 56 dma_unmap_single(htt->ar->dev, rxcb->paddr, 57 skb->len + skb_tailroom(skb), 58 DMA_FROM_DEVICE); 59 hash_del(&rxcb->hlist); 60 dev_kfree_skb_any(skb); 61 } 62 } else { 63 for (i = 0; i < htt->rx_ring.size; i++) { 64 skb = htt->rx_ring.netbufs_ring[i]; 65 if (!skb) 66 continue; 67 68 rxcb = ATH10K_SKB_RXCB(skb); 69 dma_unmap_single(htt->ar->dev, rxcb->paddr, 70 skb->len + skb_tailroom(skb), 71 DMA_FROM_DEVICE); 72 dev_kfree_skb_any(skb); 73 } 74 } 75 76 htt->rx_ring.fill_cnt = 0; 77 hash_init(htt->rx_ring.skb_table); 78 memset(htt->rx_ring.netbufs_ring, 0, 79 htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0])); 80 } 81 82 static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt) 83 { 84 return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_32); 85 } 86 87 static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt) 88 { 89 return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_64); 90 } 91 92 static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt, 93 void *vaddr) 94 { 95 htt->rx_ring.paddrs_ring_32 = vaddr; 96 } 97 98 static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt, 99 void *vaddr) 100 { 101 htt->rx_ring.paddrs_ring_64 = vaddr; 102 } 103 104 static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt, 105 dma_addr_t paddr, int idx) 106 { 107 htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr); 108 } 109 110 static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt, 111 dma_addr_t paddr, int idx) 112 { 113 htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr); 114 } 115 116 static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx) 117 { 118 htt->rx_ring.paddrs_ring_32[idx] = 0; 119 } 120 121 static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx) 122 { 123 htt->rx_ring.paddrs_ring_64[idx] = 0; 124 } 125 126 static void *ath10k_htt_get_vaddr_ring_32(struct 
ath10k_htt *htt)
{
	return (void *)htt->rx_ring.paddrs_ring_32;
}

static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt)
{
	return (void *)htt->rx_ring.paddrs_ring_64;
}

static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct ath10k_hw_params *hw = &htt->ar->hw_params;
	struct htt_rx_desc *rx_desc;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	/* The Full Rx Reorder firmware has no way of telling the host
	 * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
	 * To keep things simple make sure the ring is always at most half
	 * full. This guarantees that replenishment overruns are not possible.
	 */
	BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);

	idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	if (idx < 0 || idx >= htt->rx_ring.size) {
		ath10k_err(htt->ar, "rx ring index is not valid, firmware malfunctioning?\n");
		idx &= htt->rx_ring.size_mask;
		ret = -ENOMEM;
		goto fail;
	}

	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = HTT_RX_BUF_TO_RX_DESC(hw, skb->data);
		ath10k_htt_rx_desc_get_attention(hw, rx_desc)->flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		rxcb = ATH10K_SKB_RXCB(skb);
		rxcb->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		ath10k_htt_set_paddrs_ring(htt, paddr, idx);
		htt->rx_ring.fill_cnt++;

		if (htt->rx_ring.in_ord_rx) {
			hash_add(htt->rx_ring.skb_table,
				 &ATH10K_SKB_RXCB(skb)->hlist,
				 paddr);
		}

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	/*
	 * Make sure the rx buffer is updated before the available buffer
	 * index to avoid any potential rx ring corruption.
	 */
	mb();
	*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
	return ret;
}

static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is that RX may take up a significant amount of CPU cycles
	 * and starve other tasks, e.g. TX on an ethernet device while acting
	 * as a bridge with an ath10k wlan interface. This ended up with very
	 * poor performance once the host system's CPU was overwhelmed with RX
	 * on ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turn makes use of the fact that tasklets are
	 * processed in FIFO order. This means actual RX processing can starve
	 * out refilling. If there are not enough buffers on the RX ring the FW
	 * will not report RX until it is refilled with enough buffers. This
	 * automatically balances the load with respect to CPU power.
236 * 237 * This probably comes at a cost of lower maximum throughput but 238 * improves the average and stability. 239 */ 240 spin_lock_bh(&htt->rx_ring.lock); 241 num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt; 242 num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit); 243 num_deficit -= num_to_fill; 244 ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill); 245 if (ret == -ENOMEM) { 246 /* 247 * Failed to fill it to the desired level - 248 * we'll start a timer and try again next time. 249 * As long as enough buffers are left in the ring for 250 * another A-MPDU rx, no special recovery is needed. 251 */ 252 mod_timer(&htt->rx_ring.refill_retry_timer, jiffies + 253 msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS)); 254 } else if (num_deficit > 0) { 255 mod_timer(&htt->rx_ring.refill_retry_timer, jiffies + 256 msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS)); 257 } 258 spin_unlock_bh(&htt->rx_ring.lock); 259 } 260 261 static void ath10k_htt_rx_ring_refill_retry(struct timer_list *t) 262 { 263 struct ath10k_htt *htt = timer_container_of(htt, t, 264 rx_ring.refill_retry_timer); 265 266 ath10k_htt_rx_msdu_buff_replenish(htt); 267 } 268 269 int ath10k_htt_rx_ring_refill(struct ath10k *ar) 270 { 271 struct ath10k_htt *htt = &ar->htt; 272 int ret; 273 274 if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) 275 return 0; 276 277 spin_lock_bh(&htt->rx_ring.lock); 278 ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level - 279 htt->rx_ring.fill_cnt)); 280 281 if (ret) 282 ath10k_htt_rx_ring_free(htt); 283 284 spin_unlock_bh(&htt->rx_ring.lock); 285 286 return ret; 287 } 288 289 void ath10k_htt_rx_free(struct ath10k_htt *htt) 290 { 291 if (htt->ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) 292 return; 293 294 timer_delete_sync(&htt->rx_ring.refill_retry_timer); 295 296 skb_queue_purge(&htt->rx_msdus_q); 297 skb_queue_purge(&htt->rx_in_ord_compl_q); 298 skb_queue_purge(&htt->tx_fetch_ind_q); 299 300 spin_lock_bh(&htt->rx_ring.lock); 301 ath10k_htt_rx_ring_free(htt); 302 spin_unlock_bh(&htt->rx_ring.lock); 303 304 dma_free_coherent(htt->ar->dev, 305 ath10k_htt_get_rx_ring_size(htt), 306 ath10k_htt_get_vaddr_ring(htt), 307 htt->rx_ring.base_paddr); 308 309 ath10k_htt_config_paddrs_ring(htt, NULL); 310 311 dma_free_coherent(htt->ar->dev, 312 sizeof(*htt->rx_ring.alloc_idx.vaddr), 313 htt->rx_ring.alloc_idx.vaddr, 314 htt->rx_ring.alloc_idx.paddr); 315 htt->rx_ring.alloc_idx.vaddr = NULL; 316 317 kfree(htt->rx_ring.netbufs_ring); 318 htt->rx_ring.netbufs_ring = NULL; 319 } 320 321 static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt) 322 { 323 struct ath10k *ar = htt->ar; 324 int idx; 325 struct sk_buff *msdu; 326 327 lockdep_assert_held(&htt->rx_ring.lock); 328 329 if (htt->rx_ring.fill_cnt == 0) { 330 ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n"); 331 return NULL; 332 } 333 334 idx = htt->rx_ring.sw_rd_idx.msdu_payld; 335 msdu = htt->rx_ring.netbufs_ring[idx]; 336 htt->rx_ring.netbufs_ring[idx] = NULL; 337 ath10k_htt_reset_paddrs_ring(htt, idx); 338 339 idx++; 340 idx &= htt->rx_ring.size_mask; 341 htt->rx_ring.sw_rd_idx.msdu_payld = idx; 342 htt->rx_ring.fill_cnt--; 343 344 dma_unmap_single(htt->ar->dev, 345 ATH10K_SKB_RXCB(msdu)->paddr, 346 msdu->len + skb_tailroom(msdu), 347 DMA_FROM_DEVICE); 348 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ", 349 msdu->data, msdu->len + skb_tailroom(msdu)); 350 351 return msdu; 352 } 353 354 /* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */ 355 static int 
ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, 356 struct sk_buff_head *amsdu) 357 { 358 struct ath10k *ar = htt->ar; 359 struct ath10k_hw_params *hw = &ar->hw_params; 360 int msdu_len, msdu_chaining = 0; 361 struct sk_buff *msdu; 362 struct htt_rx_desc *rx_desc; 363 struct rx_attention *rx_desc_attention; 364 struct rx_frag_info_common *rx_desc_frag_info_common; 365 struct rx_msdu_start_common *rx_desc_msdu_start_common; 366 struct rx_msdu_end_common *rx_desc_msdu_end_common; 367 368 lockdep_assert_held(&htt->rx_ring.lock); 369 370 for (;;) { 371 int last_msdu, msdu_len_invalid, msdu_chained; 372 373 msdu = ath10k_htt_rx_netbuf_pop(htt); 374 if (!msdu) { 375 __skb_queue_purge(amsdu); 376 return -ENOENT; 377 } 378 379 __skb_queue_tail(amsdu, msdu); 380 381 rx_desc = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data); 382 rx_desc_attention = ath10k_htt_rx_desc_get_attention(hw, rx_desc); 383 rx_desc_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, 384 rx_desc); 385 rx_desc_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rx_desc); 386 rx_desc_frag_info_common = ath10k_htt_rx_desc_get_frag_info(hw, rx_desc); 387 388 /* FIXME: we must report msdu payload since this is what caller 389 * expects now 390 */ 391 skb_put(msdu, hw->rx_desc_ops->rx_desc_msdu_payload_offset); 392 skb_pull(msdu, hw->rx_desc_ops->rx_desc_msdu_payload_offset); 393 394 /* 395 * Sanity check - confirm the HW is finished filling in the 396 * rx data. 397 * If the HW and SW are working correctly, then it's guaranteed 398 * that the HW's MAC DMA is done before this point in the SW. 399 * To prevent the case that we handle a stale Rx descriptor, 400 * just assert for now until we have a way to recover. 401 */ 402 if (!(__le32_to_cpu(rx_desc_attention->flags) 403 & RX_ATTENTION_FLAGS_MSDU_DONE)) { 404 __skb_queue_purge(amsdu); 405 return -EIO; 406 } 407 408 msdu_len_invalid = !!(__le32_to_cpu(rx_desc_attention->flags) 409 & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR | 410 RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR)); 411 msdu_len = MS(__le32_to_cpu(rx_desc_msdu_start_common->info0), 412 RX_MSDU_START_INFO0_MSDU_LENGTH); 413 msdu_chained = rx_desc_frag_info_common->ring2_more_count; 414 415 if (msdu_len_invalid) 416 msdu_len = 0; 417 418 skb_trim(msdu, 0); 419 skb_put(msdu, min(msdu_len, ath10k_htt_rx_msdu_size(hw))); 420 msdu_len -= msdu->len; 421 422 /* Note: Chained buffers do not contain rx descriptor */ 423 while (msdu_chained--) { 424 msdu = ath10k_htt_rx_netbuf_pop(htt); 425 if (!msdu) { 426 __skb_queue_purge(amsdu); 427 return -ENOENT; 428 } 429 430 __skb_queue_tail(amsdu, msdu); 431 skb_trim(msdu, 0); 432 skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE)); 433 msdu_len -= msdu->len; 434 msdu_chaining = 1; 435 } 436 437 last_msdu = __le32_to_cpu(rx_desc_msdu_end_common->info0) & 438 RX_MSDU_END_INFO0_LAST_MSDU; 439 440 /* FIXME: why are we skipping the first part of the rx_desc? */ 441 #if defined(__linux__) 442 trace_ath10k_htt_rx_desc(ar, (void *)rx_desc + sizeof(u32), 443 #elif defined(__FreeBSD__) 444 trace_ath10k_htt_rx_desc(ar, (u8 *)rx_desc + sizeof(u32), 445 #endif 446 hw->rx_desc_ops->rx_desc_size - sizeof(u32)); 447 448 if (last_msdu) 449 break; 450 } 451 452 if (skb_queue_empty(amsdu)) 453 msdu_chaining = -1; 454 455 /* 456 * Don't refill the ring yet. 457 * 458 * First, the elements popped here are still in use - it is not 459 * safe to overwrite them until the matching call to 460 * mpdu_desc_list_next. 
Second, for efficiency it is preferable to 461 * refill the rx ring with 1 PPDU's worth of rx buffers (something 462 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers 463 * (something like 3 buffers). Consequently, we'll rely on the txrx 464 * SW to tell us when it is done pulling all the PPDU's rx buffers 465 * out of the rx ring, and then refill it just once. 466 */ 467 468 return msdu_chaining; 469 } 470 471 static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt, 472 u64 paddr) 473 { 474 struct ath10k *ar = htt->ar; 475 struct ath10k_skb_rxcb *rxcb; 476 struct sk_buff *msdu; 477 478 lockdep_assert_held(&htt->rx_ring.lock); 479 480 msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr); 481 if (!msdu) 482 return NULL; 483 484 rxcb = ATH10K_SKB_RXCB(msdu); 485 hash_del(&rxcb->hlist); 486 htt->rx_ring.fill_cnt--; 487 488 dma_unmap_single(htt->ar->dev, rxcb->paddr, 489 msdu->len + skb_tailroom(msdu), 490 DMA_FROM_DEVICE); 491 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ", 492 msdu->data, msdu->len + skb_tailroom(msdu)); 493 494 return msdu; 495 } 496 497 static inline void ath10k_htt_append_frag_list(struct sk_buff *skb_head, 498 struct sk_buff *frag_list, 499 unsigned int frag_len) 500 { 501 skb_shinfo(skb_head)->frag_list = frag_list; 502 skb_head->data_len = frag_len; 503 skb_head->len += skb_head->data_len; 504 } 505 506 static int ath10k_htt_rx_handle_amsdu_mon_32(struct ath10k_htt *htt, 507 struct sk_buff *msdu, 508 struct htt_rx_in_ord_msdu_desc **msdu_desc) 509 { 510 struct ath10k *ar = htt->ar; 511 struct ath10k_hw_params *hw = &ar->hw_params; 512 u32 paddr; 513 struct sk_buff *frag_buf; 514 struct sk_buff *prev_frag_buf; 515 u8 last_frag; 516 struct htt_rx_in_ord_msdu_desc *ind_desc = *msdu_desc; 517 struct htt_rx_desc *rxd; 518 int amsdu_len = __le16_to_cpu(ind_desc->msdu_len); 519 520 rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data); 521 trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size); 522 523 skb_put(msdu, hw->rx_desc_ops->rx_desc_size); 524 skb_pull(msdu, hw->rx_desc_ops->rx_desc_size); 525 skb_put(msdu, min(amsdu_len, ath10k_htt_rx_msdu_size(hw))); 526 amsdu_len -= msdu->len; 527 528 last_frag = ind_desc->reserved; 529 if (last_frag) { 530 if (amsdu_len) { 531 ath10k_warn(ar, "invalid amsdu len %u, left %d", 532 __le16_to_cpu(ind_desc->msdu_len), 533 amsdu_len); 534 } 535 return 0; 536 } 537 538 ind_desc++; 539 paddr = __le32_to_cpu(ind_desc->msdu_paddr); 540 frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr); 541 if (!frag_buf) { 542 ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%x", paddr); 543 return -ENOENT; 544 } 545 546 skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE)); 547 ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len); 548 549 amsdu_len -= frag_buf->len; 550 prev_frag_buf = frag_buf; 551 last_frag = ind_desc->reserved; 552 while (!last_frag) { 553 ind_desc++; 554 paddr = __le32_to_cpu(ind_desc->msdu_paddr); 555 frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr); 556 if (!frag_buf) { 557 ath10k_warn(ar, "failed to pop frag-n paddr: 0x%x", 558 paddr); 559 prev_frag_buf->next = NULL; 560 return -ENOENT; 561 } 562 563 skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE)); 564 last_frag = ind_desc->reserved; 565 amsdu_len -= frag_buf->len; 566 567 prev_frag_buf->next = frag_buf; 568 prev_frag_buf = frag_buf; 569 } 570 571 if (amsdu_len) { 572 ath10k_warn(ar, "invalid amsdu len %u, left %d", 573 __le16_to_cpu(ind_desc->msdu_len), amsdu_len); 574 } 575 576 *msdu_desc = ind_desc; 577 578 prev_frag_buf->next = NULL; 579 
return 0; 580 } 581 582 static int 583 ath10k_htt_rx_handle_amsdu_mon_64(struct ath10k_htt *htt, 584 struct sk_buff *msdu, 585 struct htt_rx_in_ord_msdu_desc_ext **msdu_desc) 586 { 587 struct ath10k *ar = htt->ar; 588 struct ath10k_hw_params *hw = &ar->hw_params; 589 u64 paddr; 590 struct sk_buff *frag_buf; 591 struct sk_buff *prev_frag_buf; 592 u8 last_frag; 593 struct htt_rx_in_ord_msdu_desc_ext *ind_desc = *msdu_desc; 594 struct htt_rx_desc *rxd; 595 int amsdu_len = __le16_to_cpu(ind_desc->msdu_len); 596 597 rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data); 598 trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size); 599 600 skb_put(msdu, hw->rx_desc_ops->rx_desc_size); 601 skb_pull(msdu, hw->rx_desc_ops->rx_desc_size); 602 skb_put(msdu, min(amsdu_len, ath10k_htt_rx_msdu_size(hw))); 603 amsdu_len -= msdu->len; 604 605 last_frag = ind_desc->reserved; 606 if (last_frag) { 607 if (amsdu_len) { 608 ath10k_warn(ar, "invalid amsdu len %u, left %d", 609 __le16_to_cpu(ind_desc->msdu_len), 610 amsdu_len); 611 } 612 return 0; 613 } 614 615 ind_desc++; 616 paddr = __le64_to_cpu(ind_desc->msdu_paddr); 617 frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr); 618 if (!frag_buf) { 619 #if defined(__linux__) 620 ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%llx", paddr); 621 #elif defined(__FreeBSD__) 622 ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%jx", (uintmax_t)paddr); 623 #endif 624 return -ENOENT; 625 } 626 627 skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE)); 628 ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len); 629 630 amsdu_len -= frag_buf->len; 631 prev_frag_buf = frag_buf; 632 last_frag = ind_desc->reserved; 633 while (!last_frag) { 634 ind_desc++; 635 paddr = __le64_to_cpu(ind_desc->msdu_paddr); 636 frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr); 637 if (!frag_buf) { 638 #if defined(__linux__) 639 ath10k_warn(ar, "failed to pop frag-n paddr: 0x%llx", 640 paddr); 641 #elif defined(__FreeBSD__) 642 ath10k_warn(ar, "failed to pop frag-n paddr: 0x%jx", 643 (uintmax_t)paddr); 644 #endif 645 prev_frag_buf->next = NULL; 646 return -ENOENT; 647 } 648 649 skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE)); 650 last_frag = ind_desc->reserved; 651 amsdu_len -= frag_buf->len; 652 653 prev_frag_buf->next = frag_buf; 654 prev_frag_buf = frag_buf; 655 } 656 657 if (amsdu_len) { 658 ath10k_warn(ar, "invalid amsdu len %u, left %d", 659 __le16_to_cpu(ind_desc->msdu_len), amsdu_len); 660 } 661 662 *msdu_desc = ind_desc; 663 664 prev_frag_buf->next = NULL; 665 return 0; 666 } 667 668 static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt, 669 struct htt_rx_in_ord_ind *ev, 670 struct sk_buff_head *list) 671 { 672 struct ath10k *ar = htt->ar; 673 struct ath10k_hw_params *hw = &ar->hw_params; 674 struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32; 675 struct htt_rx_desc *rxd; 676 struct rx_attention *rxd_attention; 677 struct sk_buff *msdu; 678 int msdu_count, ret; 679 bool is_offload; 680 u32 paddr; 681 682 lockdep_assert_held(&htt->rx_ring.lock); 683 684 msdu_count = __le16_to_cpu(ev->msdu_count); 685 is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK); 686 687 while (msdu_count--) { 688 paddr = __le32_to_cpu(msdu_desc->msdu_paddr); 689 690 msdu = ath10k_htt_rx_pop_paddr(htt, paddr); 691 if (!msdu) { 692 __skb_queue_purge(list); 693 return -ENOENT; 694 } 695 696 if (!is_offload && ar->monitor_arvif) { 697 ret = ath10k_htt_rx_handle_amsdu_mon_32(htt, msdu, 698 &msdu_desc); 699 if (ret) { 700 __skb_queue_purge(list); 701 return ret; 702 } 703 __skb_queue_tail(list, 
msdu); 704 msdu_desc++; 705 continue; 706 } 707 708 __skb_queue_tail(list, msdu); 709 710 if (!is_offload) { 711 rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data); 712 rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd); 713 714 trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size); 715 716 skb_put(msdu, hw->rx_desc_ops->rx_desc_size); 717 skb_pull(msdu, hw->rx_desc_ops->rx_desc_size); 718 skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len)); 719 720 if (!(__le32_to_cpu(rxd_attention->flags) & 721 RX_ATTENTION_FLAGS_MSDU_DONE)) { 722 ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n"); 723 return -EIO; 724 } 725 } 726 727 msdu_desc++; 728 } 729 730 return 0; 731 } 732 733 static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt, 734 struct htt_rx_in_ord_ind *ev, 735 struct sk_buff_head *list) 736 { 737 struct ath10k *ar = htt->ar; 738 struct ath10k_hw_params *hw = &ar->hw_params; 739 struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64; 740 struct htt_rx_desc *rxd; 741 struct rx_attention *rxd_attention; 742 struct sk_buff *msdu; 743 int msdu_count, ret; 744 bool is_offload; 745 u64 paddr; 746 747 lockdep_assert_held(&htt->rx_ring.lock); 748 749 msdu_count = __le16_to_cpu(ev->msdu_count); 750 is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK); 751 752 while (msdu_count--) { 753 paddr = __le64_to_cpu(msdu_desc->msdu_paddr); 754 msdu = ath10k_htt_rx_pop_paddr(htt, paddr); 755 if (!msdu) { 756 __skb_queue_purge(list); 757 return -ENOENT; 758 } 759 760 if (!is_offload && ar->monitor_arvif) { 761 ret = ath10k_htt_rx_handle_amsdu_mon_64(htt, msdu, 762 &msdu_desc); 763 if (ret) { 764 __skb_queue_purge(list); 765 return ret; 766 } 767 __skb_queue_tail(list, msdu); 768 msdu_desc++; 769 continue; 770 } 771 772 __skb_queue_tail(list, msdu); 773 774 if (!is_offload) { 775 rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data); 776 rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd); 777 778 trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size); 779 780 skb_put(msdu, hw->rx_desc_ops->rx_desc_size); 781 skb_pull(msdu, hw->rx_desc_ops->rx_desc_size); 782 skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len)); 783 784 if (!(__le32_to_cpu(rxd_attention->flags) & 785 RX_ATTENTION_FLAGS_MSDU_DONE)) { 786 ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n"); 787 return -EIO; 788 } 789 } 790 791 msdu_desc++; 792 } 793 794 return 0; 795 } 796 797 int ath10k_htt_rx_alloc(struct ath10k_htt *htt) 798 { 799 struct ath10k *ar = htt->ar; 800 dma_addr_t paddr; 801 void *vaddr, *vaddr_ring; 802 size_t size; 803 struct timer_list *timer = &htt->rx_ring.refill_retry_timer; 804 805 if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) 806 return 0; 807 808 htt->rx_confused = false; 809 810 /* XXX: The fill level could be changed during runtime in response to 811 * the host processing latency. Is this really worth it? 
812 */ 813 htt->rx_ring.size = HTT_RX_RING_SIZE; 814 htt->rx_ring.size_mask = htt->rx_ring.size - 1; 815 htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level; 816 817 if (!is_power_of_2(htt->rx_ring.size)) { 818 ath10k_warn(ar, "htt rx ring size is not power of 2\n"); 819 return -EINVAL; 820 } 821 822 htt->rx_ring.netbufs_ring = 823 kcalloc(htt->rx_ring.size, sizeof(struct sk_buff *), 824 GFP_KERNEL); 825 if (!htt->rx_ring.netbufs_ring) 826 goto err_netbuf; 827 828 size = ath10k_htt_get_rx_ring_size(htt); 829 830 vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL); 831 if (!vaddr_ring) 832 goto err_dma_ring; 833 834 ath10k_htt_config_paddrs_ring(htt, vaddr_ring); 835 htt->rx_ring.base_paddr = paddr; 836 837 vaddr = dma_alloc_coherent(htt->ar->dev, 838 sizeof(*htt->rx_ring.alloc_idx.vaddr), 839 &paddr, GFP_KERNEL); 840 if (!vaddr) 841 goto err_dma_idx; 842 843 htt->rx_ring.alloc_idx.vaddr = vaddr; 844 htt->rx_ring.alloc_idx.paddr = paddr; 845 htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask; 846 *htt->rx_ring.alloc_idx.vaddr = 0; 847 848 /* Initialize the Rx refill retry timer */ 849 timer_setup(timer, ath10k_htt_rx_ring_refill_retry, 0); 850 851 spin_lock_init(&htt->rx_ring.lock); 852 853 htt->rx_ring.fill_cnt = 0; 854 htt->rx_ring.sw_rd_idx.msdu_payld = 0; 855 hash_init(htt->rx_ring.skb_table); 856 857 skb_queue_head_init(&htt->rx_msdus_q); 858 skb_queue_head_init(&htt->rx_in_ord_compl_q); 859 skb_queue_head_init(&htt->tx_fetch_ind_q); 860 atomic_set(&htt->num_mpdus_ready, 0); 861 862 ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n", 863 htt->rx_ring.size, htt->rx_ring.fill_level); 864 return 0; 865 866 err_dma_idx: 867 dma_free_coherent(htt->ar->dev, 868 ath10k_htt_get_rx_ring_size(htt), 869 vaddr_ring, 870 htt->rx_ring.base_paddr); 871 ath10k_htt_config_paddrs_ring(htt, NULL); 872 err_dma_ring: 873 kfree(htt->rx_ring.netbufs_ring); 874 htt->rx_ring.netbufs_ring = NULL; 875 err_netbuf: 876 return -ENOMEM; 877 } 878 879 static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar, 880 enum htt_rx_mpdu_encrypt_type type) 881 { 882 switch (type) { 883 case HTT_RX_MPDU_ENCRYPT_NONE: 884 return 0; 885 case HTT_RX_MPDU_ENCRYPT_WEP40: 886 case HTT_RX_MPDU_ENCRYPT_WEP104: 887 return IEEE80211_WEP_IV_LEN; 888 case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC: 889 case HTT_RX_MPDU_ENCRYPT_TKIP_WPA: 890 return IEEE80211_TKIP_IV_LEN; 891 case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2: 892 return IEEE80211_CCMP_HDR_LEN; 893 case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2: 894 return IEEE80211_CCMP_256_HDR_LEN; 895 case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2: 896 case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2: 897 return IEEE80211_GCMP_HDR_LEN; 898 case HTT_RX_MPDU_ENCRYPT_WEP128: 899 case HTT_RX_MPDU_ENCRYPT_WAPI: 900 break; 901 } 902 903 ath10k_warn(ar, "unsupported encryption type %d\n", type); 904 return 0; 905 } 906 907 #define MICHAEL_MIC_LEN 8 908 909 static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar, 910 enum htt_rx_mpdu_encrypt_type type) 911 { 912 switch (type) { 913 case HTT_RX_MPDU_ENCRYPT_NONE: 914 case HTT_RX_MPDU_ENCRYPT_WEP40: 915 case HTT_RX_MPDU_ENCRYPT_WEP104: 916 case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC: 917 case HTT_RX_MPDU_ENCRYPT_TKIP_WPA: 918 return 0; 919 case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2: 920 return IEEE80211_CCMP_MIC_LEN; 921 case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2: 922 return IEEE80211_CCMP_256_MIC_LEN; 923 case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2: 924 case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2: 925 return IEEE80211_GCMP_MIC_LEN; 926 case 
HTT_RX_MPDU_ENCRYPT_WEP128: 927 case HTT_RX_MPDU_ENCRYPT_WAPI: 928 break; 929 } 930 931 ath10k_warn(ar, "unsupported encryption type %d\n", type); 932 return 0; 933 } 934 935 static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar, 936 enum htt_rx_mpdu_encrypt_type type) 937 { 938 switch (type) { 939 case HTT_RX_MPDU_ENCRYPT_NONE: 940 case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2: 941 case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2: 942 case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2: 943 case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2: 944 return 0; 945 case HTT_RX_MPDU_ENCRYPT_WEP40: 946 case HTT_RX_MPDU_ENCRYPT_WEP104: 947 return IEEE80211_WEP_ICV_LEN; 948 case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC: 949 case HTT_RX_MPDU_ENCRYPT_TKIP_WPA: 950 return IEEE80211_TKIP_ICV_LEN; 951 case HTT_RX_MPDU_ENCRYPT_WEP128: 952 case HTT_RX_MPDU_ENCRYPT_WAPI: 953 break; 954 } 955 956 ath10k_warn(ar, "unsupported encryption type %d\n", type); 957 return 0; 958 } 959 960 struct amsdu_subframe_hdr { 961 u8 dst[ETH_ALEN]; 962 u8 src[ETH_ALEN]; 963 __be16 len; 964 } __packed; 965 966 #define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63) 967 968 static inline u8 ath10k_bw_to_mac80211_bw(u8 bw) 969 { 970 u8 ret = 0; 971 972 switch (bw) { 973 case 0: 974 ret = RATE_INFO_BW_20; 975 break; 976 case 1: 977 ret = RATE_INFO_BW_40; 978 break; 979 case 2: 980 ret = RATE_INFO_BW_80; 981 break; 982 case 3: 983 ret = RATE_INFO_BW_160; 984 break; 985 } 986 987 return ret; 988 } 989 990 static void ath10k_htt_rx_h_rates(struct ath10k *ar, 991 struct ieee80211_rx_status *status, 992 struct htt_rx_desc *rxd) 993 { 994 struct ath10k_hw_params *hw = &ar->hw_params; 995 struct rx_attention *rxd_attention; 996 struct rx_mpdu_start *rxd_mpdu_start; 997 struct rx_mpdu_end *rxd_mpdu_end; 998 struct rx_msdu_start_common *rxd_msdu_start_common; 999 struct rx_msdu_end_common *rxd_msdu_end_common; 1000 struct rx_ppdu_start *rxd_ppdu_start; 1001 struct ieee80211_supported_band *sband; 1002 u8 cck, rate, bw, sgi, mcs, nss; 1003 u8 *rxd_msdu_payload; 1004 u8 preamble = 0; 1005 u8 group_id; 1006 u32 info1, info2, info3; 1007 u32 stbc, nsts_su; 1008 1009 rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd); 1010 rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd); 1011 rxd_mpdu_end = ath10k_htt_rx_desc_get_mpdu_end(hw, rxd); 1012 rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd); 1013 rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd); 1014 rxd_ppdu_start = ath10k_htt_rx_desc_get_ppdu_start(hw, rxd); 1015 rxd_msdu_payload = ath10k_htt_rx_desc_get_msdu_payload(hw, rxd); 1016 1017 info1 = __le32_to_cpu(rxd_ppdu_start->info1); 1018 info2 = __le32_to_cpu(rxd_ppdu_start->info2); 1019 info3 = __le32_to_cpu(rxd_ppdu_start->info3); 1020 1021 preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE); 1022 1023 switch (preamble) { 1024 case HTT_RX_LEGACY: 1025 /* To get legacy rate index band is required. Since band can't 1026 * be undefined check if freq is non-zero. 
1027 */ 1028 if (!status->freq) 1029 return; 1030 1031 cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT; 1032 rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE); 1033 rate &= ~RX_PPDU_START_RATE_FLAG; 1034 1035 sband = &ar->mac.sbands[status->band]; 1036 status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck); 1037 break; 1038 case HTT_RX_HT: 1039 case HTT_RX_HT_WITH_TXBF: 1040 /* HT-SIG - Table 20-11 in info2 and info3 */ 1041 mcs = info2 & 0x1F; 1042 nss = mcs >> 3; 1043 bw = (info2 >> 7) & 1; 1044 sgi = (info3 >> 7) & 1; 1045 1046 status->rate_idx = mcs; 1047 status->encoding = RX_ENC_HT; 1048 if (sgi) 1049 status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 1050 if (bw) 1051 status->bw = RATE_INFO_BW_40; 1052 break; 1053 case HTT_RX_VHT: 1054 case HTT_RX_VHT_WITH_TXBF: 1055 /* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3 1056 * TODO check this 1057 */ 1058 bw = info2 & 3; 1059 sgi = info3 & 1; 1060 stbc = (info2 >> 3) & 1; 1061 group_id = (info2 >> 4) & 0x3F; 1062 1063 if (GROUP_ID_IS_SU_MIMO(group_id)) { 1064 mcs = (info3 >> 4) & 0x0F; 1065 nsts_su = ((info2 >> 10) & 0x07); 1066 if (stbc) 1067 nss = (nsts_su >> 2) + 1; 1068 else 1069 nss = (nsts_su + 1); 1070 } else { 1071 /* Hardware doesn't decode VHT-SIG-B into Rx descriptor 1072 * so it's impossible to decode MCS. Also since 1073 * firmware consumes Group Id Management frames host 1074 * has no knowledge regarding group/user position 1075 * mapping so it's impossible to pick the correct Nsts 1076 * from VHT-SIG-A1. 1077 * 1078 * Bandwidth and SGI are valid so report the rateinfo 1079 * on best-effort basis. 1080 */ 1081 mcs = 0; 1082 nss = 1; 1083 } 1084 1085 if (mcs > 0x09) { 1086 ath10k_warn(ar, "invalid MCS received %u\n", mcs); 1087 ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n", 1088 __le32_to_cpu(rxd_attention->flags), 1089 __le32_to_cpu(rxd_mpdu_start->info0), 1090 __le32_to_cpu(rxd_mpdu_start->info1), 1091 __le32_to_cpu(rxd_msdu_start_common->info0), 1092 __le32_to_cpu(rxd_msdu_start_common->info1), 1093 rxd_ppdu_start->info0, 1094 __le32_to_cpu(rxd_ppdu_start->info1), 1095 __le32_to_cpu(rxd_ppdu_start->info2), 1096 __le32_to_cpu(rxd_ppdu_start->info3), 1097 __le32_to_cpu(rxd_ppdu_start->info4)); 1098 1099 ath10k_warn(ar, "msdu end %08x mpdu end %08x\n", 1100 __le32_to_cpu(rxd_msdu_end_common->info0), 1101 __le32_to_cpu(rxd_mpdu_end->info0)); 1102 1103 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, 1104 "rx desc msdu payload: ", 1105 rxd_msdu_payload, 50); 1106 } 1107 1108 status->rate_idx = mcs; 1109 status->nss = nss; 1110 1111 if (sgi) 1112 status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 1113 1114 status->bw = ath10k_bw_to_mac80211_bw(bw); 1115 status->encoding = RX_ENC_VHT; 1116 break; 1117 default: 1118 break; 1119 } 1120 } 1121 1122 static struct ieee80211_channel * 1123 ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd) 1124 { 1125 struct ath10k_hw_params *hw = &ar->hw_params; 1126 struct rx_attention *rxd_attention; 1127 struct rx_msdu_end_common *rxd_msdu_end_common; 1128 struct rx_mpdu_start *rxd_mpdu_start; 1129 struct ath10k_peer *peer; 1130 struct ath10k_vif *arvif; 1131 struct cfg80211_chan_def def; 1132 u16 peer_id; 1133 1134 lockdep_assert_held(&ar->data_lock); 1135 1136 if (!rxd) 1137 return NULL; 1138 1139 rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd); 1140 rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd); 1141 rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd); 1142 1143 if (rxd_attention->flags & 
1144 __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID)) 1145 return NULL; 1146 1147 if (!(rxd_msdu_end_common->info0 & 1148 __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU))) 1149 return NULL; 1150 1151 peer_id = MS(__le32_to_cpu(rxd_mpdu_start->info0), 1152 RX_MPDU_START_INFO0_PEER_IDX); 1153 1154 peer = ath10k_peer_find_by_id(ar, peer_id); 1155 if (!peer) 1156 return NULL; 1157 1158 arvif = ath10k_get_arvif(ar, peer->vdev_id); 1159 if (WARN_ON_ONCE(!arvif)) 1160 return NULL; 1161 1162 if (ath10k_mac_vif_chan(arvif->vif, &def)) 1163 return NULL; 1164 1165 return def.chan; 1166 } 1167 1168 static struct ieee80211_channel * 1169 ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id) 1170 { 1171 struct ath10k_vif *arvif; 1172 struct cfg80211_chan_def def; 1173 1174 lockdep_assert_held(&ar->data_lock); 1175 1176 list_for_each_entry(arvif, &ar->arvifs, list) { 1177 if (arvif->vdev_id == vdev_id && 1178 ath10k_mac_vif_chan(arvif->vif, &def) == 0) 1179 return def.chan; 1180 } 1181 1182 return NULL; 1183 } 1184 1185 static void 1186 ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw, 1187 struct ieee80211_chanctx_conf *conf, 1188 void *data) 1189 { 1190 struct cfg80211_chan_def *def = data; 1191 1192 *def = conf->def; 1193 } 1194 1195 static struct ieee80211_channel * 1196 ath10k_htt_rx_h_any_channel(struct ath10k *ar) 1197 { 1198 struct cfg80211_chan_def def = {}; 1199 1200 ieee80211_iter_chan_contexts_atomic(ar->hw, 1201 ath10k_htt_rx_h_any_chan_iter, 1202 &def); 1203 1204 return def.chan; 1205 } 1206 1207 static bool ath10k_htt_rx_h_channel(struct ath10k *ar, 1208 struct ieee80211_rx_status *status, 1209 struct htt_rx_desc *rxd, 1210 u32 vdev_id) 1211 { 1212 struct ieee80211_channel *ch; 1213 1214 spin_lock_bh(&ar->data_lock); 1215 ch = ar->scan_channel; 1216 if (!ch) 1217 ch = ar->rx_channel; 1218 if (!ch) 1219 ch = ath10k_htt_rx_h_peer_channel(ar, rxd); 1220 if (!ch) 1221 ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id); 1222 if (!ch) 1223 ch = ath10k_htt_rx_h_any_channel(ar); 1224 if (!ch) 1225 ch = ar->tgt_oper_chan; 1226 spin_unlock_bh(&ar->data_lock); 1227 1228 if (!ch) 1229 return false; 1230 1231 status->band = ch->band; 1232 status->freq = ch->center_freq; 1233 1234 return true; 1235 } 1236 1237 static void ath10k_htt_rx_h_signal(struct ath10k *ar, 1238 struct ieee80211_rx_status *status, 1239 struct htt_rx_desc *rxd) 1240 { 1241 struct ath10k_hw_params *hw = &ar->hw_params; 1242 struct rx_ppdu_start *rxd_ppdu_start = ath10k_htt_rx_desc_get_ppdu_start(hw, rxd); 1243 int i; 1244 1245 for (i = 0; i < IEEE80211_MAX_CHAINS ; i++) { 1246 status->chains &= ~BIT(i); 1247 1248 if (rxd_ppdu_start->rssi_chains[i].pri20_mhz != 0x80) { 1249 status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR + 1250 rxd_ppdu_start->rssi_chains[i].pri20_mhz; 1251 1252 status->chains |= BIT(i); 1253 } 1254 } 1255 1256 /* FIXME: Get real NF */ 1257 status->signal = ATH10K_DEFAULT_NOISE_FLOOR + 1258 rxd_ppdu_start->rssi_comb; 1259 status->flag &= ~RX_FLAG_NO_SIGNAL_VAL; 1260 } 1261 1262 static void ath10k_htt_rx_h_mactime(struct ath10k *ar, 1263 struct ieee80211_rx_status *status, 1264 struct htt_rx_desc *rxd) 1265 { 1266 struct ath10k_hw_params *hw = &ar->hw_params; 1267 struct rx_ppdu_end_common *rxd_ppdu_end_common; 1268 1269 rxd_ppdu_end_common = ath10k_htt_rx_desc_get_ppdu_end(hw, rxd); 1270 1271 /* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This 1272 * means all prior MSDUs in a PPDU are reported to mac80211 without the 1273 * TSF. Is it worth holding frames until end of PPDU is known? 
1274 * 1275 * FIXME: Can we get/compute 64bit TSF? 1276 */ 1277 status->mactime = __le32_to_cpu(rxd_ppdu_end_common->tsf_timestamp); 1278 status->flag |= RX_FLAG_MACTIME_END; 1279 } 1280 1281 static void ath10k_htt_rx_h_ppdu(struct ath10k *ar, 1282 struct sk_buff_head *amsdu, 1283 struct ieee80211_rx_status *status, 1284 u32 vdev_id) 1285 { 1286 struct sk_buff *first; 1287 struct ath10k_hw_params *hw = &ar->hw_params; 1288 struct htt_rx_desc *rxd; 1289 struct rx_attention *rxd_attention; 1290 bool is_first_ppdu; 1291 bool is_last_ppdu; 1292 1293 if (skb_queue_empty(amsdu)) 1294 return; 1295 1296 first = skb_peek(amsdu); 1297 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 1298 #if defined(__linux__) 1299 (void *)first->data - hw->rx_desc_ops->rx_desc_size); 1300 #elif defined(__FreeBSD__) 1301 (u8 *)first->data - hw->rx_desc_ops->rx_desc_size); 1302 #endif 1303 1304 rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd); 1305 1306 is_first_ppdu = !!(rxd_attention->flags & 1307 __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU)); 1308 is_last_ppdu = !!(rxd_attention->flags & 1309 __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU)); 1310 1311 if (is_first_ppdu) { 1312 /* New PPDU starts so clear out the old per-PPDU status. */ 1313 status->freq = 0; 1314 status->rate_idx = 0; 1315 status->nss = 0; 1316 status->encoding = RX_ENC_LEGACY; 1317 status->bw = RATE_INFO_BW_20; 1318 1319 status->flag &= ~RX_FLAG_MACTIME; 1320 status->flag |= RX_FLAG_NO_SIGNAL_VAL; 1321 1322 status->flag &= ~(RX_FLAG_AMPDU_IS_LAST); 1323 status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN; 1324 status->ampdu_reference = ar->ampdu_reference; 1325 1326 ath10k_htt_rx_h_signal(ar, status, rxd); 1327 ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id); 1328 ath10k_htt_rx_h_rates(ar, status, rxd); 1329 } 1330 1331 if (is_last_ppdu) { 1332 ath10k_htt_rx_h_mactime(ar, status, rxd); 1333 1334 /* set ampdu last segment flag */ 1335 status->flag |= RX_FLAG_AMPDU_IS_LAST; 1336 ar->ampdu_reference++; 1337 } 1338 } 1339 1340 static const char * const tid_to_ac[] = { 1341 "BE", 1342 "BK", 1343 "BK", 1344 "BE", 1345 "VI", 1346 "VI", 1347 "VO", 1348 "VO", 1349 }; 1350 1351 static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size) 1352 { 1353 u8 *qc; 1354 int tid; 1355 1356 if (!ieee80211_is_data_qos(hdr->frame_control)) 1357 return ""; 1358 1359 qc = ieee80211_get_qos_ctl(hdr); 1360 tid = *qc & IEEE80211_QOS_CTL_TID_MASK; 1361 if (tid < 8) 1362 snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]); 1363 else 1364 snprintf(out, size, "tid %d", tid); 1365 1366 return out; 1367 } 1368 1369 static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar, 1370 struct ieee80211_rx_status *rx_status, 1371 struct sk_buff *skb) 1372 { 1373 struct ieee80211_rx_status *status; 1374 1375 status = IEEE80211_SKB_RXCB(skb); 1376 *status = *rx_status; 1377 1378 skb_queue_tail(&ar->htt.rx_msdus_q, skb); 1379 } 1380 1381 static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb) 1382 { 1383 struct ieee80211_rx_status *status; 1384 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1385 char tid[32]; 1386 1387 status = IEEE80211_SKB_RXCB(skb); 1388 1389 if (!(ar->filter_flags & FIF_FCSFAIL) && 1390 status->flag & RX_FLAG_FAILED_FCS_CRC) { 1391 ar->stats.rx_crc_err_drop++; 1392 dev_kfree_skb_any(skb); 1393 return; 1394 } 1395 1396 ath10k_dbg(ar, ATH10K_DBG_DATA, 1397 "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", 1398 skb, 1399 
	   skb->len,
		   ieee80211_get_SA(hdr),
		   ath10k_get_tid(hdr, tid, sizeof(tid)),
		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
		   "mcast" : "ucast",
		   IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl)),
		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
		   (status->encoding == RX_ENC_HT) ? "ht" : "",
		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
			skb->data, skb->len);
	trace_ath10k_rx_hdr(ar, skb->data, skb->len);
	trace_ath10k_rx_payload(ar, skb->data, skb->len);

	ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
}

static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
				      struct ieee80211_hdr *hdr)
{
	int len = ieee80211_hdrlen(hdr->frame_control);

	if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
		      ar->running_fw->fw_file.fw_features))
		len = round_up(len, 4);

	return len;
}

static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					enum htt_rx_mpdu_encrypt_type enctype,
					bool is_decrypted,
					const u8 first_hdr[64])
{
	struct ieee80211_hdr *hdr;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_desc *rxd;
	struct rx_msdu_end_common *rxd_msdu_end_common;
	size_t hdr_len;
	size_t crypto_len;
	bool is_first;
	bool is_last;
	bool msdu_limit_err;
	int bytes_aligned = ar->hw_params.decap_align_bytes;
	u8 *qos;

	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
#if defined(__linux__)
				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
#elif defined(__FreeBSD__)
				    (u8 *)msdu->data - hw->rx_desc_ops->rx_desc_size);
#endif

	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
	is_first = !!(rxd_msdu_end_common->info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd_msdu_end_common->info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));

	/* Delivered decapped frame:
	 * [802.11 header]
	 * [crypto param] <-- can be trimmed if !fcs_err &&
	 *                    !decrypt_err && !peer_idx_invalid
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 * [payload]
	 * [FCS] <-- at end, needs to be trimmed
	 */

	/* Some hardware (QCA99x0 variants) limits the number of MSDUs it
	 * deaggregates from an A-MSDU so that unwanted MSDU-deaggregation is
	 * avoided for error packets. If the limit is exceeded, the hw sends
	 * all remaining MSDUs as a single last MSDU with this msdu limit
	 * error set.
	 */
	msdu_limit_err = ath10k_htt_rx_desc_msdu_limit_error(hw, rxd);

	/* If an MSDU limit error occurred, don't warn: a partial raw MSDU
	 * without the first MSDU is expected in that case and is handled
	 * further below.
1490 */ 1491 /* This probably shouldn't happen but warn just in case */ 1492 if (WARN_ON_ONCE(!is_first && !msdu_limit_err)) 1493 return; 1494 1495 /* This probably shouldn't happen but warn just in case */ 1496 if (WARN_ON_ONCE(!(is_first && is_last) && !msdu_limit_err)) 1497 return; 1498 1499 skb_trim(msdu, msdu->len - FCS_LEN); 1500 1501 /* Push original 80211 header */ 1502 if (unlikely(msdu_limit_err)) { 1503 #if defined(__linux__) 1504 hdr = (struct ieee80211_hdr *)first_hdr; 1505 #elif defined(__FreeBSD__) 1506 hdr = __DECONST(struct ieee80211_hdr *, first_hdr); 1507 #endif 1508 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1509 crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype); 1510 1511 if (ieee80211_is_data_qos(hdr->frame_control)) { 1512 qos = ieee80211_get_qos_ctl(hdr); 1513 qos[0] |= IEEE80211_QOS_CTL_A_MSDU_PRESENT; 1514 } 1515 1516 if (crypto_len) 1517 memcpy(skb_push(msdu, crypto_len), 1518 #if defined(__linux__) 1519 (void *)hdr + round_up(hdr_len, bytes_aligned), 1520 #elif defined(__FreeBSD__) 1521 (u8 *)hdr + round_up(hdr_len, bytes_aligned), 1522 #endif 1523 crypto_len); 1524 1525 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 1526 } 1527 1528 /* In most cases this will be true for sniffed frames. It makes sense 1529 * to deliver them as-is without stripping the crypto param. This is 1530 * necessary for software based decryption. 1531 * 1532 * If there's no error then the frame is decrypted. At least that is 1533 * the case for frames that come in via fragmented rx indication. 1534 */ 1535 if (!is_decrypted) 1536 return; 1537 1538 /* The payload is decrypted so strip crypto params. Start from tail 1539 * since hdr is used to compute some stuff. 1540 */ 1541 1542 hdr = (void *)msdu->data; 1543 1544 /* Tail */ 1545 if (status->flag & RX_FLAG_IV_STRIPPED) { 1546 skb_trim(msdu, msdu->len - 1547 ath10k_htt_rx_crypto_mic_len(ar, enctype)); 1548 1549 skb_trim(msdu, msdu->len - 1550 ath10k_htt_rx_crypto_icv_len(ar, enctype)); 1551 } else { 1552 /* MIC */ 1553 if (status->flag & RX_FLAG_MIC_STRIPPED) 1554 skb_trim(msdu, msdu->len - 1555 ath10k_htt_rx_crypto_mic_len(ar, enctype)); 1556 1557 /* ICV */ 1558 if (status->flag & RX_FLAG_ICV_STRIPPED) 1559 skb_trim(msdu, msdu->len - 1560 ath10k_htt_rx_crypto_icv_len(ar, enctype)); 1561 } 1562 1563 /* MMIC */ 1564 if ((status->flag & RX_FLAG_MMIC_STRIPPED) && 1565 !ieee80211_has_morefrags(hdr->frame_control) && 1566 enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA) 1567 skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN); 1568 1569 /* Head */ 1570 if (status->flag & RX_FLAG_IV_STRIPPED) { 1571 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1572 crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype); 1573 1574 #if defined(__linux__) 1575 memmove((void *)msdu->data + crypto_len, 1576 #elif defined(__FreeBSD__) 1577 memmove((u8 *)msdu->data + crypto_len, 1578 #endif 1579 (void *)msdu->data, hdr_len); 1580 skb_pull(msdu, crypto_len); 1581 } 1582 } 1583 1584 static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar, 1585 struct sk_buff *msdu, 1586 struct ieee80211_rx_status *status, 1587 const u8 first_hdr[64], 1588 enum htt_rx_mpdu_encrypt_type enctype) 1589 { 1590 struct ath10k_hw_params *hw = &ar->hw_params; 1591 #if defined(__linux__) 1592 struct ieee80211_hdr *hdr; 1593 #elif defined(__FreeBSD__) 1594 const struct ieee80211_hdr *hdr; 1595 struct ieee80211_hdr *hdr2; 1596 #endif 1597 struct htt_rx_desc *rxd; 1598 size_t hdr_len; 1599 u8 da[ETH_ALEN]; 1600 u8 sa[ETH_ALEN]; 1601 int l3_pad_bytes; 1602 int bytes_aligned = 
ar->hw_params.decap_align_bytes; 1603 1604 /* Delivered decapped frame: 1605 * [nwifi 802.11 header] <-- replaced with 802.11 hdr 1606 * [rfc1042/llc] 1607 * 1608 * Note: The nwifi header doesn't have QoS Control and is 1609 * (always?) a 3addr frame. 1610 * 1611 * Note2: There's no A-MSDU subframe header. Even if it's part 1612 * of an A-MSDU. 1613 */ 1614 1615 /* pull decapped header and copy SA & DA */ 1616 #if defined(__linux__) 1617 rxd = HTT_RX_BUF_TO_RX_DESC(hw, (void *)msdu->data - 1618 #elif defined(__FreeBSD__) 1619 rxd = HTT_RX_BUF_TO_RX_DESC(hw, (u8 *)msdu->data - 1620 #endif 1621 hw->rx_desc_ops->rx_desc_size); 1622 1623 l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd); 1624 skb_put(msdu, l3_pad_bytes); 1625 1626 #if defined(__linux__) 1627 hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes); 1628 1629 hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr); 1630 ether_addr_copy(da, ieee80211_get_DA(hdr)); 1631 ether_addr_copy(sa, ieee80211_get_SA(hdr)); 1632 #elif defined(__FreeBSD__) 1633 hdr2 = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes); 1634 1635 hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr2); 1636 ether_addr_copy(da, ieee80211_get_DA(hdr2)); 1637 ether_addr_copy(sa, ieee80211_get_SA(hdr2)); 1638 #endif 1639 skb_pull(msdu, hdr_len); 1640 1641 /* push original 802.11 header */ 1642 #if defined(__linux__) 1643 hdr = (struct ieee80211_hdr *)first_hdr; 1644 #elif defined(__FreeBSD__) 1645 hdr = (const struct ieee80211_hdr *)first_hdr; 1646 #endif 1647 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1648 1649 if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 1650 memcpy(skb_push(msdu, 1651 ath10k_htt_rx_crypto_param_len(ar, enctype)), 1652 #if defined(__linux__) 1653 (void *)hdr + round_up(hdr_len, bytes_aligned), 1654 #elif defined(__FreeBSD__) 1655 (const u8 *)hdr + round_up(hdr_len, bytes_aligned), 1656 #endif 1657 ath10k_htt_rx_crypto_param_len(ar, enctype)); 1658 } 1659 1660 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 1661 1662 /* original 802.11 header has a different DA and in 1663 * case of 4addr it may also have different SA 1664 */ 1665 #if defined(__linux__) 1666 hdr = (struct ieee80211_hdr *)msdu->data; 1667 ether_addr_copy(ieee80211_get_DA(hdr), da); 1668 ether_addr_copy(ieee80211_get_SA(hdr), sa); 1669 #elif defined(__FreeBSD__) 1670 /* ieee80211_get_[DS]A() do not take a const argument. 
*/ 1671 hdr2 = (struct ieee80211_hdr *)msdu->data; 1672 ether_addr_copy(ieee80211_get_DA(hdr2), da); 1673 ether_addr_copy(ieee80211_get_SA(hdr2), sa); 1674 #endif 1675 } 1676 1677 static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar, 1678 struct sk_buff *msdu, 1679 enum htt_rx_mpdu_encrypt_type enctype) 1680 { 1681 struct ieee80211_hdr *hdr; 1682 struct ath10k_hw_params *hw = &ar->hw_params; 1683 struct htt_rx_desc *rxd; 1684 struct rx_msdu_end_common *rxd_msdu_end_common; 1685 u8 *rxd_rx_hdr_status; 1686 size_t hdr_len, crypto_len; 1687 #if defined(__linux__) 1688 void *rfc1042; 1689 #elif defined(__FreeBSD__) 1690 u8 *rfc1042; 1691 #endif 1692 bool is_first, is_last, is_amsdu; 1693 int bytes_aligned = ar->hw_params.decap_align_bytes; 1694 1695 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 1696 #if defined(__linux__) 1697 (void *)msdu->data - hw->rx_desc_ops->rx_desc_size); 1698 #elif defined(__FreeBSD__) 1699 (u8 *)msdu->data - hw->rx_desc_ops->rx_desc_size); 1700 #endif 1701 1702 rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd); 1703 rxd_rx_hdr_status = ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd); 1704 hdr = (void *)rxd_rx_hdr_status; 1705 1706 is_first = !!(rxd_msdu_end_common->info0 & 1707 __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)); 1708 is_last = !!(rxd_msdu_end_common->info0 & 1709 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU)); 1710 is_amsdu = !(is_first && is_last); 1711 1712 #if defined(__linux__) 1713 rfc1042 = hdr; 1714 #elif defined(__FreeBSD__) 1715 rfc1042 = (void *)hdr; 1716 #endif 1717 1718 if (is_first) { 1719 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1720 crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype); 1721 1722 rfc1042 += round_up(hdr_len, bytes_aligned) + 1723 round_up(crypto_len, bytes_aligned); 1724 } 1725 1726 if (is_amsdu) 1727 rfc1042 += sizeof(struct amsdu_subframe_hdr); 1728 1729 return rfc1042; 1730 } 1731 1732 static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar, 1733 struct sk_buff *msdu, 1734 struct ieee80211_rx_status *status, 1735 const u8 first_hdr[64], 1736 enum htt_rx_mpdu_encrypt_type enctype) 1737 { 1738 struct ath10k_hw_params *hw = &ar->hw_params; 1739 #if defined(__linux__) 1740 struct ieee80211_hdr *hdr; 1741 #elif defined(__FreeBSD__) 1742 const struct ieee80211_hdr *hdr; 1743 struct ieee80211_hdr *hdr2; 1744 #endif 1745 struct ethhdr *eth; 1746 size_t hdr_len; 1747 void *rfc1042; 1748 u8 da[ETH_ALEN]; 1749 u8 sa[ETH_ALEN]; 1750 int l3_pad_bytes; 1751 struct htt_rx_desc *rxd; 1752 int bytes_aligned = ar->hw_params.decap_align_bytes; 1753 1754 /* Delivered decapped frame: 1755 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc 1756 * [payload] 1757 */ 1758 1759 rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype); 1760 if (WARN_ON_ONCE(!rfc1042)) 1761 return; 1762 1763 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 1764 #if defined(__linux__) 1765 (void *)msdu->data - hw->rx_desc_ops->rx_desc_size); 1766 #elif defined(__FreeBSD__) 1767 (u8 *)msdu->data - hw->rx_desc_ops->rx_desc_size); 1768 #endif 1769 1770 l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd); 1771 skb_put(msdu, l3_pad_bytes); 1772 skb_pull(msdu, l3_pad_bytes); 1773 1774 /* pull decapped header and copy SA & DA */ 1775 eth = (struct ethhdr *)msdu->data; 1776 ether_addr_copy(da, eth->h_dest); 1777 ether_addr_copy(sa, eth->h_source); 1778 skb_pull(msdu, sizeof(struct ethhdr)); 1779 1780 /* push rfc1042/llc/snap */ 1781 memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042, 1782 sizeof(struct rfc1042_hdr)); 1783 1784 /* push original 
802.11 header */ 1785 #if defined(__linux__) 1786 hdr = (struct ieee80211_hdr *)first_hdr; 1787 #elif defined(__FreeBSD__) 1788 hdr = (const struct ieee80211_hdr *)first_hdr; 1789 #endif 1790 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1791 1792 if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 1793 memcpy(skb_push(msdu, 1794 ath10k_htt_rx_crypto_param_len(ar, enctype)), 1795 #if defined(__linux__) 1796 (void *)hdr + round_up(hdr_len, bytes_aligned), 1797 #elif defined(__FreeBSD__) 1798 (const u8 *)hdr + round_up(hdr_len, bytes_aligned), 1799 #endif 1800 ath10k_htt_rx_crypto_param_len(ar, enctype)); 1801 } 1802 1803 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 1804 1805 /* original 802.11 header has a different DA and in 1806 * case of 4addr it may also have different SA 1807 */ 1808 #if defined(__linux__) 1809 hdr = (struct ieee80211_hdr *)msdu->data; 1810 ether_addr_copy(ieee80211_get_DA(hdr), da); 1811 ether_addr_copy(ieee80211_get_SA(hdr), sa); 1812 #elif defined(__FreeBSD__) 1813 /* ieee80211_get_[DS]A() do not take a const argument. */ 1814 hdr2 = (struct ieee80211_hdr *)msdu->data; 1815 ether_addr_copy(ieee80211_get_DA(hdr2), da); 1816 ether_addr_copy(ieee80211_get_SA(hdr2), sa); 1817 #endif 1818 } 1819 1820 static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar, 1821 struct sk_buff *msdu, 1822 struct ieee80211_rx_status *status, 1823 const u8 first_hdr[64], 1824 enum htt_rx_mpdu_encrypt_type enctype) 1825 { 1826 struct ath10k_hw_params *hw = &ar->hw_params; 1827 #if defined(__linux__) 1828 struct ieee80211_hdr *hdr; 1829 #elif defined(__FreeBSD__) 1830 const struct ieee80211_hdr *hdr; 1831 #endif 1832 size_t hdr_len; 1833 int l3_pad_bytes; 1834 struct htt_rx_desc *rxd; 1835 int bytes_aligned = ar->hw_params.decap_align_bytes; 1836 1837 /* Delivered decapped frame: 1838 * [amsdu header] <-- replaced with 802.11 hdr 1839 * [rfc1042/llc] 1840 * [payload] 1841 */ 1842 1843 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 1844 #if defined(__linux__) 1845 (void *)msdu->data - hw->rx_desc_ops->rx_desc_size); 1846 #elif defined(__FreeBSD__) 1847 (u8 *)msdu->data - hw->rx_desc_ops->rx_desc_size); 1848 #endif 1849 1850 l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd); 1851 1852 skb_put(msdu, l3_pad_bytes); 1853 skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes); 1854 1855 #if defined(__linux__) 1856 hdr = (struct ieee80211_hdr *)first_hdr; 1857 #elif defined(__FreeBSD__) 1858 hdr = (const struct ieee80211_hdr *)first_hdr; 1859 #endif 1860 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1861 1862 if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 1863 memcpy(skb_push(msdu, 1864 ath10k_htt_rx_crypto_param_len(ar, enctype)), 1865 #if defined(__linux__) 1866 (void *)hdr + round_up(hdr_len, bytes_aligned), 1867 #elif defined(__FreeBSD__) 1868 (const u8 *)hdr + round_up(hdr_len, bytes_aligned), 1869 #endif 1870 ath10k_htt_rx_crypto_param_len(ar, enctype)); 1871 } 1872 1873 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 1874 } 1875 1876 static void ath10k_htt_rx_h_undecap(struct ath10k *ar, 1877 struct sk_buff *msdu, 1878 struct ieee80211_rx_status *status, 1879 u8 first_hdr[64], 1880 enum htt_rx_mpdu_encrypt_type enctype, 1881 bool is_decrypted) 1882 { 1883 struct ath10k_hw_params *hw = &ar->hw_params; 1884 struct htt_rx_desc *rxd; 1885 struct rx_msdu_start_common *rxd_msdu_start_common; 1886 enum rx_msdu_decap_format decap; 1887 1888 /* First msdu's decapped header: 1889 * [802.11 header] <-- padded to 4 bytes long 1890 * [crypto param] <-- padded to 4 bytes long 1891 * [amsdu 
header] <-- only if A-MSDU 1892 * [rfc1042/llc] 1893 * 1894 * Other (2nd, 3rd, ..) msdu's decapped header: 1895 * [amsdu header] <-- only if A-MSDU 1896 * [rfc1042/llc] 1897 */ 1898 1899 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 1900 #if defined(__linux__) 1901 (void *)msdu->data - hw->rx_desc_ops->rx_desc_size); 1902 #elif defined(__FreeBSD__) 1903 (u8 *)msdu->data - hw->rx_desc_ops->rx_desc_size); 1904 #endif 1905 1906 rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd); 1907 decap = MS(__le32_to_cpu(rxd_msdu_start_common->info1), 1908 RX_MSDU_START_INFO1_DECAP_FORMAT); 1909 1910 switch (decap) { 1911 case RX_MSDU_DECAP_RAW: 1912 ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype, 1913 is_decrypted, first_hdr); 1914 break; 1915 case RX_MSDU_DECAP_NATIVE_WIFI: 1916 ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr, 1917 enctype); 1918 break; 1919 case RX_MSDU_DECAP_ETHERNET2_DIX: 1920 ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype); 1921 break; 1922 case RX_MSDU_DECAP_8023_SNAP_LLC: 1923 ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr, 1924 enctype); 1925 break; 1926 } 1927 } 1928 1929 static int ath10k_htt_rx_get_csum_state(struct ath10k_hw_params *hw, struct sk_buff *skb) 1930 { 1931 struct htt_rx_desc *rxd; 1932 struct rx_attention *rxd_attention; 1933 struct rx_msdu_start_common *rxd_msdu_start_common; 1934 u32 flags, info; 1935 bool is_ip4, is_ip6; 1936 bool is_tcp, is_udp; 1937 bool ip_csum_ok, tcpudp_csum_ok; 1938 1939 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 1940 #if defined(__linux__) 1941 (void *)skb->data - hw->rx_desc_ops->rx_desc_size); 1942 #elif defined(__FreeBSD__) 1943 (u8 *)skb->data - hw->rx_desc_ops->rx_desc_size); 1944 #endif 1945 1946 rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd); 1947 rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd); 1948 flags = __le32_to_cpu(rxd_attention->flags); 1949 info = __le32_to_cpu(rxd_msdu_start_common->info1); 1950 1951 is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO); 1952 is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO); 1953 is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO); 1954 is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO); 1955 ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL); 1956 tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL); 1957 1958 if (!is_ip4 && !is_ip6) 1959 return CHECKSUM_NONE; 1960 if (!is_tcp && !is_udp) 1961 return CHECKSUM_NONE; 1962 if (!ip_csum_ok) 1963 return CHECKSUM_NONE; 1964 if (!tcpudp_csum_ok) 1965 return CHECKSUM_NONE; 1966 1967 return CHECKSUM_UNNECESSARY; 1968 } 1969 1970 static void ath10k_htt_rx_h_csum_offload(struct ath10k_hw_params *hw, 1971 struct sk_buff *msdu) 1972 { 1973 msdu->ip_summed = ath10k_htt_rx_get_csum_state(hw, msdu); 1974 } 1975 1976 static u64 ath10k_htt_rx_h_get_pn(struct ath10k *ar, struct sk_buff *skb, 1977 enum htt_rx_mpdu_encrypt_type enctype) 1978 { 1979 struct ieee80211_hdr *hdr; 1980 u64 pn = 0; 1981 u8 *ehdr; 1982 1983 hdr = (struct ieee80211_hdr *)skb->data; 1984 ehdr = skb->data + ieee80211_hdrlen(hdr->frame_control); 1985 1986 if (enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2) { 1987 pn = ehdr[0]; 1988 pn |= (u64)ehdr[1] << 8; 1989 pn |= (u64)ehdr[4] << 16; 1990 pn |= (u64)ehdr[5] << 24; 1991 pn |= (u64)ehdr[6] << 32; 1992 pn |= (u64)ehdr[7] << 40; 1993 } 1994 return pn; 1995 } 1996 1997 static bool ath10k_htt_rx_h_frag_multicast_check(struct ath10k *ar, 1998 struct sk_buff *skb) 1999 { 2000 struct ieee80211_hdr *hdr; 2001 2002 hdr = (struct ieee80211_hdr *)skb->data; 2003 
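/* Fragmentation of group-addressed frames is not valid; returning false here makes the caller drop such a fragment. */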
return !is_multicast_ether_addr(hdr->addr1); 2004 } 2005 2006 static bool ath10k_htt_rx_h_frag_pn_check(struct ath10k *ar, 2007 struct sk_buff *skb, 2008 u16 peer_id, 2009 enum htt_rx_mpdu_encrypt_type enctype) 2010 { 2011 struct ath10k_peer *peer; 2012 union htt_rx_pn_t *last_pn, new_pn = {}; 2013 struct ieee80211_hdr *hdr; 2014 u8 tid, frag_number; 2015 u32 seq; 2016 2017 peer = ath10k_peer_find_by_id(ar, peer_id); 2018 if (!peer) { 2019 ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer for frag pn check\n"); 2020 return false; 2021 } 2022 2023 hdr = (struct ieee80211_hdr *)skb->data; 2024 if (ieee80211_is_data_qos(hdr->frame_control)) 2025 tid = ieee80211_get_tid(hdr); 2026 else 2027 tid = ATH10K_TXRX_NON_QOS_TID; 2028 2029 last_pn = &peer->frag_tids_last_pn[tid]; 2030 new_pn.pn48 = ath10k_htt_rx_h_get_pn(ar, skb, enctype); 2031 frag_number = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; 2032 seq = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl)); 2033 2034 if (frag_number == 0) { 2035 last_pn->pn48 = new_pn.pn48; 2036 peer->frag_tids_seq[tid] = seq; 2037 } else { 2038 if (seq != peer->frag_tids_seq[tid]) 2039 return false; 2040 2041 if (new_pn.pn48 != last_pn->pn48 + 1) 2042 return false; 2043 2044 last_pn->pn48 = new_pn.pn48; 2045 } 2046 2047 return true; 2048 } 2049 2050 static void ath10k_htt_rx_h_mpdu(struct ath10k *ar, 2051 struct sk_buff_head *amsdu, 2052 struct ieee80211_rx_status *status, 2053 bool fill_crypt_header, 2054 u8 *rx_hdr, 2055 enum ath10k_pkt_rx_err *err, 2056 u16 peer_id, 2057 bool frag) 2058 { 2059 struct sk_buff *first; 2060 struct sk_buff *last; 2061 struct sk_buff *msdu, *temp; 2062 struct ath10k_hw_params *hw = &ar->hw_params; 2063 struct htt_rx_desc *rxd; 2064 struct rx_attention *rxd_attention; 2065 struct rx_mpdu_start *rxd_mpdu_start; 2066 2067 struct ieee80211_hdr *hdr; 2068 enum htt_rx_mpdu_encrypt_type enctype; 2069 u8 first_hdr[64]; 2070 u8 *qos; 2071 bool has_fcs_err; 2072 bool has_crypto_err; 2073 bool has_tkip_err; 2074 bool has_peer_idx_invalid; 2075 bool is_decrypted; 2076 bool is_mgmt; 2077 u32 attention; 2078 bool frag_pn_check = true, multicast_check = true; 2079 2080 if (skb_queue_empty(amsdu)) 2081 return; 2082 2083 first = skb_peek(amsdu); 2084 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 2085 #if defined(__linux__) 2086 (void *)first->data - hw->rx_desc_ops->rx_desc_size); 2087 #elif defined(__FreeBSD__) 2088 (u8 *)first->data - hw->rx_desc_ops->rx_desc_size); 2089 #endif 2090 2091 rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd); 2092 rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd); 2093 2094 is_mgmt = !!(rxd_attention->flags & 2095 __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE)); 2096 2097 enctype = MS(__le32_to_cpu(rxd_mpdu_start->info0), 2098 RX_MPDU_START_INFO0_ENCRYPT_TYPE); 2099 2100 /* First MSDU's Rx descriptor in an A-MSDU contains full 802.11 2101 * decapped header. It'll be used for undecapping of each MSDU. 2102 */ 2103 hdr = (void *)ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd); 2104 memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN); 2105 2106 if (rx_hdr) 2107 memcpy(rx_hdr, hdr, RX_HTT_HDR_STATUS_LEN); 2108 2109 /* Each A-MSDU subframe will use the original header as the base and be 2110 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl. 2111 */ 2112 hdr = (void *)first_hdr; 2113 2114 if (ieee80211_is_data_qos(hdr->frame_control)) { 2115 qos = ieee80211_get_qos_ctl(hdr); 2116 qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; 2117 } 2118 2119 /* Some attention flags are valid only in the last MSDU. 
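* Peek at the tail subframe's rx descriptor for the FCS, decrypt, TKIP MIC and peer-index error bits.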
*/ 2120 last = skb_peek_tail(amsdu); 2121 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 2122 #if defined(__linux__) 2123 (void *)last->data - hw->rx_desc_ops->rx_desc_size); 2124 #elif defined(__FreeBSD__) 2125 (u8 *)last->data - hw->rx_desc_ops->rx_desc_size); 2126 #endif 2127 2128 rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd); 2129 attention = __le32_to_cpu(rxd_attention->flags); 2130 2131 has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR); 2132 has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR); 2133 has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR); 2134 has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID); 2135 2136 /* Note: If hardware captures an encrypted frame that it can't decrypt, 2137 * e.g. due to fcs error, missing peer or invalid key data it will 2138 * report the frame as raw. 2139 */ 2140 is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE && 2141 !has_fcs_err && 2142 !has_crypto_err && 2143 !has_peer_idx_invalid); 2144 2145 /* Clear per-MPDU flags while leaving per-PPDU flags intact. */ 2146 status->flag &= ~(RX_FLAG_FAILED_FCS_CRC | 2147 RX_FLAG_MMIC_ERROR | 2148 RX_FLAG_DECRYPTED | 2149 RX_FLAG_IV_STRIPPED | 2150 RX_FLAG_ONLY_MONITOR | 2151 RX_FLAG_MMIC_STRIPPED); 2152 2153 if (has_fcs_err) 2154 status->flag |= RX_FLAG_FAILED_FCS_CRC; 2155 2156 if (has_tkip_err) 2157 status->flag |= RX_FLAG_MMIC_ERROR; 2158 2159 if (err) { 2160 if (has_fcs_err) 2161 *err = ATH10K_PKT_RX_ERR_FCS; 2162 else if (has_tkip_err) 2163 *err = ATH10K_PKT_RX_ERR_TKIP; 2164 else if (has_crypto_err) 2165 *err = ATH10K_PKT_RX_ERR_CRYPT; 2166 else if (has_peer_idx_invalid) 2167 *err = ATH10K_PKT_RX_ERR_PEER_IDX_INVAL; 2168 } 2169 2170 /* Firmware reports all necessary management frames via WMI already. 2171 * They are not reported to monitor interfaces at all so pass the ones 2172 * coming via HTT to monitor interfaces instead. This simplifies 2173 * matters a lot. 2174 */ 2175 if (is_mgmt) 2176 status->flag |= RX_FLAG_ONLY_MONITOR; 2177 2178 if (is_decrypted) { 2179 status->flag |= RX_FLAG_DECRYPTED; 2180 2181 if (likely(!is_mgmt)) 2182 status->flag |= RX_FLAG_MMIC_STRIPPED; 2183 2184 if (fill_crypt_header) 2185 status->flag |= RX_FLAG_MIC_STRIPPED | 2186 RX_FLAG_ICV_STRIPPED; 2187 else 2188 status->flag |= RX_FLAG_IV_STRIPPED; 2189 } 2190 2191 skb_queue_walk(amsdu, msdu) { 2192 if (frag && !fill_crypt_header && is_decrypted && 2193 enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2) 2194 frag_pn_check = ath10k_htt_rx_h_frag_pn_check(ar, 2195 msdu, 2196 peer_id, 2197 enctype); 2198 2199 if (frag) 2200 multicast_check = ath10k_htt_rx_h_frag_multicast_check(ar, 2201 msdu); 2202 2203 if (!frag_pn_check || !multicast_check) { 2204 /* Discard the fragment with invalid PN or multicast DA 2205 */ 2206 temp = msdu->prev; 2207 __skb_unlink(msdu, amsdu); 2208 dev_kfree_skb_any(msdu); 2209 msdu = temp; 2210 frag_pn_check = true; 2211 multicast_check = true; 2212 continue; 2213 } 2214 2215 ath10k_htt_rx_h_csum_offload(&ar->hw_params, msdu); 2216 2217 if (frag && !fill_crypt_header && 2218 enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA) 2219 status->flag &= ~RX_FLAG_MMIC_STRIPPED; 2220 2221 ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype, 2222 is_decrypted); 2223 2224 /* Undecapping involves copying the original 802.11 header back 2225 * to sk_buff. If frame is protected and hardware has decrypted 2226 * it then remove the protected bit. 
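* Otherwise mac80211 would treat the frame as still encrypted and drop it.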
2227 */ 2228 if (!is_decrypted) 2229 continue; 2230 if (is_mgmt) 2231 continue; 2232 2233 if (fill_crypt_header) 2234 continue; 2235 2236 hdr = (void *)msdu->data; 2237 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); 2238 2239 if (frag && !fill_crypt_header && 2240 enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA) 2241 status->flag &= ~RX_FLAG_IV_STRIPPED & 2242 ~RX_FLAG_MMIC_STRIPPED; 2243 } 2244 } 2245 2246 static void ath10k_htt_rx_h_enqueue(struct ath10k *ar, 2247 struct sk_buff_head *amsdu, 2248 struct ieee80211_rx_status *status) 2249 { 2250 struct sk_buff *msdu; 2251 struct sk_buff *first_subframe; 2252 2253 first_subframe = skb_peek(amsdu); 2254 2255 while ((msdu = __skb_dequeue(amsdu))) { 2256 /* Setup per-MSDU flags */ 2257 if (skb_queue_empty(amsdu)) 2258 status->flag &= ~RX_FLAG_AMSDU_MORE; 2259 else 2260 status->flag |= RX_FLAG_AMSDU_MORE; 2261 2262 if (msdu == first_subframe) { 2263 first_subframe = NULL; 2264 status->flag &= ~RX_FLAG_ALLOW_SAME_PN; 2265 } else { 2266 status->flag |= RX_FLAG_ALLOW_SAME_PN; 2267 } 2268 2269 ath10k_htt_rx_h_queue_msdu(ar, status, msdu); 2270 } 2271 } 2272 2273 static int ath10k_unchain_msdu(struct sk_buff_head *amsdu, 2274 unsigned long *unchain_cnt) 2275 { 2276 struct sk_buff *skb, *first; 2277 int space; 2278 int total_len = 0; 2279 int amsdu_len = skb_queue_len(amsdu); 2280 2281 /* TODO: Might could optimize this by using 2282 * skb_try_coalesce or similar method to 2283 * decrease copying, or maybe get mac80211 to 2284 * provide a way to just receive a list of 2285 * skb? 2286 */ 2287 2288 first = __skb_dequeue(amsdu); 2289 2290 /* Allocate total length all at once. */ 2291 skb_queue_walk(amsdu, skb) 2292 total_len += skb->len; 2293 2294 space = total_len - skb_tailroom(first); 2295 if ((space > 0) && 2296 (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) { 2297 /* TODO: bump some rx-oom error stat */ 2298 /* put it back together so we can free the 2299 * whole list at once. 2300 */ 2301 __skb_queue_head(amsdu, first); 2302 return -1; 2303 } 2304 2305 /* Walk list again, copying contents into 2306 * msdu_head 2307 */ 2308 while ((skb = __skb_dequeue(amsdu))) { 2309 skb_copy_from_linear_data(skb, skb_put(first, skb->len), 2310 skb->len); 2311 dev_kfree_skb_any(skb); 2312 } 2313 2314 __skb_queue_head(amsdu, first); 2315 2316 *unchain_cnt += amsdu_len - 1; 2317 2318 return 0; 2319 } 2320 2321 static void ath10k_htt_rx_h_unchain(struct ath10k *ar, 2322 struct sk_buff_head *amsdu, 2323 unsigned long *drop_cnt, 2324 unsigned long *unchain_cnt) 2325 { 2326 struct sk_buff *first; 2327 struct ath10k_hw_params *hw = &ar->hw_params; 2328 struct htt_rx_desc *rxd; 2329 struct rx_msdu_start_common *rxd_msdu_start_common; 2330 struct rx_frag_info_common *rxd_frag_info; 2331 enum rx_msdu_decap_format decap; 2332 2333 first = skb_peek(amsdu); 2334 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 2335 #if defined(__linux__) 2336 (void *)first->data - hw->rx_desc_ops->rx_desc_size); 2337 #elif defined(__FreeBSD__) 2338 (u8 *)first->data - hw->rx_desc_ops->rx_desc_size); 2339 #endif 2340 2341 rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd); 2342 rxd_frag_info = ath10k_htt_rx_desc_get_frag_info(hw, rxd); 2343 decap = MS(__le32_to_cpu(rxd_msdu_start_common->info1), 2344 RX_MSDU_START_INFO1_DECAP_FORMAT); 2345 2346 /* FIXME: Current unchaining logic can only handle simple case of raw 2347 * msdu chaining. If decapping is other than raw the chaining may be 2348 * more complex and this isn't handled by the current code. 
Don't even 2349 * try re-constructing such frames - it'll be pretty much garbage. 2350 */ 2351 if (decap != RX_MSDU_DECAP_RAW || 2352 skb_queue_len(amsdu) != 1 + rxd_frag_info->ring2_more_count) { 2353 *drop_cnt += skb_queue_len(amsdu); 2354 __skb_queue_purge(amsdu); 2355 return; 2356 } 2357 2358 ath10k_unchain_msdu(amsdu, unchain_cnt); 2359 } 2360 2361 static bool ath10k_htt_rx_validate_amsdu(struct ath10k *ar, 2362 struct sk_buff_head *amsdu) 2363 { 2364 u8 *subframe_hdr; 2365 struct sk_buff *first; 2366 bool is_first, is_last; 2367 struct ath10k_hw_params *hw = &ar->hw_params; 2368 struct htt_rx_desc *rxd; 2369 struct rx_msdu_end_common *rxd_msdu_end_common; 2370 struct rx_mpdu_start *rxd_mpdu_start; 2371 struct ieee80211_hdr *hdr; 2372 size_t hdr_len, crypto_len; 2373 enum htt_rx_mpdu_encrypt_type enctype; 2374 int bytes_aligned = ar->hw_params.decap_align_bytes; 2375 2376 first = skb_peek(amsdu); 2377 2378 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 2379 #if defined(__linux__) 2380 (void *)first->data - hw->rx_desc_ops->rx_desc_size); 2381 #elif defined(__FreeBSD__) 2382 (u8 *)first->data - hw->rx_desc_ops->rx_desc_size); 2383 #endif 2384 2385 rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd); 2386 rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd); 2387 hdr = (void *)ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd); 2388 2389 is_first = !!(rxd_msdu_end_common->info0 & 2390 __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)); 2391 is_last = !!(rxd_msdu_end_common->info0 & 2392 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU)); 2393 2394 /* Return in case of non-aggregated msdu */ 2395 if (is_first && is_last) 2396 return true; 2397 2398 /* First msdu flag is not set for the first msdu of the list */ 2399 if (!is_first) 2400 return false; 2401 2402 enctype = MS(__le32_to_cpu(rxd_mpdu_start->info0), 2403 RX_MPDU_START_INFO0_ENCRYPT_TYPE); 2404 2405 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2406 crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype); 2407 2408 subframe_hdr = (u8 *)hdr + round_up(hdr_len, bytes_aligned) + 2409 crypto_len; 2410 2411 /* Validate if the amsdu has a proper first subframe. 2412 * There are chances a single msdu can be received as amsdu when 2413 * the unauthenticated amsdu flag of a QoS header 2414 * gets flipped in non-SPP AMSDU's, in such cases the first 2415 * subframe has llc/snap header in place of a valid da. 
2416 * return false if the da matches rfc1042 pattern 2417 */ 2418 if (ether_addr_equal(subframe_hdr, rfc1042_header)) 2419 return false; 2420 2421 return true; 2422 } 2423 2424 static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar, 2425 struct sk_buff_head *amsdu, 2426 struct ieee80211_rx_status *rx_status) 2427 { 2428 if (!rx_status->freq) { 2429 ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n"); 2430 return false; 2431 } 2432 2433 if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) { 2434 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n"); 2435 return false; 2436 } 2437 2438 if (!ath10k_htt_rx_validate_amsdu(ar, amsdu)) { 2439 ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid amsdu received\n"); 2440 return false; 2441 } 2442 2443 return true; 2444 } 2445 2446 static void ath10k_htt_rx_h_filter(struct ath10k *ar, 2447 struct sk_buff_head *amsdu, 2448 struct ieee80211_rx_status *rx_status, 2449 unsigned long *drop_cnt) 2450 { 2451 if (skb_queue_empty(amsdu)) 2452 return; 2453 2454 if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status)) 2455 return; 2456 2457 if (drop_cnt) 2458 *drop_cnt += skb_queue_len(amsdu); 2459 2460 __skb_queue_purge(amsdu); 2461 } 2462 2463 static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt) 2464 { 2465 struct ath10k *ar = htt->ar; 2466 struct ieee80211_rx_status *rx_status = &htt->rx_status; 2467 struct sk_buff_head amsdu; 2468 int ret; 2469 unsigned long drop_cnt = 0; 2470 unsigned long unchain_cnt = 0; 2471 unsigned long drop_cnt_filter = 0; 2472 unsigned long msdus_to_queue, num_msdus; 2473 enum ath10k_pkt_rx_err err = ATH10K_PKT_RX_ERR_MAX; 2474 u8 first_hdr[RX_HTT_HDR_STATUS_LEN]; 2475 2476 __skb_queue_head_init(&amsdu); 2477 2478 spin_lock_bh(&htt->rx_ring.lock); 2479 if (htt->rx_confused) { 2480 spin_unlock_bh(&htt->rx_ring.lock); 2481 return -EIO; 2482 } 2483 ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu); 2484 spin_unlock_bh(&htt->rx_ring.lock); 2485 2486 if (ret < 0) { 2487 ath10k_warn(ar, "rx ring became corrupted: %d\n", ret); 2488 __skb_queue_purge(&amsdu); 2489 /* FIXME: It's probably a good idea to reboot the 2490 * device instead of leaving it inoperable. 
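* For now only mark the rx path as confused so that no further rx processing is attempted.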
2491 */ 2492 htt->rx_confused = true; 2493 return ret; 2494 } 2495 2496 num_msdus = skb_queue_len(&amsdu); 2497 2498 ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff); 2499 2500 /* only for ret = 1 indicates chained msdus */ 2501 if (ret > 0) 2502 ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt); 2503 2504 ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter); 2505 ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err, 0, 2506 false); 2507 msdus_to_queue = skb_queue_len(&amsdu); 2508 ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status); 2509 2510 ath10k_sta_update_rx_tid_stats(ar, first_hdr, num_msdus, err, 2511 unchain_cnt, drop_cnt, drop_cnt_filter, 2512 msdus_to_queue); 2513 2514 return 0; 2515 } 2516 2517 static void ath10k_htt_rx_mpdu_desc_pn_hl(struct htt_hl_rx_desc *rx_desc, 2518 union htt_rx_pn_t *pn, 2519 int pn_len_bits) 2520 { 2521 switch (pn_len_bits) { 2522 case 48: 2523 pn->pn48 = __le32_to_cpu(rx_desc->pn_31_0) + 2524 ((u64)(__le32_to_cpu(rx_desc->u0.pn_63_32) & 0xFFFF) << 32); 2525 break; 2526 case 24: 2527 pn->pn24 = __le32_to_cpu(rx_desc->pn_31_0); 2528 break; 2529 } 2530 } 2531 2532 static bool ath10k_htt_rx_pn_cmp48(union htt_rx_pn_t *new_pn, 2533 union htt_rx_pn_t *old_pn) 2534 { 2535 return ((new_pn->pn48 & 0xffffffffffffULL) <= 2536 (old_pn->pn48 & 0xffffffffffffULL)); 2537 } 2538 2539 static bool ath10k_htt_rx_pn_check_replay_hl(struct ath10k *ar, 2540 struct ath10k_peer *peer, 2541 struct htt_rx_indication_hl *rx) 2542 { 2543 bool last_pn_valid, pn_invalid = false; 2544 enum htt_txrx_sec_cast_type sec_index; 2545 enum htt_security_types sec_type; 2546 union htt_rx_pn_t new_pn = {}; 2547 struct htt_hl_rx_desc *rx_desc; 2548 union htt_rx_pn_t *last_pn; 2549 u32 rx_desc_info, tid; 2550 int num_mpdu_ranges; 2551 2552 lockdep_assert_held(&ar->data_lock); 2553 2554 if (!peer) 2555 return false; 2556 2557 if (!(rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU)) 2558 return false; 2559 2560 num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1), 2561 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); 2562 2563 rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges]; 2564 rx_desc_info = __le32_to_cpu(rx_desc->info); 2565 2566 if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED)) 2567 return false; 2568 2569 tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID); 2570 last_pn_valid = peer->tids_last_pn_valid[tid]; 2571 last_pn = &peer->tids_last_pn[tid]; 2572 2573 if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST)) 2574 sec_index = HTT_TXRX_SEC_MCAST; 2575 else 2576 sec_index = HTT_TXRX_SEC_UCAST; 2577 2578 sec_type = peer->rx_pn[sec_index].sec_type; 2579 ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len); 2580 2581 if (sec_type != HTT_SECURITY_AES_CCMP && 2582 sec_type != HTT_SECURITY_TKIP && 2583 sec_type != HTT_SECURITY_TKIP_NOMIC) 2584 return false; 2585 2586 if (last_pn_valid) 2587 pn_invalid = ath10k_htt_rx_pn_cmp48(&new_pn, last_pn); 2588 else 2589 peer->tids_last_pn_valid[tid] = true; 2590 2591 if (!pn_invalid) 2592 last_pn->pn48 = new_pn.pn48; 2593 2594 return pn_invalid; 2595 } 2596 2597 static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt, 2598 struct htt_rx_indication_hl *rx, 2599 struct sk_buff *skb, 2600 enum htt_rx_pn_check_type check_pn_type, 2601 enum htt_rx_tkip_demic_type tkip_mic_type) 2602 { 2603 struct ath10k *ar = htt->ar; 2604 struct ath10k_peer *peer; 2605 struct htt_rx_indication_mpdu_range *mpdu_ranges; 2606 struct fw_rx_desc_hl *fw_desc; 2607 enum htt_txrx_sec_cast_type 
sec_index; 2608 enum htt_security_types sec_type; 2609 union htt_rx_pn_t new_pn = {}; 2610 struct htt_hl_rx_desc *rx_desc; 2611 struct ieee80211_hdr *hdr; 2612 struct ieee80211_rx_status *rx_status; 2613 u16 peer_id; 2614 u8 rx_desc_len; 2615 int num_mpdu_ranges; 2616 size_t tot_hdr_len; 2617 struct ieee80211_channel *ch; 2618 bool pn_invalid, qos, first_msdu; 2619 u32 tid, rx_desc_info; 2620 2621 peer_id = __le16_to_cpu(rx->hdr.peer_id); 2622 tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID); 2623 2624 spin_lock_bh(&ar->data_lock); 2625 peer = ath10k_peer_find_by_id(ar, peer_id); 2626 spin_unlock_bh(&ar->data_lock); 2627 if (!peer && peer_id != HTT_INVALID_PEERID) 2628 ath10k_warn(ar, "Got RX ind from invalid peer: %u\n", peer_id); 2629 2630 if (!peer) 2631 return true; 2632 2633 num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1), 2634 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); 2635 mpdu_ranges = htt_rx_ind_get_mpdu_ranges_hl(rx); 2636 fw_desc = &rx->fw_desc; 2637 rx_desc_len = fw_desc->len; 2638 2639 if (fw_desc->u.bits.discard) { 2640 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt discard mpdu\n"); 2641 goto err; 2642 } 2643 2644 /* I have not yet seen any case where num_mpdu_ranges > 1. 2645 * qcacld does not seem handle that case either, so we introduce the 2646 * same limitation here as well. 2647 */ 2648 if (num_mpdu_ranges > 1) 2649 ath10k_warn(ar, 2650 "Unsupported number of MPDU ranges: %d, ignoring all but the first\n", 2651 num_mpdu_ranges); 2652 2653 if (mpdu_ranges->mpdu_range_status != 2654 HTT_RX_IND_MPDU_STATUS_OK && 2655 mpdu_ranges->mpdu_range_status != 2656 HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR) { 2657 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt mpdu_range_status %d\n", 2658 mpdu_ranges->mpdu_range_status); 2659 goto err; 2660 } 2661 2662 rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges]; 2663 rx_desc_info = __le32_to_cpu(rx_desc->info); 2664 2665 if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST)) 2666 sec_index = HTT_TXRX_SEC_MCAST; 2667 else 2668 sec_index = HTT_TXRX_SEC_UCAST; 2669 2670 sec_type = peer->rx_pn[sec_index].sec_type; 2671 first_msdu = rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU; 2672 2673 ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len); 2674 2675 if (check_pn_type == HTT_RX_PN_CHECK && tid >= IEEE80211_NUM_TIDS) { 2676 spin_lock_bh(&ar->data_lock); 2677 pn_invalid = ath10k_htt_rx_pn_check_replay_hl(ar, peer, rx); 2678 spin_unlock_bh(&ar->data_lock); 2679 2680 if (pn_invalid) 2681 goto err; 2682 } 2683 2684 /* Strip off all headers before the MAC header before delivery to 2685 * mac80211 2686 */ 2687 tot_hdr_len = sizeof(struct htt_resp_hdr) + sizeof(rx->hdr) + 2688 sizeof(rx->ppdu) + sizeof(rx->prefix) + 2689 sizeof(rx->fw_desc) + 2690 sizeof(*mpdu_ranges) * num_mpdu_ranges + rx_desc_len; 2691 2692 skb_pull(skb, tot_hdr_len); 2693 2694 hdr = (struct ieee80211_hdr *)skb->data; 2695 qos = ieee80211_is_data_qos(hdr->frame_control); 2696 2697 rx_status = IEEE80211_SKB_RXCB(skb); 2698 memset(rx_status, 0, sizeof(*rx_status)); 2699 2700 if (rx->ppdu.combined_rssi == 0) { 2701 /* SDIO firmware does not provide signal */ 2702 rx_status->signal = 0; 2703 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; 2704 } else { 2705 rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR + 2706 rx->ppdu.combined_rssi; 2707 rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL; 2708 } 2709 2710 spin_lock_bh(&ar->data_lock); 2711 ch = ar->scan_channel; 2712 if (!ch) 2713 ch = ar->rx_channel; 2714 if (!ch) 2715 ch = ath10k_htt_rx_h_any_channel(ar); 2716 if (!ch) 
2717 ch = ar->tgt_oper_chan; 2718 spin_unlock_bh(&ar->data_lock); 2719 2720 if (ch) { 2721 rx_status->band = ch->band; 2722 rx_status->freq = ch->center_freq; 2723 } 2724 if (rx->fw_desc.flags & FW_RX_DESC_FLAGS_LAST_MSDU) 2725 rx_status->flag &= ~RX_FLAG_AMSDU_MORE; 2726 else 2727 rx_status->flag |= RX_FLAG_AMSDU_MORE; 2728 2729 /* Not entirely sure about this, but all frames from the chipset has 2730 * the protected flag set even though they have already been decrypted. 2731 * Unmasking this flag is necessary in order for mac80211 not to drop 2732 * the frame. 2733 * TODO: Verify this is always the case or find out a way to check 2734 * if there has been hw decryption. 2735 */ 2736 if (ieee80211_has_protected(hdr->frame_control)) { 2737 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); 2738 rx_status->flag |= RX_FLAG_DECRYPTED | 2739 RX_FLAG_IV_STRIPPED | 2740 RX_FLAG_MMIC_STRIPPED; 2741 2742 if (tid < IEEE80211_NUM_TIDS && 2743 first_msdu && 2744 check_pn_type == HTT_RX_PN_CHECK && 2745 (sec_type == HTT_SECURITY_AES_CCMP || 2746 sec_type == HTT_SECURITY_TKIP || 2747 sec_type == HTT_SECURITY_TKIP_NOMIC)) { 2748 u8 offset, *ivp, i; 2749 s8 keyidx = 0; 2750 __le64 pn48 = cpu_to_le64(new_pn.pn48); 2751 2752 hdr = (struct ieee80211_hdr *)skb->data; 2753 offset = ieee80211_hdrlen(hdr->frame_control); 2754 hdr->frame_control |= __cpu_to_le16(IEEE80211_FCTL_PROTECTED); 2755 rx_status->flag &= ~RX_FLAG_IV_STRIPPED; 2756 2757 memmove(skb->data - IEEE80211_CCMP_HDR_LEN, 2758 skb->data, offset); 2759 skb_push(skb, IEEE80211_CCMP_HDR_LEN); 2760 ivp = skb->data + offset; 2761 memset(skb->data + offset, 0, IEEE80211_CCMP_HDR_LEN); 2762 /* Ext IV */ 2763 ivp[IEEE80211_WEP_IV_LEN - 1] |= ATH10K_IEEE80211_EXTIV; 2764 2765 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) { 2766 if (peer->keys[i] && 2767 peer->keys[i]->flags & IEEE80211_KEY_FLAG_PAIRWISE) 2768 keyidx = peer->keys[i]->keyidx; 2769 } 2770 2771 /* Key ID */ 2772 ivp[IEEE80211_WEP_IV_LEN - 1] |= keyidx << 6; 2773 2774 if (sec_type == HTT_SECURITY_AES_CCMP) { 2775 rx_status->flag |= RX_FLAG_MIC_STRIPPED; 2776 /* pn 0, pn 1 */ 2777 memcpy(skb->data + offset, &pn48, 2); 2778 /* pn 1, pn 3 , pn 34 , pn 5 */ 2779 memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4); 2780 } else { 2781 rx_status->flag |= RX_FLAG_ICV_STRIPPED; 2782 /* TSC 0 */ 2783 memcpy(skb->data + offset + 2, &pn48, 1); 2784 /* TSC 1 */ 2785 memcpy(skb->data + offset, ((u8 *)&pn48) + 1, 1); 2786 /* TSC 2 , TSC 3 , TSC 4 , TSC 5*/ 2787 memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4); 2788 } 2789 } 2790 } 2791 2792 if (tkip_mic_type == HTT_RX_TKIP_MIC) 2793 rx_status->flag &= ~RX_FLAG_IV_STRIPPED & 2794 ~RX_FLAG_MMIC_STRIPPED; 2795 2796 if (mpdu_ranges->mpdu_range_status == HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR) 2797 rx_status->flag |= RX_FLAG_MMIC_ERROR; 2798 2799 if (!qos && tid < IEEE80211_NUM_TIDS) { 2800 u8 offset; 2801 __le16 qos_ctrl = 0; 2802 2803 hdr = (struct ieee80211_hdr *)skb->data; 2804 offset = ieee80211_hdrlen(hdr->frame_control); 2805 2806 hdr->frame_control |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA); 2807 memmove(skb->data - IEEE80211_QOS_CTL_LEN, skb->data, offset); 2808 skb_push(skb, IEEE80211_QOS_CTL_LEN); 2809 qos_ctrl = cpu_to_le16(tid); 2810 memcpy(skb->data + offset, &qos_ctrl, IEEE80211_QOS_CTL_LEN); 2811 } 2812 2813 if (ar->napi.dev) 2814 ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi); 2815 else 2816 ieee80211_rx_ni(ar->hw, skb); 2817 2818 /* We have delivered the skb to the upper layers (mac80211) so we 2819 * must not free it. 
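* Returning false tells the caller the skb has been consumed.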
2820 */ 2821 return false; 2822 err: 2823 /* Tell the caller that it must free the skb since we have not 2824 * consumed it 2825 */ 2826 return true; 2827 } 2828 2829 static int ath10k_htt_rx_frag_tkip_decap_nomic(struct sk_buff *skb, 2830 u16 head_len, 2831 u16 hdr_len) 2832 { 2833 u8 *ivp, *orig_hdr; 2834 2835 orig_hdr = skb->data; 2836 ivp = orig_hdr + hdr_len + head_len; 2837 2838 /* the ExtIV bit is always set to 1 for TKIP */ 2839 if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV)) 2840 return -EINVAL; 2841 2842 memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len); 2843 skb_pull(skb, IEEE80211_TKIP_IV_LEN); 2844 skb_trim(skb, skb->len - ATH10K_IEEE80211_TKIP_MICLEN); 2845 return 0; 2846 } 2847 2848 static int ath10k_htt_rx_frag_tkip_decap_withmic(struct sk_buff *skb, 2849 u16 head_len, 2850 u16 hdr_len) 2851 { 2852 u8 *ivp, *orig_hdr; 2853 2854 orig_hdr = skb->data; 2855 ivp = orig_hdr + hdr_len + head_len; 2856 2857 /* the ExtIV bit is always set to 1 for TKIP */ 2858 if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV)) 2859 return -EINVAL; 2860 2861 memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len); 2862 skb_pull(skb, IEEE80211_TKIP_IV_LEN); 2863 skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN); 2864 return 0; 2865 } 2866 2867 static int ath10k_htt_rx_frag_ccmp_decap(struct sk_buff *skb, 2868 u16 head_len, 2869 u16 hdr_len) 2870 { 2871 u8 *ivp, *orig_hdr; 2872 2873 orig_hdr = skb->data; 2874 ivp = orig_hdr + hdr_len + head_len; 2875 2876 /* the ExtIV bit is always set to 1 for CCMP */ 2877 if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV)) 2878 return -EINVAL; 2879 2880 skb_trim(skb, skb->len - IEEE80211_CCMP_MIC_LEN); 2881 memmove(orig_hdr + IEEE80211_CCMP_HDR_LEN, orig_hdr, head_len + hdr_len); 2882 skb_pull(skb, IEEE80211_CCMP_HDR_LEN); 2883 return 0; 2884 } 2885 2886 static int ath10k_htt_rx_frag_wep_decap(struct sk_buff *skb, 2887 u16 head_len, 2888 u16 hdr_len) 2889 { 2890 u8 *orig_hdr; 2891 2892 orig_hdr = skb->data; 2893 2894 memmove(orig_hdr + IEEE80211_WEP_IV_LEN, 2895 orig_hdr, head_len + hdr_len); 2896 skb_pull(skb, IEEE80211_WEP_IV_LEN); 2897 skb_trim(skb, skb->len - IEEE80211_WEP_ICV_LEN); 2898 return 0; 2899 } 2900 2901 static bool ath10k_htt_rx_proc_rx_frag_ind_hl(struct ath10k_htt *htt, 2902 struct htt_rx_fragment_indication *rx, 2903 struct sk_buff *skb) 2904 { 2905 struct ath10k *ar = htt->ar; 2906 enum htt_rx_tkip_demic_type tkip_mic = HTT_RX_NON_TKIP_MIC; 2907 enum htt_txrx_sec_cast_type sec_index; 2908 struct htt_rx_indication_hl *rx_hl; 2909 enum htt_security_types sec_type; 2910 u32 tid, frag, seq, rx_desc_info; 2911 union htt_rx_pn_t new_pn = {}; 2912 struct htt_hl_rx_desc *rx_desc; 2913 u16 peer_id, sc, hdr_space; 2914 union htt_rx_pn_t *last_pn; 2915 struct ieee80211_hdr *hdr; 2916 int ret, num_mpdu_ranges; 2917 struct ath10k_peer *peer; 2918 struct htt_resp *resp; 2919 size_t tot_hdr_len; 2920 2921 resp = (struct htt_resp *)(skb->data + HTT_RX_FRAG_IND_INFO0_HEADER_LEN); 2922 skb_pull(skb, HTT_RX_FRAG_IND_INFO0_HEADER_LEN); 2923 skb_trim(skb, skb->len - FCS_LEN); 2924 2925 peer_id = __le16_to_cpu(rx->peer_id); 2926 rx_hl = (struct htt_rx_indication_hl *)(&resp->rx_ind_hl); 2927 2928 spin_lock_bh(&ar->data_lock); 2929 peer = ath10k_peer_find_by_id(ar, peer_id); 2930 if (!peer) { 2931 ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer: %u\n", peer_id); 2932 goto err; 2933 } 2934 2935 num_mpdu_ranges = MS(__le32_to_cpu(rx_hl->hdr.info1), 2936 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); 2937 
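/* The HL rx descriptor follows the HTT response header, rx indication header, ppdu, prefix, fw descriptor and the MPDU range array; add up their sizes to locate it. */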
2938 tot_hdr_len = sizeof(struct htt_resp_hdr) + 2939 sizeof(rx_hl->hdr) + 2940 sizeof(rx_hl->ppdu) + 2941 sizeof(rx_hl->prefix) + 2942 sizeof(rx_hl->fw_desc) + 2943 sizeof(struct htt_rx_indication_mpdu_range) * num_mpdu_ranges; 2944 2945 tid = MS(rx_hl->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID); 2946 rx_desc = (struct htt_hl_rx_desc *)(skb->data + tot_hdr_len); 2947 rx_desc_info = __le32_to_cpu(rx_desc->info); 2948 2949 hdr = (struct ieee80211_hdr *)((u8 *)rx_desc + rx_hl->fw_desc.len); 2950 2951 if (is_multicast_ether_addr(hdr->addr1)) { 2952 /* Discard the fragment with multicast DA */ 2953 goto err; 2954 } 2955 2956 if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED)) { 2957 spin_unlock_bh(&ar->data_lock); 2958 return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb, 2959 HTT_RX_NON_PN_CHECK, 2960 HTT_RX_NON_TKIP_MIC); 2961 } 2962 2963 if (ieee80211_has_retry(hdr->frame_control)) 2964 goto err; 2965 2966 hdr_space = ieee80211_hdrlen(hdr->frame_control); 2967 sc = __le16_to_cpu(hdr->seq_ctrl); 2968 seq = IEEE80211_SEQ_TO_SN(sc); 2969 frag = sc & IEEE80211_SCTL_FRAG; 2970 2971 sec_index = MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST) ? 2972 HTT_TXRX_SEC_MCAST : HTT_TXRX_SEC_UCAST; 2973 sec_type = peer->rx_pn[sec_index].sec_type; 2974 ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len); 2975 2976 switch (sec_type) { 2977 case HTT_SECURITY_TKIP: 2978 tkip_mic = HTT_RX_TKIP_MIC; 2979 ret = ath10k_htt_rx_frag_tkip_decap_withmic(skb, 2980 tot_hdr_len + 2981 rx_hl->fw_desc.len, 2982 hdr_space); 2983 if (ret) 2984 goto err; 2985 break; 2986 case HTT_SECURITY_TKIP_NOMIC: 2987 ret = ath10k_htt_rx_frag_tkip_decap_nomic(skb, 2988 tot_hdr_len + 2989 rx_hl->fw_desc.len, 2990 hdr_space); 2991 if (ret) 2992 goto err; 2993 break; 2994 case HTT_SECURITY_AES_CCMP: 2995 ret = ath10k_htt_rx_frag_ccmp_decap(skb, 2996 tot_hdr_len + rx_hl->fw_desc.len, 2997 hdr_space); 2998 if (ret) 2999 goto err; 3000 break; 3001 case HTT_SECURITY_WEP128: 3002 case HTT_SECURITY_WEP104: 3003 case HTT_SECURITY_WEP40: 3004 ret = ath10k_htt_rx_frag_wep_decap(skb, 3005 tot_hdr_len + rx_hl->fw_desc.len, 3006 hdr_space); 3007 if (ret) 3008 goto err; 3009 break; 3010 default: 3011 break; 3012 } 3013 3014 resp = (struct htt_resp *)(skb->data); 3015 3016 if (sec_type != HTT_SECURITY_AES_CCMP && 3017 sec_type != HTT_SECURITY_TKIP && 3018 sec_type != HTT_SECURITY_TKIP_NOMIC) { 3019 spin_unlock_bh(&ar->data_lock); 3020 return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb, 3021 HTT_RX_NON_PN_CHECK, 3022 HTT_RX_NON_TKIP_MIC); 3023 } 3024 3025 last_pn = &peer->frag_tids_last_pn[tid]; 3026 3027 if (frag == 0) { 3028 if (ath10k_htt_rx_pn_check_replay_hl(ar, peer, &resp->rx_ind_hl)) 3029 goto err; 3030 3031 last_pn->pn48 = new_pn.pn48; 3032 peer->frag_tids_seq[tid] = seq; 3033 } else if (sec_type == HTT_SECURITY_AES_CCMP) { 3034 if (seq != peer->frag_tids_seq[tid]) 3035 goto err; 3036 3037 if (new_pn.pn48 != last_pn->pn48 + 1) 3038 goto err; 3039 3040 last_pn->pn48 = new_pn.pn48; 3041 last_pn = &peer->tids_last_pn[tid]; 3042 last_pn->pn48 = new_pn.pn48; 3043 } 3044 3045 spin_unlock_bh(&ar->data_lock); 3046 3047 return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb, 3048 HTT_RX_NON_PN_CHECK, tkip_mic); 3049 3050 err: 3051 spin_unlock_bh(&ar->data_lock); 3052 3053 /* Tell the caller that it must free the skb since we have not 3054 * consumed it 3055 */ 3056 return true; 3057 } 3058 3059 static void ath10k_htt_rx_proc_rx_ind_ll(struct ath10k_htt *htt, 3060 struct htt_rx_indication *rx) 
3061 { 3062 struct ath10k *ar = htt->ar; 3063 struct htt_rx_indication_mpdu_range *mpdu_ranges; 3064 int num_mpdu_ranges; 3065 int i, mpdu_count = 0; 3066 u16 peer_id; 3067 u8 tid; 3068 3069 num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1), 3070 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); 3071 peer_id = __le16_to_cpu(rx->hdr.peer_id); 3072 tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID); 3073 3074 mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx); 3075 3076 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ", 3077 rx, struct_size(rx, mpdu_ranges, num_mpdu_ranges)); 3078 3079 for (i = 0; i < num_mpdu_ranges; i++) 3080 mpdu_count += mpdu_ranges[i].mpdu_count; 3081 3082 atomic_add(mpdu_count, &htt->num_mpdus_ready); 3083 3084 ath10k_sta_update_rx_tid_stats_ampdu(ar, peer_id, tid, mpdu_ranges, 3085 num_mpdu_ranges); 3086 } 3087 3088 static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar, 3089 struct sk_buff *skb) 3090 { 3091 struct ath10k_htt *htt = &ar->htt; 3092 struct htt_resp *resp = (struct htt_resp *)skb->data; 3093 struct htt_tx_done tx_done = {}; 3094 int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS); 3095 __le16 msdu_id, *msdus; 3096 bool rssi_enabled = false; 3097 u8 msdu_count = 0, num_airtime_records, tid; 3098 int i, htt_pad = 0; 3099 struct htt_data_tx_compl_ppdu_dur *ppdu_info; 3100 struct ath10k_peer *peer; 3101 u16 ppdu_info_offset = 0, peer_id; 3102 u32 tx_duration; 3103 3104 switch (status) { 3105 case HTT_DATA_TX_STATUS_NO_ACK: 3106 tx_done.status = HTT_TX_COMPL_STATE_NOACK; 3107 break; 3108 case HTT_DATA_TX_STATUS_OK: 3109 tx_done.status = HTT_TX_COMPL_STATE_ACK; 3110 break; 3111 case HTT_DATA_TX_STATUS_DISCARD: 3112 case HTT_DATA_TX_STATUS_POSTPONE: 3113 tx_done.status = HTT_TX_COMPL_STATE_DISCARD; 3114 break; 3115 default: 3116 ath10k_warn(ar, "unhandled tx completion status %d\n", status); 3117 tx_done.status = HTT_TX_COMPL_STATE_DISCARD; 3118 break; 3119 } 3120 3121 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n", 3122 resp->data_tx_completion.num_msdus); 3123 3124 msdu_count = resp->data_tx_completion.num_msdus; 3125 msdus = resp->data_tx_completion.msdus; 3126 rssi_enabled = ath10k_is_rssi_enable(&ar->hw_params, resp); 3127 3128 if (rssi_enabled) 3129 htt_pad = ath10k_tx_data_rssi_get_pad_bytes(&ar->hw_params, 3130 resp); 3131 3132 for (i = 0; i < msdu_count; i++) { 3133 msdu_id = msdus[i]; 3134 tx_done.msdu_id = __le16_to_cpu(msdu_id); 3135 3136 if (rssi_enabled) { 3137 /* Total no of MSDUs should be even, 3138 * if odd MSDUs are sent firmware fills 3139 * last msdu id with 0xffff 3140 */ 3141 if (msdu_count & 0x01) { 3142 msdu_id = msdus[msdu_count + i + 1 + htt_pad]; 3143 tx_done.ack_rssi = __le16_to_cpu(msdu_id); 3144 } else { 3145 msdu_id = msdus[msdu_count + i + htt_pad]; 3146 tx_done.ack_rssi = __le16_to_cpu(msdu_id); 3147 } 3148 } 3149 3150 /* kfifo_put: In practice firmware shouldn't fire off per-CE 3151 * interrupt and main interrupt (MSI/-X range case) for the same 3152 * HTC service so it should be safe to use kfifo_put w/o lock. 3153 * 3154 * From kfifo_put() documentation: 3155 * Note that with only one concurrent reader and one concurrent 3156 * writer, you don't need extra locking to use these macro. 
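* High latency (HL) devices skip the fifo and complete the tx descriptor right away below.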
3157 */ 3158 if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) { 3159 ath10k_txrx_tx_unref(htt, &tx_done); 3160 } else if (!kfifo_put(&htt->txdone_fifo, tx_done)) { 3161 ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n", 3162 tx_done.msdu_id, tx_done.status); 3163 ath10k_txrx_tx_unref(htt, &tx_done); 3164 } 3165 } 3166 3167 if (!(resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_PPDU_DURATION_PRESENT)) 3168 return; 3169 3170 ppdu_info_offset = (msdu_count & 0x01) ? msdu_count + 1 : msdu_count; 3171 3172 if (rssi_enabled) 3173 ppdu_info_offset += ppdu_info_offset; 3174 3175 if (resp->data_tx_completion.flags2 & 3176 (HTT_TX_CMPL_FLAG_PPID_PRESENT | HTT_TX_CMPL_FLAG_PA_PRESENT)) 3177 ppdu_info_offset += 2; 3178 3179 ppdu_info = (struct htt_data_tx_compl_ppdu_dur *)&msdus[ppdu_info_offset]; 3180 num_airtime_records = FIELD_GET(HTT_TX_COMPL_PPDU_DUR_INFO0_NUM_ENTRIES_MASK, 3181 __le32_to_cpu(ppdu_info->info0)); 3182 3183 for (i = 0; i < num_airtime_records; i++) { 3184 struct htt_data_tx_ppdu_dur *ppdu_dur; 3185 u32 info0; 3186 3187 ppdu_dur = &ppdu_info->ppdu_dur[i]; 3188 info0 = __le32_to_cpu(ppdu_dur->info0); 3189 3190 peer_id = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_PEER_ID_MASK, 3191 info0); 3192 rcu_read_lock(); 3193 spin_lock_bh(&ar->data_lock); 3194 3195 peer = ath10k_peer_find_by_id(ar, peer_id); 3196 if (!peer || !peer->sta) { 3197 spin_unlock_bh(&ar->data_lock); 3198 rcu_read_unlock(); 3199 continue; 3200 } 3201 3202 tid = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_TID_MASK, info0) & 3203 IEEE80211_QOS_CTL_TID_MASK; 3204 tx_duration = __le32_to_cpu(ppdu_dur->tx_duration); 3205 3206 ieee80211_sta_register_airtime(peer->sta, tid, tx_duration, 0); 3207 3208 spin_unlock_bh(&ar->data_lock); 3209 rcu_read_unlock(); 3210 } 3211 } 3212 3213 static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp) 3214 { 3215 struct htt_rx_addba *ev = &resp->rx_addba; 3216 struct ath10k_peer *peer; 3217 struct ath10k_vif *arvif; 3218 u16 info0, tid, peer_id; 3219 3220 info0 = __le16_to_cpu(ev->info0); 3221 tid = MS(info0, HTT_RX_BA_INFO0_TID); 3222 peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID); 3223 3224 ath10k_dbg(ar, ATH10K_DBG_HTT, 3225 "htt rx addba tid %u peer_id %u size %u\n", 3226 tid, peer_id, ev->window_size); 3227 3228 spin_lock_bh(&ar->data_lock); 3229 peer = ath10k_peer_find_by_id(ar, peer_id); 3230 if (!peer) { 3231 ath10k_warn(ar, "received addba event for invalid peer_id: %u\n", 3232 peer_id); 3233 spin_unlock_bh(&ar->data_lock); 3234 return; 3235 } 3236 3237 arvif = ath10k_get_arvif(ar, peer->vdev_id); 3238 if (!arvif) { 3239 ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n", 3240 peer->vdev_id); 3241 spin_unlock_bh(&ar->data_lock); 3242 return; 3243 } 3244 3245 ath10k_dbg(ar, ATH10K_DBG_HTT, 3246 "htt rx start rx ba session sta %pM tid %u size %u\n", 3247 peer->addr, tid, ev->window_size); 3248 3249 ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid); 3250 spin_unlock_bh(&ar->data_lock); 3251 } 3252 3253 static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp) 3254 { 3255 struct htt_rx_delba *ev = &resp->rx_delba; 3256 struct ath10k_peer *peer; 3257 struct ath10k_vif *arvif; 3258 u16 info0, tid, peer_id; 3259 3260 info0 = __le16_to_cpu(ev->info0); 3261 tid = MS(info0, HTT_RX_BA_INFO0_TID); 3262 peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID); 3263 3264 ath10k_dbg(ar, ATH10K_DBG_HTT, 3265 "htt rx delba tid %u peer_id %u\n", 3266 tid, peer_id); 3267 3268 spin_lock_bh(&ar->data_lock); 3269 peer = ath10k_peer_find_by_id(ar, peer_id); 3270 if 
(!peer) { 3271 ath10k_warn(ar, "received addba event for invalid peer_id: %u\n", 3272 peer_id); 3273 spin_unlock_bh(&ar->data_lock); 3274 return; 3275 } 3276 3277 arvif = ath10k_get_arvif(ar, peer->vdev_id); 3278 if (!arvif) { 3279 ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n", 3280 peer->vdev_id); 3281 spin_unlock_bh(&ar->data_lock); 3282 return; 3283 } 3284 3285 ath10k_dbg(ar, ATH10K_DBG_HTT, 3286 "htt rx stop rx ba session sta %pM tid %u\n", 3287 peer->addr, tid); 3288 3289 ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid); 3290 spin_unlock_bh(&ar->data_lock); 3291 } 3292 3293 static int ath10k_htt_rx_extract_amsdu(struct ath10k_hw_params *hw, 3294 struct sk_buff_head *list, 3295 struct sk_buff_head *amsdu) 3296 { 3297 struct sk_buff *msdu; 3298 struct htt_rx_desc *rxd; 3299 struct rx_msdu_end_common *rxd_msdu_end_common; 3300 3301 if (skb_queue_empty(list)) 3302 return -ENOBUFS; 3303 3304 if (WARN_ON(!skb_queue_empty(amsdu))) 3305 return -EINVAL; 3306 3307 while ((msdu = __skb_dequeue(list))) { 3308 __skb_queue_tail(amsdu, msdu); 3309 3310 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 3311 #if defined(__linux__) 3312 (void *)msdu->data - 3313 #elif defined(__FreeBSD__) 3314 (u8 *)msdu->data - 3315 #endif 3316 hw->rx_desc_ops->rx_desc_size); 3317 3318 rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd); 3319 if (rxd_msdu_end_common->info0 & 3320 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU)) 3321 break; 3322 } 3323 3324 msdu = skb_peek_tail(amsdu); 3325 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 3326 #if defined(__linux__) 3327 (void *)msdu->data - hw->rx_desc_ops->rx_desc_size); 3328 #elif defined(__FreeBSD__) 3329 (u8 *)msdu->data - hw->rx_desc_ops->rx_desc_size); 3330 #endif 3331 3332 rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd); 3333 if (!(rxd_msdu_end_common->info0 & 3334 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) { 3335 skb_queue_splice_init(amsdu, list); 3336 return -EAGAIN; 3337 } 3338 3339 return 0; 3340 } 3341 3342 static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status, 3343 struct sk_buff *skb) 3344 { 3345 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 3346 3347 if (!ieee80211_has_protected(hdr->frame_control)) 3348 return; 3349 3350 /* Offloaded frames are already decrypted but firmware insists they are 3351 * protected in the 802.11 header. Strip the flag. Otherwise mac80211 3352 * will drop the frame. 3353 */ 3354 3355 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); 3356 status->flag |= RX_FLAG_DECRYPTED | 3357 RX_FLAG_IV_STRIPPED | 3358 RX_FLAG_MMIC_STRIPPED; 3359 } 3360 3361 static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar, 3362 struct sk_buff_head *list) 3363 { 3364 struct ath10k_htt *htt = &ar->htt; 3365 struct ieee80211_rx_status *status = &htt->rx_status; 3366 struct htt_rx_offload_msdu *rx; 3367 struct sk_buff *msdu; 3368 size_t offset; 3369 3370 while ((msdu = __skb_dequeue(list))) { 3371 /* Offloaded frames don't have Rx descriptor. Instead they have 3372 * a short meta information header. 3373 */ 3374 3375 rx = (void *)msdu->data; 3376 3377 skb_put(msdu, sizeof(*rx)); 3378 skb_pull(msdu, sizeof(*rx)); 3379 3380 if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) { 3381 ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n"); 3382 dev_kfree_skb_any(msdu); 3383 continue; 3384 } 3385 3386 skb_put(msdu, __le16_to_cpu(rx->msdu_len)); 3387 3388 /* Offloaded rx header length isn't multiple of 2 nor 4 so the 3389 * actual payload is unaligned. Align the frame. 
Otherwise 3390 * mac80211 complains. This shouldn't reduce performance much 3391 * because these offloaded frames are rare. 3392 */ 3393 offset = 4 - ((unsigned long)msdu->data & 3); 3394 skb_put(msdu, offset); 3395 memmove(msdu->data + offset, msdu->data, msdu->len); 3396 skb_pull(msdu, offset); 3397 3398 /* FIXME: The frame is NWifi. Re-construct QoS Control 3399 * if possible later. 3400 */ 3401 3402 memset(status, 0, sizeof(*status)); 3403 status->flag |= RX_FLAG_NO_SIGNAL_VAL; 3404 3405 ath10k_htt_rx_h_rx_offload_prot(status, msdu); 3406 ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id); 3407 ath10k_htt_rx_h_queue_msdu(ar, status, msdu); 3408 } 3409 } 3410 3411 static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb) 3412 { 3413 struct ath10k_htt *htt = &ar->htt; 3414 struct htt_resp *resp = (void *)skb->data; 3415 struct ieee80211_rx_status *status = &htt->rx_status; 3416 struct sk_buff_head list; 3417 struct sk_buff_head amsdu; 3418 u16 peer_id; 3419 u16 msdu_count; 3420 u8 vdev_id; 3421 u8 tid; 3422 bool offload; 3423 bool frag; 3424 int ret; 3425 3426 lockdep_assert_held(&htt->rx_ring.lock); 3427 3428 if (htt->rx_confused) 3429 return -EIO; 3430 3431 skb_pull(skb, sizeof(resp->hdr)); 3432 skb_pull(skb, sizeof(resp->rx_in_ord_ind)); 3433 3434 peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id); 3435 msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count); 3436 vdev_id = resp->rx_in_ord_ind.vdev_id; 3437 tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID); 3438 offload = !!(resp->rx_in_ord_ind.info & 3439 HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK); 3440 frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK); 3441 3442 ath10k_dbg(ar, ATH10K_DBG_HTT, 3443 "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n", 3444 vdev_id, peer_id, tid, offload, frag, msdu_count); 3445 3446 if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) { 3447 ath10k_warn(ar, "dropping invalid in order rx indication\n"); 3448 return -EINVAL; 3449 } 3450 3451 /* The event can deliver more than 1 A-MSDU. Each A-MSDU is later 3452 * extracted and processed. 3453 */ 3454 __skb_queue_head_init(&list); 3455 if (ar->hw_params.target_64bit) 3456 ret = ath10k_htt_rx_pop_paddr64_list(htt, &resp->rx_in_ord_ind, 3457 &list); 3458 else 3459 ret = ath10k_htt_rx_pop_paddr32_list(htt, &resp->rx_in_ord_ind, 3460 &list); 3461 3462 if (ret < 0) { 3463 ath10k_warn(ar, "failed to pop paddr list: %d\n", ret); 3464 htt->rx_confused = true; 3465 return -EIO; 3466 } 3467 3468 /* Offloaded frames are very different and need to be handled 3469 * separately. 3470 */ 3471 if (offload) 3472 ath10k_htt_rx_h_rx_offload(ar, &list); 3473 3474 while (!skb_queue_empty(&list)) { 3475 __skb_queue_head_init(&amsdu); 3476 ret = ath10k_htt_rx_extract_amsdu(&ar->hw_params, &list, &amsdu); 3477 switch (ret) { 3478 case 0: 3479 /* Note: The in-order indication may report interleaved 3480 * frames from different PPDUs meaning reported rx rate 3481 * to mac80211 isn't accurate/reliable. It's still 3482 * better to report something than nothing though. This 3483 * should still give an idea about rx rate to the user. 3484 */ 3485 ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id); 3486 ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL); 3487 ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL, 3488 NULL, peer_id, frag); 3489 ath10k_htt_rx_h_enqueue(ar, &amsdu, status); 3490 break; 3491 case -EAGAIN: 3492 fallthrough; 3493 default: 3494 /* Should not happen. 
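* Any extraction failure is treated as fatal: warn, mark rx_confused and purge the remaining list.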
*/ 3495 ath10k_warn(ar, "failed to extract amsdu: %d\n", ret); 3496 htt->rx_confused = true; 3497 __skb_queue_purge(&list); 3498 return -EIO; 3499 } 3500 } 3501 return ret; 3502 } 3503 3504 static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar, 3505 const __le32 *resp_ids, 3506 int num_resp_ids) 3507 { 3508 int i; 3509 u32 resp_id; 3510 3511 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n", 3512 num_resp_ids); 3513 3514 for (i = 0; i < num_resp_ids; i++) { 3515 resp_id = le32_to_cpu(resp_ids[i]); 3516 3517 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n", 3518 resp_id); 3519 3520 /* TODO: free resp_id */ 3521 } 3522 } 3523 3524 static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb) 3525 { 3526 struct ieee80211_hw *hw = ar->hw; 3527 struct ieee80211_txq *txq; 3528 struct htt_resp *resp = (struct htt_resp *)skb->data; 3529 struct htt_tx_fetch_record *record; 3530 size_t len; 3531 size_t max_num_bytes; 3532 size_t max_num_msdus; 3533 size_t num_bytes; 3534 size_t num_msdus; 3535 const __le32 *resp_ids; 3536 u16 num_records; 3537 u16 num_resp_ids; 3538 u16 peer_id; 3539 u8 tid; 3540 int ret; 3541 int i; 3542 bool may_tx; 3543 3544 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n"); 3545 3546 len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind); 3547 if (unlikely(skb->len < len)) { 3548 ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n"); 3549 return; 3550 } 3551 3552 num_records = le16_to_cpu(resp->tx_fetch_ind.num_records); 3553 num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids); 3554 3555 len += sizeof(resp->tx_fetch_ind.records[0]) * num_records; 3556 len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids; 3557 3558 if (unlikely(skb->len < len)) { 3559 ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n"); 3560 return; 3561 } 3562 3563 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %u num resps %u seq %u\n", 3564 num_records, num_resp_ids, 3565 le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num)); 3566 3567 if (!ar->htt.tx_q_state.enabled) { 3568 ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n"); 3569 return; 3570 } 3571 3572 if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) { 3573 ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n"); 3574 return; 3575 } 3576 3577 rcu_read_lock(); 3578 3579 for (i = 0; i < num_records; i++) { 3580 record = &resp->tx_fetch_ind.records[i]; 3581 peer_id = MS(le16_to_cpu(record->info), 3582 HTT_TX_FETCH_RECORD_INFO_PEER_ID); 3583 tid = MS(le16_to_cpu(record->info), 3584 HTT_TX_FETCH_RECORD_INFO_TID); 3585 max_num_msdus = le16_to_cpu(record->num_msdus); 3586 max_num_bytes = le32_to_cpu(record->num_bytes); 3587 3588 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %u tid %u msdus %zu bytes %zu\n", 3589 i, peer_id, tid, max_num_msdus, max_num_bytes); 3590 3591 if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) || 3592 unlikely(tid >= ar->htt.tx_q_state.num_tids)) { 3593 ath10k_warn(ar, "received out of range peer_id %u tid %u\n", 3594 peer_id, tid); 3595 continue; 3596 } 3597 3598 spin_lock_bh(&ar->data_lock); 3599 txq = ath10k_mac_txq_lookup(ar, peer_id, tid); 3600 spin_unlock_bh(&ar->data_lock); 3601 3602 /* It is okay to release the lock and use txq because RCU read 3603 * lock is held. 
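* The txq cannot go away while this RCU read-side critical section is held.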
3604 */ 3605 3606 if (unlikely(!txq)) { 3607 ath10k_warn(ar, "failed to lookup txq for peer_id %u tid %u\n", 3608 peer_id, tid); 3609 continue; 3610 } 3611 3612 num_msdus = 0; 3613 num_bytes = 0; 3614 3615 ieee80211_txq_schedule_start(hw, txq->ac); 3616 may_tx = ieee80211_txq_may_transmit(hw, txq); 3617 while (num_msdus < max_num_msdus && 3618 num_bytes < max_num_bytes) { 3619 if (!may_tx) 3620 break; 3621 3622 ret = ath10k_mac_tx_push_txq(hw, txq); 3623 if (ret < 0) 3624 break; 3625 3626 num_msdus++; 3627 num_bytes += ret; 3628 } 3629 ieee80211_return_txq(hw, txq, false); 3630 ieee80211_txq_schedule_end(hw, txq->ac); 3631 3632 record->num_msdus = cpu_to_le16(num_msdus); 3633 record->num_bytes = cpu_to_le32(num_bytes); 3634 3635 ath10k_htt_tx_txq_recalc(hw, txq); 3636 } 3637 3638 rcu_read_unlock(); 3639 3640 resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind); 3641 ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids); 3642 3643 ret = ath10k_htt_tx_fetch_resp(ar, 3644 resp->tx_fetch_ind.token, 3645 resp->tx_fetch_ind.fetch_seq_num, 3646 resp->tx_fetch_ind.records, 3647 num_records); 3648 if (unlikely(ret)) { 3649 ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n", 3650 le32_to_cpu(resp->tx_fetch_ind.token), ret); 3651 /* FIXME: request fw restart */ 3652 } 3653 3654 ath10k_htt_tx_txq_sync(ar); 3655 } 3656 3657 static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar, 3658 struct sk_buff *skb) 3659 { 3660 const struct htt_resp *resp = (void *)skb->data; 3661 size_t len; 3662 int num_resp_ids; 3663 3664 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n"); 3665 3666 len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm); 3667 if (unlikely(skb->len < len)) { 3668 ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n"); 3669 return; 3670 } 3671 3672 num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids); 3673 len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids; 3674 3675 if (unlikely(skb->len < len)) { 3676 ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n"); 3677 return; 3678 } 3679 3680 ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, 3681 resp->tx_fetch_confirm.resp_ids, 3682 num_resp_ids); 3683 } 3684 3685 static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar, 3686 struct sk_buff *skb) 3687 { 3688 const struct htt_resp *resp = (void *)skb->data; 3689 const struct htt_tx_mode_switch_record *record; 3690 struct ieee80211_txq *txq; 3691 struct ath10k_txq *artxq; 3692 size_t len; 3693 size_t num_records; 3694 enum htt_tx_mode_switch_mode mode; 3695 bool enable; 3696 u16 info0; 3697 u16 info1; 3698 u16 threshold; 3699 u16 peer_id; 3700 u8 tid; 3701 int i; 3702 3703 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n"); 3704 3705 len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind); 3706 if (unlikely(skb->len < len)) { 3707 ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n"); 3708 return; 3709 } 3710 3711 info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0); 3712 info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1); 3713 3714 enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE); 3715 num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS); 3716 mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE); 3717 threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD); 3718 3719 ath10k_dbg(ar, ATH10K_DBG_HTT, 3720 "htt rx tx mode switch ind info0 0x%04x info1 0x%04x enable %d num records %zd mode %d
threshold %u\n", 3721 info0, info1, enable, num_records, mode, threshold); 3722 3723 len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records; 3724 3725 if (unlikely(skb->len < len)) { 3726 ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n"); 3727 return; 3728 } 3729 3730 switch (mode) { 3731 case HTT_TX_MODE_SWITCH_PUSH: 3732 case HTT_TX_MODE_SWITCH_PUSH_PULL: 3733 break; 3734 default: 3735 ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n", 3736 mode); 3737 return; 3738 } 3739 3740 if (!enable) 3741 return; 3742 3743 ar->htt.tx_q_state.enabled = enable; 3744 ar->htt.tx_q_state.mode = mode; 3745 ar->htt.tx_q_state.num_push_allowed = threshold; 3746 3747 rcu_read_lock(); 3748 3749 for (i = 0; i < num_records; i++) { 3750 record = &resp->tx_mode_switch_ind.records[i]; 3751 info0 = le16_to_cpu(record->info0); 3752 peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID); 3753 tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID); 3754 3755 if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) || 3756 unlikely(tid >= ar->htt.tx_q_state.num_tids)) { 3757 ath10k_warn(ar, "received out of range peer_id %u tid %u\n", 3758 peer_id, tid); 3759 continue; 3760 } 3761 3762 spin_lock_bh(&ar->data_lock); 3763 txq = ath10k_mac_txq_lookup(ar, peer_id, tid); 3764 spin_unlock_bh(&ar->data_lock); 3765 3766 /* It is okay to release the lock and use txq because RCU read 3767 * lock is held. 3768 */ 3769 3770 if (unlikely(!txq)) { 3771 ath10k_warn(ar, "failed to lookup txq for peer_id %u tid %u\n", 3772 peer_id, tid); 3773 continue; 3774 } 3775 3776 spin_lock_bh(&ar->htt.tx_lock); 3777 artxq = (void *)txq->drv_priv; 3778 artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus); 3779 spin_unlock_bh(&ar->htt.tx_lock); 3780 } 3781 3782 rcu_read_unlock(); 3783 3784 ath10k_mac_tx_push_pending(ar); 3785 } 3786 3787 void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) 3788 { 3789 bool release; 3790 3791 release = ath10k_htt_t2h_msg_handler(ar, skb); 3792 3793 /* Free the indication buffer */ 3794 if (release) 3795 dev_kfree_skb_any(skb); 3796 } 3797 3798 static inline s8 ath10k_get_legacy_rate_idx(struct ath10k *ar, u8 rate) 3799 { 3800 static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12, 3801 18, 24, 36, 48, 54}; 3802 int i; 3803 3804 for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) { 3805 if (rate == legacy_rates[i]) 3806 return i; 3807 } 3808 3809 ath10k_warn(ar, "Invalid legacy rate %d peer stats", rate); 3810 return -EINVAL; 3811 } 3812 3813 static void 3814 ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar, 3815 struct ath10k_sta *arsta, 3816 struct ath10k_per_peer_tx_stats *pstats, 3817 s8 legacy_rate_idx) 3818 { 3819 struct rate_info *txrate = &arsta->txrate; 3820 struct ath10k_htt_tx_stats *tx_stats; 3821 int idx, ht_idx, gi, mcs, bw, nss; 3822 unsigned long flags; 3823 3824 if (!arsta->tx_stats) 3825 return; 3826 3827 tx_stats = arsta->tx_stats; 3828 flags = txrate->flags; 3829 gi = test_bit(ATH10K_RATE_INFO_FLAGS_SGI_BIT, &flags); 3830 mcs = ATH10K_HW_MCS_RATE(pstats->ratecode); 3831 bw = txrate->bw; 3832 nss = txrate->nss; 3833 ht_idx = mcs + (nss - 1) * 8; 3834 idx = mcs * 8 + 8 * 10 * (nss - 1); 3835 idx += bw * 2 + gi; 3836 3837 #define STATS_OP_FMT(name) tx_stats->stats[ATH10K_STATS_TYPE_##name] 3838 3839 if (txrate->flags & RATE_INFO_FLAGS_VHT_MCS) { 3840 STATS_OP_FMT(SUCC).vht[0][mcs] += pstats->succ_bytes; 3841 STATS_OP_FMT(SUCC).vht[1][mcs] += pstats->succ_pkts; 3842 
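/* In each counter table below, row 0 accumulates bytes and row 1 accumulates packets. */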
STATS_OP_FMT(FAIL).vht[0][mcs] += pstats->failed_bytes; 3843 STATS_OP_FMT(FAIL).vht[1][mcs] += pstats->failed_pkts; 3844 STATS_OP_FMT(RETRY).vht[0][mcs] += pstats->retry_bytes; 3845 STATS_OP_FMT(RETRY).vht[1][mcs] += pstats->retry_pkts; 3846 } else if (txrate->flags & RATE_INFO_FLAGS_MCS) { 3847 STATS_OP_FMT(SUCC).ht[0][ht_idx] += pstats->succ_bytes; 3848 STATS_OP_FMT(SUCC).ht[1][ht_idx] += pstats->succ_pkts; 3849 STATS_OP_FMT(FAIL).ht[0][ht_idx] += pstats->failed_bytes; 3850 STATS_OP_FMT(FAIL).ht[1][ht_idx] += pstats->failed_pkts; 3851 STATS_OP_FMT(RETRY).ht[0][ht_idx] += pstats->retry_bytes; 3852 STATS_OP_FMT(RETRY).ht[1][ht_idx] += pstats->retry_pkts; 3853 } else { 3854 mcs = legacy_rate_idx; 3855 3856 STATS_OP_FMT(SUCC).legacy[0][mcs] += pstats->succ_bytes; 3857 STATS_OP_FMT(SUCC).legacy[1][mcs] += pstats->succ_pkts; 3858 STATS_OP_FMT(FAIL).legacy[0][mcs] += pstats->failed_bytes; 3859 STATS_OP_FMT(FAIL).legacy[1][mcs] += pstats->failed_pkts; 3860 STATS_OP_FMT(RETRY).legacy[0][mcs] += pstats->retry_bytes; 3861 STATS_OP_FMT(RETRY).legacy[1][mcs] += pstats->retry_pkts; 3862 } 3863 3864 if (ATH10K_HW_AMPDU(pstats->flags)) { 3865 tx_stats->ba_fails += ATH10K_HW_BA_FAIL(pstats->flags); 3866 3867 if (txrate->flags & RATE_INFO_FLAGS_MCS) { 3868 STATS_OP_FMT(AMPDU).ht[0][ht_idx] += 3869 pstats->succ_bytes + pstats->retry_bytes; 3870 STATS_OP_FMT(AMPDU).ht[1][ht_idx] += 3871 pstats->succ_pkts + pstats->retry_pkts; 3872 } else { 3873 STATS_OP_FMT(AMPDU).vht[0][mcs] += 3874 pstats->succ_bytes + pstats->retry_bytes; 3875 STATS_OP_FMT(AMPDU).vht[1][mcs] += 3876 pstats->succ_pkts + pstats->retry_pkts; 3877 } 3878 STATS_OP_FMT(AMPDU).bw[0][bw] += 3879 pstats->succ_bytes + pstats->retry_bytes; 3880 STATS_OP_FMT(AMPDU).nss[0][nss - 1] += 3881 pstats->succ_bytes + pstats->retry_bytes; 3882 STATS_OP_FMT(AMPDU).gi[0][gi] += 3883 pstats->succ_bytes + pstats->retry_bytes; 3884 STATS_OP_FMT(AMPDU).rate_table[0][idx] += 3885 pstats->succ_bytes + pstats->retry_bytes; 3886 STATS_OP_FMT(AMPDU).bw[1][bw] += 3887 pstats->succ_pkts + pstats->retry_pkts; 3888 STATS_OP_FMT(AMPDU).nss[1][nss - 1] += 3889 pstats->succ_pkts + pstats->retry_pkts; 3890 STATS_OP_FMT(AMPDU).gi[1][gi] += 3891 pstats->succ_pkts + pstats->retry_pkts; 3892 STATS_OP_FMT(AMPDU).rate_table[1][idx] += 3893 pstats->succ_pkts + pstats->retry_pkts; 3894 } else { 3895 tx_stats->ack_fails += 3896 ATH10K_HW_BA_FAIL(pstats->flags); 3897 } 3898 3899 STATS_OP_FMT(SUCC).bw[0][bw] += pstats->succ_bytes; 3900 STATS_OP_FMT(SUCC).nss[0][nss - 1] += pstats->succ_bytes; 3901 STATS_OP_FMT(SUCC).gi[0][gi] += pstats->succ_bytes; 3902 3903 STATS_OP_FMT(SUCC).bw[1][bw] += pstats->succ_pkts; 3904 STATS_OP_FMT(SUCC).nss[1][nss - 1] += pstats->succ_pkts; 3905 STATS_OP_FMT(SUCC).gi[1][gi] += pstats->succ_pkts; 3906 3907 STATS_OP_FMT(FAIL).bw[0][bw] += pstats->failed_bytes; 3908 STATS_OP_FMT(FAIL).nss[0][nss - 1] += pstats->failed_bytes; 3909 STATS_OP_FMT(FAIL).gi[0][gi] += pstats->failed_bytes; 3910 3911 STATS_OP_FMT(FAIL).bw[1][bw] += pstats->failed_pkts; 3912 STATS_OP_FMT(FAIL).nss[1][nss - 1] += pstats->failed_pkts; 3913 STATS_OP_FMT(FAIL).gi[1][gi] += pstats->failed_pkts; 3914 3915 STATS_OP_FMT(RETRY).bw[0][bw] += pstats->retry_bytes; 3916 STATS_OP_FMT(RETRY).nss[0][nss - 1] += pstats->retry_bytes; 3917 STATS_OP_FMT(RETRY).gi[0][gi] += pstats->retry_bytes; 3918 3919 STATS_OP_FMT(RETRY).bw[1][bw] += pstats->retry_pkts; 3920 STATS_OP_FMT(RETRY).nss[1][nss - 1] += pstats->retry_pkts; 3921 STATS_OP_FMT(RETRY).gi[1][gi] += pstats->retry_pkts; 3922 3923 if (txrate->flags >= 
RATE_INFO_FLAGS_MCS) { 3924 STATS_OP_FMT(SUCC).rate_table[0][idx] += pstats->succ_bytes; 3925 STATS_OP_FMT(SUCC).rate_table[1][idx] += pstats->succ_pkts; 3926 STATS_OP_FMT(FAIL).rate_table[0][idx] += pstats->failed_bytes; 3927 STATS_OP_FMT(FAIL).rate_table[1][idx] += pstats->failed_pkts; 3928 STATS_OP_FMT(RETRY).rate_table[0][idx] += pstats->retry_bytes; 3929 STATS_OP_FMT(RETRY).rate_table[1][idx] += pstats->retry_pkts; 3930 } 3931 3932 tx_stats->tx_duration += pstats->duration; 3933 } 3934 3935 static void 3936 ath10k_update_per_peer_tx_stats(struct ath10k *ar, 3937 struct ieee80211_sta *sta, 3938 struct ath10k_per_peer_tx_stats *peer_stats) 3939 { 3940 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 3941 struct ieee80211_chanctx_conf *conf = NULL; 3942 u8 rate = 0, sgi; 3943 s8 rate_idx = 0; 3944 bool skip_auto_rate; 3945 struct rate_info txrate; 3946 3947 lockdep_assert_held(&ar->data_lock); 3948 3949 txrate.flags = ATH10K_HW_PREAMBLE(peer_stats->ratecode); 3950 txrate.bw = ATH10K_HW_BW(peer_stats->flags); 3951 txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode); 3952 txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode); 3953 sgi = ATH10K_HW_GI(peer_stats->flags); 3954 skip_auto_rate = ATH10K_FW_SKIPPED_RATE_CTRL(peer_stats->flags); 3955 3956 /* Firmware's rate control skips broadcast/management frames, 3957 * if host has configure fixed rates and in some other special cases. 3958 */ 3959 if (skip_auto_rate) 3960 return; 3961 3962 if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) { 3963 ath10k_warn(ar, "Invalid VHT mcs %d peer stats", txrate.mcs); 3964 return; 3965 } 3966 3967 if (txrate.flags == WMI_RATE_PREAMBLE_HT && 3968 (txrate.mcs > 7 || txrate.nss < 1)) { 3969 ath10k_warn(ar, "Invalid HT mcs %d nss %d peer stats", 3970 txrate.mcs, txrate.nss); 3971 return; 3972 } 3973 3974 memset(&arsta->txrate, 0, sizeof(arsta->txrate)); 3975 memset(&arsta->tx_info.status, 0, sizeof(arsta->tx_info.status)); 3976 if (txrate.flags == WMI_RATE_PREAMBLE_CCK || 3977 txrate.flags == WMI_RATE_PREAMBLE_OFDM) { 3978 rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode); 3979 /* This is hacky, FW sends CCK rate 5.5Mbps as 6 */ 3980 if (rate == 6 && txrate.flags == WMI_RATE_PREAMBLE_CCK) 3981 rate = 5; 3982 rate_idx = ath10k_get_legacy_rate_idx(ar, rate); 3983 if (rate_idx < 0) 3984 return; 3985 arsta->txrate.legacy = rate; 3986 } else if (txrate.flags == WMI_RATE_PREAMBLE_HT) { 3987 arsta->txrate.flags = RATE_INFO_FLAGS_MCS; 3988 arsta->txrate.mcs = txrate.mcs + 8 * (txrate.nss - 1); 3989 } else { 3990 arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS; 3991 arsta->txrate.mcs = txrate.mcs; 3992 } 3993 3994 switch (txrate.flags) { 3995 case WMI_RATE_PREAMBLE_OFDM: 3996 if (arsta->arvif && arsta->arvif->vif) 3997 conf = rcu_dereference(arsta->arvif->vif->bss_conf.chanctx_conf); 3998 if (conf && conf->def.chan->band == NL80211_BAND_5GHZ) 3999 arsta->tx_info.status.rates[0].idx = rate_idx - 4; 4000 break; 4001 case WMI_RATE_PREAMBLE_CCK: 4002 arsta->tx_info.status.rates[0].idx = rate_idx; 4003 if (sgi) 4004 arsta->tx_info.status.rates[0].flags |= 4005 (IEEE80211_TX_RC_USE_SHORT_PREAMBLE | 4006 IEEE80211_TX_RC_SHORT_GI); 4007 break; 4008 case WMI_RATE_PREAMBLE_HT: 4009 arsta->tx_info.status.rates[0].idx = 4010 txrate.mcs + ((txrate.nss - 1) * 8); 4011 if (sgi) 4012 arsta->tx_info.status.rates[0].flags |= 4013 IEEE80211_TX_RC_SHORT_GI; 4014 arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_MCS; 4015 break; 4016 case WMI_RATE_PREAMBLE_VHT: 4017 
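/* VHT rates are reported to mac80211 as an (mcs, nss) pair via
 * ieee80211_rate_set_vht() rather than as the flat MCS index used in the
 * HT case above.
 */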
ieee80211_rate_set_vht(&arsta->tx_info.status.rates[0], 4018 txrate.mcs, txrate.nss); 4019 if (sgi) 4020 arsta->tx_info.status.rates[0].flags |= 4021 IEEE80211_TX_RC_SHORT_GI; 4022 arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_VHT_MCS; 4023 break; 4024 } 4025 4026 arsta->txrate.nss = txrate.nss; 4027 arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw); 4028 arsta->last_tx_bitrate = cfg80211_calculate_bitrate(&arsta->txrate); 4029 if (sgi) 4030 arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; 4031 4032 switch (arsta->txrate.bw) { 4033 case RATE_INFO_BW_40: 4034 arsta->tx_info.status.rates[0].flags |= 4035 IEEE80211_TX_RC_40_MHZ_WIDTH; 4036 break; 4037 case RATE_INFO_BW_80: 4038 arsta->tx_info.status.rates[0].flags |= 4039 IEEE80211_TX_RC_80_MHZ_WIDTH; 4040 break; 4041 case RATE_INFO_BW_160: 4042 arsta->tx_info.status.rates[0].flags |= 4043 IEEE80211_TX_RC_160_MHZ_WIDTH; 4044 break; 4045 } 4046 4047 if (peer_stats->succ_pkts) { 4048 arsta->tx_info.flags = IEEE80211_TX_STAT_ACK; 4049 arsta->tx_info.status.rates[0].count = 1; 4050 ieee80211_tx_rate_update(ar->hw, sta, &arsta->tx_info); 4051 } 4052 4053 if (ar->htt.disable_tx_comp) { 4054 arsta->tx_failed += peer_stats->failed_pkts; 4055 ath10k_dbg(ar, ATH10K_DBG_HTT, "tx failed %d\n", 4056 arsta->tx_failed); 4057 } 4058 4059 arsta->tx_retries += peer_stats->retry_pkts; 4060 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx retries %d", arsta->tx_retries); 4061 4062 if (ath10k_debug_is_extd_tx_stats_enabled(ar)) 4063 ath10k_accumulate_per_peer_tx_stats(ar, arsta, peer_stats, 4064 rate_idx); 4065 } 4066 4067 static void ath10k_htt_fetch_peer_stats(struct ath10k *ar, 4068 struct sk_buff *skb) 4069 { 4070 struct htt_resp *resp = (struct htt_resp *)skb->data; 4071 struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats; 4072 struct htt_per_peer_tx_stats_ind *tx_stats; 4073 struct ieee80211_sta *sta; 4074 struct ath10k_peer *peer; 4075 int peer_id, i; 4076 u8 ppdu_len, num_ppdu; 4077 4078 num_ppdu = resp->peer_tx_stats.num_ppdu; 4079 ppdu_len = resp->peer_tx_stats.ppdu_len * sizeof(__le32); 4080 4081 if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) { 4082 ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len); 4083 return; 4084 } 4085 4086 tx_stats = (struct htt_per_peer_tx_stats_ind *) 4087 (resp->peer_tx_stats.payload); 4088 peer_id = __le16_to_cpu(tx_stats->peer_id); 4089 4090 rcu_read_lock(); 4091 spin_lock_bh(&ar->data_lock); 4092 peer = ath10k_peer_find_by_id(ar, peer_id); 4093 if (!peer || !peer->sta) { 4094 ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n", 4095 peer_id); 4096 goto out; 4097 } 4098 4099 sta = peer->sta; 4100 for (i = 0; i < num_ppdu; i++) { 4101 tx_stats = (struct htt_per_peer_tx_stats_ind *) 4102 (resp->peer_tx_stats.payload + i * ppdu_len); 4103 4104 p_tx_stats->succ_bytes = __le32_to_cpu(tx_stats->succ_bytes); 4105 p_tx_stats->retry_bytes = __le32_to_cpu(tx_stats->retry_bytes); 4106 p_tx_stats->failed_bytes = 4107 __le32_to_cpu(tx_stats->failed_bytes); 4108 p_tx_stats->ratecode = tx_stats->ratecode; 4109 p_tx_stats->flags = tx_stats->flags; 4110 p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts); 4111 p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts); 4112 p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts); 4113 p_tx_stats->duration = __le16_to_cpu(tx_stats->tx_duration); 4114 4115 ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats); 4116 } 4117 4118 out: 4119 spin_unlock_bh(&ar->data_lock); 4120 rcu_read_unlock(); 4121 } 4122 4123 static void 
ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data) 4124 { 4125 struct ath10k_pktlog_hdr *hdr = (struct ath10k_pktlog_hdr *)data; 4126 struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats; 4127 struct ath10k_10_2_peer_tx_stats *tx_stats; 4128 struct ieee80211_sta *sta; 4129 struct ath10k_peer *peer; 4130 u16 log_type = __le16_to_cpu(hdr->log_type); 4131 u32 peer_id = 0, i; 4132 4133 if (log_type != ATH_PKTLOG_TYPE_TX_STAT) 4134 return; 4135 4136 tx_stats = (struct ath10k_10_2_peer_tx_stats *)((hdr->payload) + 4137 ATH10K_10_2_TX_STATS_OFFSET); 4138 4139 if (!tx_stats->tx_ppdu_cnt) 4140 return; 4141 4142 peer_id = tx_stats->peer_id; 4143 4144 rcu_read_lock(); 4145 spin_lock_bh(&ar->data_lock); 4146 peer = ath10k_peer_find_by_id(ar, peer_id); 4147 if (!peer || !peer->sta) { 4148 ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n", 4149 peer_id); 4150 goto out; 4151 } 4152 4153 sta = peer->sta; 4154 for (i = 0; i < tx_stats->tx_ppdu_cnt; i++) { 4155 p_tx_stats->succ_bytes = 4156 __le16_to_cpu(tx_stats->success_bytes[i]); 4157 p_tx_stats->retry_bytes = 4158 __le16_to_cpu(tx_stats->retry_bytes[i]); 4159 p_tx_stats->failed_bytes = 4160 __le16_to_cpu(tx_stats->failed_bytes[i]); 4161 p_tx_stats->ratecode = tx_stats->ratecode[i]; 4162 p_tx_stats->flags = tx_stats->flags[i]; 4163 p_tx_stats->succ_pkts = tx_stats->success_pkts[i]; 4164 p_tx_stats->retry_pkts = tx_stats->retry_pkts[i]; 4165 p_tx_stats->failed_pkts = tx_stats->failed_pkts[i]; 4166 4167 ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats); 4168 } 4169 spin_unlock_bh(&ar->data_lock); 4170 rcu_read_unlock(); 4171 4172 return; 4173 4174 out: 4175 spin_unlock_bh(&ar->data_lock); 4176 rcu_read_unlock(); 4177 } 4178 4179 static int ath10k_htt_rx_pn_len(enum htt_security_types sec_type) 4180 { 4181 switch (sec_type) { 4182 case HTT_SECURITY_TKIP: 4183 case HTT_SECURITY_TKIP_NOMIC: 4184 case HTT_SECURITY_AES_CCMP: 4185 return 48; 4186 default: 4187 return 0; 4188 } 4189 } 4190 4191 static void ath10k_htt_rx_sec_ind_handler(struct ath10k *ar, 4192 struct htt_security_indication *ev) 4193 { 4194 enum htt_txrx_sec_cast_type sec_index; 4195 enum htt_security_types sec_type; 4196 struct ath10k_peer *peer; 4197 4198 spin_lock_bh(&ar->data_lock); 4199 4200 peer = ath10k_peer_find_by_id(ar, __le16_to_cpu(ev->peer_id)); 4201 if (!peer) { 4202 ath10k_warn(ar, "failed to find peer id %d for security indication", 4203 __le16_to_cpu(ev->peer_id)); 4204 goto out; 4205 } 4206 4207 sec_type = MS(ev->flags, HTT_SECURITY_TYPE); 4208 4209 if (ev->flags & HTT_SECURITY_IS_UNICAST) 4210 sec_index = HTT_TXRX_SEC_UCAST; 4211 else 4212 sec_index = HTT_TXRX_SEC_MCAST; 4213 4214 peer->rx_pn[sec_index].sec_type = sec_type; 4215 peer->rx_pn[sec_index].pn_len = ath10k_htt_rx_pn_len(sec_type); 4216 4217 memset(peer->tids_last_pn_valid, 0, sizeof(peer->tids_last_pn_valid)); 4218 memset(peer->tids_last_pn, 0, sizeof(peer->tids_last_pn)); 4219 4220 out: 4221 spin_unlock_bh(&ar->data_lock); 4222 } 4223 4224 bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) 4225 { 4226 struct ath10k_htt *htt = &ar->htt; 4227 struct htt_resp *resp = (struct htt_resp *)skb->data; 4228 enum htt_t2h_msg_type type; 4229 4230 /* confirm alignment */ 4231 if (!IS_ALIGNED((unsigned long)skb->data, 4)) 4232 ath10k_warn(ar, "unaligned htt message, expect trouble\n"); 4233 4234 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n", 4235 resp->hdr.msg_type); 4236 4237 if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) { 4238 ath10k_dbg(ar, ATH10K_DBG_HTT, 
"htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X", 4239 resp->hdr.msg_type, ar->htt.t2h_msg_types_max); 4240 return true; 4241 } 4242 type = ar->htt.t2h_msg_types[resp->hdr.msg_type]; 4243 4244 switch (type) { 4245 case HTT_T2H_MSG_TYPE_VERSION_CONF: { 4246 htt->target_version_major = resp->ver_resp.major; 4247 htt->target_version_minor = resp->ver_resp.minor; 4248 complete(&htt->target_version_received); 4249 break; 4250 } 4251 case HTT_T2H_MSG_TYPE_RX_IND: 4252 if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) { 4253 ath10k_htt_rx_proc_rx_ind_ll(htt, &resp->rx_ind); 4254 } else { 4255 skb_queue_tail(&htt->rx_indication_head, skb); 4256 return false; 4257 } 4258 break; 4259 case HTT_T2H_MSG_TYPE_PEER_MAP: { 4260 struct htt_peer_map_event ev = { 4261 .vdev_id = resp->peer_map.vdev_id, 4262 .peer_id = __le16_to_cpu(resp->peer_map.peer_id), 4263 }; 4264 memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr)); 4265 ath10k_peer_map_event(htt, &ev); 4266 break; 4267 } 4268 case HTT_T2H_MSG_TYPE_PEER_UNMAP: { 4269 struct htt_peer_unmap_event ev = { 4270 .peer_id = __le16_to_cpu(resp->peer_unmap.peer_id), 4271 }; 4272 ath10k_peer_unmap_event(htt, &ev); 4273 break; 4274 } 4275 case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: { 4276 struct htt_tx_done tx_done = {}; 4277 struct ath10k_htt *htt = &ar->htt; 4278 struct ath10k_htc *htc = &ar->htc; 4279 struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid]; 4280 int status = __le32_to_cpu(resp->mgmt_tx_completion.status); 4281 int info = __le32_to_cpu(resp->mgmt_tx_completion.info); 4282 4283 tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id); 4284 4285 switch (status) { 4286 case HTT_MGMT_TX_STATUS_OK: 4287 tx_done.status = HTT_TX_COMPL_STATE_ACK; 4288 if (test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, 4289 ar->wmi.svc_map) && 4290 (resp->mgmt_tx_completion.flags & 4291 HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI)) { 4292 tx_done.ack_rssi = 4293 FIELD_GET(HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK, 4294 info); 4295 } 4296 break; 4297 case HTT_MGMT_TX_STATUS_RETRY: 4298 tx_done.status = HTT_TX_COMPL_STATE_NOACK; 4299 break; 4300 case HTT_MGMT_TX_STATUS_DROP: 4301 tx_done.status = HTT_TX_COMPL_STATE_DISCARD; 4302 break; 4303 } 4304 4305 if (htt->disable_tx_comp) { 4306 spin_lock_bh(&htc->tx_lock); 4307 ep->tx_credits++; 4308 spin_unlock_bh(&htc->tx_lock); 4309 } 4310 4311 status = ath10k_txrx_tx_unref(htt, &tx_done); 4312 if (!status) { 4313 spin_lock_bh(&htt->tx_lock); 4314 ath10k_htt_tx_mgmt_dec_pending(htt); 4315 spin_unlock_bh(&htt->tx_lock); 4316 } 4317 break; 4318 } 4319 case HTT_T2H_MSG_TYPE_TX_COMPL_IND: 4320 ath10k_htt_rx_tx_compl_ind(htt->ar, skb); 4321 break; 4322 case HTT_T2H_MSG_TYPE_SEC_IND: { 4323 struct ath10k *ar = htt->ar; 4324 struct htt_security_indication *ev = &resp->security_indication; 4325 4326 ath10k_htt_rx_sec_ind_handler(ar, ev); 4327 ath10k_dbg(ar, ATH10K_DBG_HTT, 4328 "sec ind peer_id %d unicast %d type %d\n", 4329 __le16_to_cpu(ev->peer_id), 4330 !!(ev->flags & HTT_SECURITY_IS_UNICAST), 4331 MS(ev->flags, HTT_SECURITY_TYPE)); 4332 complete(&ar->install_key_done); 4333 break; 4334 } 4335 case HTT_T2H_MSG_TYPE_RX_FRAG_IND: { 4336 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ", 4337 skb->data, skb->len); 4338 atomic_inc(&htt->num_mpdus_ready); 4339 4340 return ath10k_htt_rx_proc_rx_frag_ind(htt, 4341 &resp->rx_frag_ind, 4342 skb); 4343 } 4344 case HTT_T2H_MSG_TYPE_TEST: 4345 break; 4346 case HTT_T2H_MSG_TYPE_STATS_CONF: 4347 trace_ath10k_htt_stats(ar, skb->data, skb->len); 4348 break; 4349 case 
HTT_T2H_MSG_TYPE_TX_INSPECT_IND: 4350 /* Firmware can return tx frames if it's unable to fully 4351 * process them and suspects host may be able to fix it. ath10k 4352 * sends all tx frames as already inspected so this shouldn't 4353 * happen unless fw has a bug. 4354 */ 4355 ath10k_warn(ar, "received an unexpected htt tx inspect event\n"); 4356 break; 4357 case HTT_T2H_MSG_TYPE_RX_ADDBA: 4358 ath10k_htt_rx_addba(ar, resp); 4359 break; 4360 case HTT_T2H_MSG_TYPE_RX_DELBA: 4361 ath10k_htt_rx_delba(ar, resp); 4362 break; 4363 case HTT_T2H_MSG_TYPE_PKTLOG: { 4364 trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload, 4365 skb->len - 4366 offsetof(struct htt_resp, 4367 pktlog_msg.payload)); 4368 4369 if (ath10k_peer_stats_enabled(ar)) 4370 ath10k_fetch_10_2_tx_stats(ar, 4371 resp->pktlog_msg.payload); 4372 break; 4373 } 4374 case HTT_T2H_MSG_TYPE_RX_FLUSH: { 4375 /* Ignore this event because mac80211 takes care of Rx 4376 * aggregation reordering. 4377 */ 4378 break; 4379 } 4380 case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: { 4381 skb_queue_tail(&htt->rx_in_ord_compl_q, skb); 4382 return false; 4383 } 4384 case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND: { 4385 struct ath10k_htt *htt = &ar->htt; 4386 struct ath10k_htc *htc = &ar->htc; 4387 struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid]; 4388 u32 msg_word = __le32_to_cpu(*(__le32 *)resp); 4389 int htt_credit_delta; 4390 4391 htt_credit_delta = HTT_TX_CREDIT_DELTA_ABS_GET(msg_word); 4392 if (HTT_TX_CREDIT_SIGN_BIT_GET(msg_word)) 4393 htt_credit_delta = -htt_credit_delta; 4394 4395 ath10k_dbg(ar, ATH10K_DBG_HTT, 4396 "htt credit update delta %d\n", 4397 htt_credit_delta); 4398 4399 if (htt->disable_tx_comp) { 4400 spin_lock_bh(&htc->tx_lock); 4401 ep->tx_credits += htt_credit_delta; 4402 spin_unlock_bh(&htc->tx_lock); 4403 ath10k_dbg(ar, ATH10K_DBG_HTT, 4404 "htt credit total %d\n", 4405 ep->tx_credits); 4406 ep->ep_ops.ep_tx_credits(htc->ar); 4407 } 4408 break; 4409 } 4410 case HTT_T2H_MSG_TYPE_CHAN_CHANGE: { 4411 u32 phymode = __le32_to_cpu(resp->chan_change.phymode); 4412 u32 freq = __le32_to_cpu(resp->chan_change.freq); 4413 4414 ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq); 4415 ath10k_dbg(ar, ATH10K_DBG_HTT, 4416 "htt chan change freq %u phymode %s\n", 4417 freq, ath10k_wmi_phymode_str(phymode)); 4418 break; 4419 } 4420 case HTT_T2H_MSG_TYPE_AGGR_CONF: 4421 break; 4422 case HTT_T2H_MSG_TYPE_TX_FETCH_IND: { 4423 struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC); 4424 4425 if (!tx_fetch_ind) { 4426 ath10k_warn(ar, "failed to copy htt tx fetch ind\n"); 4427 break; 4428 } 4429 skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind); 4430 break; 4431 } 4432 case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM: 4433 ath10k_htt_rx_tx_fetch_confirm(ar, skb); 4434 break; 4435 case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND: 4436 ath10k_htt_rx_tx_mode_switch_ind(ar, skb); 4437 break; 4438 case HTT_T2H_MSG_TYPE_PEER_STATS: 4439 ath10k_htt_fetch_peer_stats(ar, skb); 4440 break; 4441 case HTT_T2H_MSG_TYPE_EN_STATS: 4442 default: 4443 ath10k_warn(ar, "htt event (%d) not handled\n", 4444 resp->hdr.msg_type); 4445 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ", 4446 skb->data, skb->len); 4447 break; 4448 } 4449 return true; 4450 } 4451 EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler); 4452 4453 void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar, 4454 struct sk_buff *skb) 4455 { 4456 trace_ath10k_htt_pktlog(ar, skb->data, skb->len); 4457 dev_kfree_skb_any(skb); 4458 } 4459 EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler); 4460 4461 
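/* Drain up to (budget - quota) MSDUs from rx_msdus_q and hand them to
 * mac80211 via ath10k_process_rx(); returns the updated quota so the NAPI
 * poll loop in ath10k_htt_txrx_compl_task() can account for the work done.
 */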
static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget) 4462 { 4463 struct sk_buff *skb; 4464 4465 while (quota < budget) { 4466 if (skb_queue_empty(&ar->htt.rx_msdus_q)) 4467 break; 4468 4469 skb = skb_dequeue(&ar->htt.rx_msdus_q); 4470 if (!skb) 4471 break; 4472 ath10k_process_rx(ar, skb); 4473 quota++; 4474 } 4475 4476 return quota; 4477 } 4478 4479 int ath10k_htt_rx_hl_indication(struct ath10k *ar, int budget) 4480 { 4481 struct htt_resp *resp; 4482 struct ath10k_htt *htt = &ar->htt; 4483 struct sk_buff *skb; 4484 bool release; 4485 int quota; 4486 4487 for (quota = 0; quota < budget; quota++) { 4488 skb = skb_dequeue(&htt->rx_indication_head); 4489 if (!skb) 4490 break; 4491 4492 resp = (struct htt_resp *)skb->data; 4493 4494 release = ath10k_htt_rx_proc_rx_ind_hl(htt, 4495 &resp->rx_ind_hl, 4496 skb, 4497 HTT_RX_PN_CHECK, 4498 HTT_RX_NON_TKIP_MIC); 4499 4500 if (release) 4501 dev_kfree_skb_any(skb); 4502 4503 ath10k_dbg(ar, ATH10K_DBG_HTT, "rx indication poll pending count:%d\n", 4504 skb_queue_len(&htt->rx_indication_head)); 4505 } 4506 return quota; 4507 } 4508 EXPORT_SYMBOL(ath10k_htt_rx_hl_indication); 4509 4510 int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget) 4511 { 4512 struct ath10k_htt *htt = &ar->htt; 4513 struct htt_tx_done tx_done = {}; 4514 struct sk_buff_head tx_ind_q; 4515 struct sk_buff *skb; 4516 unsigned long flags; 4517 int quota = 0, done, ret; 4518 bool resched_napi = false; 4519 4520 __skb_queue_head_init(&tx_ind_q); 4521 4522 /* Process pending frames before dequeuing more data 4523 * from hardware. 4524 */ 4525 quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget); 4526 if (quota == budget) { 4527 resched_napi = true; 4528 goto exit; 4529 } 4530 4531 while ((skb = skb_dequeue(&htt->rx_in_ord_compl_q))) { 4532 spin_lock_bh(&htt->rx_ring.lock); 4533 ret = ath10k_htt_rx_in_ord_ind(ar, skb); 4534 spin_unlock_bh(&htt->rx_ring.lock); 4535 4536 dev_kfree_skb_any(skb); 4537 if (ret == -EIO) { 4538 resched_napi = true; 4539 goto exit; 4540 } 4541 } 4542 4543 while (atomic_read(&htt->num_mpdus_ready)) { 4544 ret = ath10k_htt_rx_handle_amsdu(htt); 4545 if (ret == -EIO) { 4546 resched_napi = true; 4547 goto exit; 4548 } 4549 atomic_dec(&htt->num_mpdus_ready); 4550 } 4551 4552 /* Deliver received data after processing data from hardware */ 4553 quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget); 4554 4555 /* From NAPI documentation: 4556 * The napi poll() function may also process TX completions, in which 4557 * case if it processes the entire TX ring then it should count that 4558 * work as the rest of the budget. 4559 */ 4560 if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo)) 4561 quota = budget; 4562 4563 /* kfifo_get: called only within txrx_tasklet so it's neatly serialized. 4564 * From kfifo_get() documentation: 4565 * Note that with only one concurrent reader and one concurrent writer, 4566 * you don't need extra locking to use these macro. 
4567 */ 4568 while (kfifo_get(&htt->txdone_fifo, &tx_done)) 4569 ath10k_txrx_tx_unref(htt, &tx_done); 4570 4571 ath10k_mac_tx_push_pending(ar); 4572 4573 spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags); 4574 skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q); 4575 spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags); 4576 4577 while ((skb = __skb_dequeue(&tx_ind_q))) { 4578 ath10k_htt_rx_tx_fetch_ind(ar, skb); 4579 dev_kfree_skb_any(skb); 4580 } 4581 4582 exit: 4583 ath10k_htt_rx_msdu_buff_replenish(htt); 4584 /* In case of rx failure or more data to read, report budget 4585 * to reschedule NAPI poll 4586 */ 4587 done = resched_napi ? budget : quota; 4588 4589 return done; 4590 } 4591 EXPORT_SYMBOL(ath10k_htt_txrx_compl_task); 4592 4593 static const struct ath10k_htt_rx_ops htt_rx_ops_32 = { 4594 .htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32, 4595 .htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32, 4596 .htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32, 4597 .htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32, 4598 .htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32, 4599 }; 4600 4601 static const struct ath10k_htt_rx_ops htt_rx_ops_64 = { 4602 .htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64, 4603 .htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64, 4604 .htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64, 4605 .htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64, 4606 .htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64, 4607 }; 4608 4609 static const struct ath10k_htt_rx_ops htt_rx_ops_hl = { 4610 .htt_rx_proc_rx_frag_ind = ath10k_htt_rx_proc_rx_frag_ind_hl, 4611 }; 4612 4613 void ath10k_htt_set_rx_ops(struct ath10k_htt *htt) 4614 { 4615 struct ath10k *ar = htt->ar; 4616 4617 if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) 4618 htt->rx_ops = &htt_rx_ops_hl; 4619 else if (ar->hw_params.target_64bit) 4620 htt->rx_ops = &htt_rx_ops_64; 4621 else 4622 htt->rx_ops = &htt_rx_ops_32; 4623 } 4624
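/* Illustrative sketch (not part of the driver): once ath10k_htt_set_rx_ops()
 * has selected an ops table, common rx code reaches the bus-width specific
 * ring helpers only through that table, e.g. roughly:
 *
 *	if (htt->rx_ops->htt_set_paddrs_ring)
 *		htt->rx_ops->htt_set_paddrs_ring(htt, paddr, idx);
 *
 * so the shared rx path stays agnostic of 32-bit vs 64-bit target DMA
 * addressing, while high-latency (HL) devices only provide the fragment
 * indication hook.
 */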