// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */

#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"
#include "mac.h"

#include <linux/log2.h>
#include <linux/bitfield.h>

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

#define HTT_RX_RING_REFILL_RESCHED_MS 5

/* shortcut to interpret a raw memory buffer as a rx descriptor */
#define HTT_RX_BUF_TO_RX_DESC(hw, buf) ath10k_htt_rx_desc_from_raw_buffer(hw, buf)

static int ath10k_htt_rx_get_csum_state(struct ath10k_hw_params *hw, struct sk_buff *skb);

static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)
{
	struct ath10k_skb_rxcb *rxcb;

	hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
		if (rxcb->paddr == paddr)
			return ATH10K_RXCB_SKB(rxcb);

	WARN_ON_ONCE(1);
	return NULL;
}

static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct ath10k_skb_rxcb *rxcb;
	struct hlist_node *n;
	int i;

	if (htt->rx_ring.in_ord_rx) {
		hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
			skb = ATH10K_RXCB_SKB(rxcb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			hash_del(&rxcb->hlist);
			dev_kfree_skb_any(skb);
		}
	} else {
		for (i = 0; i < htt->rx_ring.size; i++) {
			skb = htt->rx_ring.netbufs_ring[i];
			if (!skb)
				continue;

			rxcb = ATH10K_SKB_RXCB(skb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
		}
	}

	htt->rx_ring.fill_cnt = 0;
	hash_init(htt->rx_ring.skb_table);
	memset(htt->rx_ring.netbufs_ring, 0,
	       htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
}

static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt)
{
	return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_32);
}

static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt)
{
	return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_64);
}

static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt,
					     void *vaddr)
{
	htt->rx_ring.paddrs_ring_32 = vaddr;
}

static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt,
					     void *vaddr)
{
	htt->rx_ring.paddrs_ring_64 = vaddr;
}

static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt,
					  dma_addr_t paddr, int idx)
{
	htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr);
}

static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt,
					  dma_addr_t paddr, int idx)
{
	htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr);
}

static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx)
{
	htt->rx_ring.paddrs_ring_32[idx] = 0;
}

static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx)
{
	htt->rx_ring.paddrs_ring_64[idx] = 0;
}

static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt)
{
	return (void *)htt->rx_ring.paddrs_ring_32;
}

static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt)
{
	return (void *)htt->rx_ring.paddrs_ring_64;
}
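
/* Post up to @num rx buffers to the ring: allocate an skb per slot, DMA-map
 * it, clear the descriptor attention word, store the physical address in the
 * paddr ring (and the skb pointer in netbufs_ring), then publish the updated
 * alloc index to the target.  With in-order rx the skb is additionally hashed
 * by its physical address so it can be looked up when the in-order indication
 * arrives.
 */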
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct ath10k_hw_params *hw = &htt->ar->hw_params;
	struct htt_rx_desc *rx_desc;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	/* The Full Rx Reorder firmware has no way of telling the host
	 * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
	 * To keep things simple make sure ring is always half empty. This
	 * guarantees there'll be no replenishment overruns possible.
	 */
	BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);

	idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	if (idx < 0 || idx >= htt->rx_ring.size) {
		ath10k_err(htt->ar, "rx ring index is not valid, firmware malfunctioning?\n");
		idx &= htt->rx_ring.size_mask;
		ret = -ENOMEM;
		goto fail;
	}

	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = HTT_RX_BUF_TO_RX_DESC(hw, skb->data);
		ath10k_htt_rx_desc_get_attention(hw, rx_desc)->flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		rxcb = ATH10K_SKB_RXCB(skb);
		rxcb->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		ath10k_htt_set_paddrs_ring(htt, paddr, idx);
		htt->rx_ring.fill_cnt++;

		if (htt->rx_ring.in_ord_rx) {
			hash_add(htt->rx_ring.skb_table,
				 &ATH10K_SKB_RXCB(skb)->hlist,
				 paddr);
		}

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	/*
	 * Make sure the rx buffer is updated before available buffer
	 * index to avoid any potential rx ring corruption.
	 */
	mb();
	*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
	return ret;
}

static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is RX may take up significant amount of CPU cycles and starve
	 * other tasks, e.g. TX on an ethernet device while acting as a bridge
	 * with ath10k wlan interface. This ended up with very poor performance
	 * once the host system's CPU was overwhelmed with RX on ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turn makes use of the fact tasklets are
	 * processed in FIFO order. This means actual RX processing can starve
	 * out refilling. If there's not enough buffers on RX ring FW will not
	 * report RX until it is refilled with enough buffers. This
	 * automatically balances load with respect to CPU power.
	 *
	 * This probably comes at a cost of lower maximum throughput but
	 * improves the average and stability.
	 */
	spin_lock_bh(&htt->rx_ring.lock);
	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
	num_deficit -= num_to_fill;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	} else if (num_deficit > 0) {
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}

static void ath10k_htt_rx_ring_refill_retry(struct timer_list *t)
{
	struct ath10k_htt *htt = from_timer(htt, t, rx_ring.refill_retry_timer);

	ath10k_htt_rx_msdu_buff_replenish(htt);
}

int ath10k_htt_rx_ring_refill(struct ath10k *ar)
{
	struct ath10k_htt *htt = &ar->htt;
	int ret;

	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		return 0;

	spin_lock_bh(&htt->rx_ring.lock);
	ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
					      htt->rx_ring.fill_cnt));

	if (ret)
		ath10k_htt_rx_ring_free(htt);

	spin_unlock_bh(&htt->rx_ring.lock);

	return ret;
}

void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
	if (htt->ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		return;

	del_timer_sync(&htt->rx_ring.refill_retry_timer);

	skb_queue_purge(&htt->rx_msdus_q);
	skb_queue_purge(&htt->rx_in_ord_compl_q);
	skb_queue_purge(&htt->tx_fetch_ind_q);

	spin_lock_bh(&htt->rx_ring.lock);
	ath10k_htt_rx_ring_free(htt);
	spin_unlock_bh(&htt->rx_ring.lock);

	dma_free_coherent(htt->ar->dev,
			  ath10k_htt_get_rx_ring_size(htt),
			  ath10k_htt_get_vaddr_ring(htt),
			  htt->rx_ring.base_paddr);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);

	kfree(htt->rx_ring.netbufs_ring);
}

static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int idx;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_ring.fill_cnt == 0) {
		ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
		return NULL;
	}

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];
	htt->rx_ring.netbufs_ring[idx] = NULL;
	ath10k_htt_reset_paddrs_ring(htt, idx);

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev,
			 ATH10K_SKB_RXCB(msdu)->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   struct sk_buff_head *amsdu)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_hw_params *hw = &ar->hw_params;
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;
	struct rx_attention *rx_desc_attention;
	struct rx_frag_info_common *rx_desc_frag_info_common;
	struct rx_msdu_start_common *rx_desc_msdu_start_common;
	struct rx_msdu_end_common *rx_desc_msdu_end_common;

	lockdep_assert_held(&htt->rx_ring.lock);

	for (;;) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		msdu = ath10k_htt_rx_netbuf_pop(htt);
		if (!msdu) {
			__skb_queue_purge(amsdu);
			return -ENOENT;
		}

		__skb_queue_tail(amsdu, msdu);

		rx_desc = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
		rx_desc_attention = ath10k_htt_rx_desc_get_attention(hw, rx_desc);
		rx_desc_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw,
									       rx_desc);
		rx_desc_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rx_desc);
		rx_desc_frag_info_common = ath10k_htt_rx_desc_get_frag_info(hw, rx_desc);

		/* FIXME: we must report msdu payload since this is what caller
		 * expects now
		 */
		skb_put(msdu, hw->rx_desc_ops->rx_desc_msdu_payload_offset);
		skb_pull(msdu, hw->rx_desc_ops->rx_desc_msdu_payload_offset);

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc_attention->flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			__skb_queue_purge(amsdu);
			return -EIO;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc_attention->flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc_msdu_start_common->info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc_frag_info_common->ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, ath10k_htt_rx_msdu_size(hw)));
		msdu_len -= msdu->len;

		/* Note: Chained buffers do not contain rx descriptor */
		while (msdu_chained--) {
			msdu = ath10k_htt_rx_netbuf_pop(htt);
			if (!msdu) {
				__skb_queue_purge(amsdu);
				return -ENOENT;
			}

			__skb_queue_tail(amsdu, msdu);
			skb_trim(msdu, 0);
			skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= msdu->len;
			msdu_chaining = 1;
		}

		last_msdu = __le32_to_cpu(rx_desc_msdu_end_common->info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		/* FIXME: why are we skipping the first part of the rx_desc? */
		trace_ath10k_htt_rx_desc(ar, rx_desc + sizeof(u32),
					 hw->rx_desc_ops->rx_desc_size - sizeof(u32));

		if (last_msdu)
			break;
	}

	if (skb_queue_empty(amsdu))
		msdu_chaining = -1;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */

	return msdu_chaining;
}
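
/* In-order rx variant of the pop helpers: look the skb up by the physical
 * address reported by the target, remove it from the hash table, unmap it
 * and hand it back to the caller.  The ring fill count is decremented but
 * the ring itself is not refilled here.
 */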
static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
					       u64 paddr)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
	if (!msdu)
		return NULL;

	rxcb = ATH10K_SKB_RXCB(msdu);
	hash_del(&rxcb->hlist);
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev, rxcb->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

static inline void ath10k_htt_append_frag_list(struct sk_buff *skb_head,
					       struct sk_buff *frag_list,
					       unsigned int frag_len)
{
	skb_shinfo(skb_head)->frag_list = frag_list;
	skb_head->data_len = frag_len;
	skb_head->len += skb_head->data_len;
}

static int ath10k_htt_rx_handle_amsdu_mon_32(struct ath10k_htt *htt,
					     struct sk_buff *msdu,
					     struct htt_rx_in_ord_msdu_desc **msdu_desc)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_hw_params *hw = &ar->hw_params;
	u32 paddr;
	struct sk_buff *frag_buf;
	struct sk_buff *prev_frag_buf;
	u8 last_frag;
	struct htt_rx_in_ord_msdu_desc *ind_desc = *msdu_desc;
	struct htt_rx_desc *rxd;
	int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);

	rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
	trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);

	skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
	skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
	skb_put(msdu, min(amsdu_len, ath10k_htt_rx_msdu_size(hw)));
	amsdu_len -= msdu->len;

	last_frag = ind_desc->reserved;
	if (last_frag) {
		if (amsdu_len) {
			ath10k_warn(ar, "invalid amsdu len %u, left %d",
				    __le16_to_cpu(ind_desc->msdu_len),
				    amsdu_len);
		}
		return 0;
	}

	ind_desc++;
	paddr = __le32_to_cpu(ind_desc->msdu_paddr);
	frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
	if (!frag_buf) {
		ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%x", paddr);
		return -ENOENT;
	}

	skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
	ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len);

	amsdu_len -= frag_buf->len;
	prev_frag_buf = frag_buf;
	last_frag = ind_desc->reserved;
	while (!last_frag) {
		ind_desc++;
		paddr = __le32_to_cpu(ind_desc->msdu_paddr);
		frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!frag_buf) {
			ath10k_warn(ar, "failed to pop frag-n paddr: 0x%x",
				    paddr);
			prev_frag_buf->next = NULL;
			return -ENOENT;
		}

		skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
		last_frag = ind_desc->reserved;
		amsdu_len -= frag_buf->len;

		prev_frag_buf->next = frag_buf;
		prev_frag_buf = frag_buf;
	}

	if (amsdu_len) {
		ath10k_warn(ar, "invalid amsdu len %u, left %d",
			    __le16_to_cpu(ind_desc->msdu_len), amsdu_len);
	}

	*msdu_desc = ind_desc;

	prev_frag_buf->next = NULL;
	return 0;
}

static int
ath10k_htt_rx_handle_amsdu_mon_64(struct ath10k_htt *htt,
				  struct sk_buff *msdu,
				  struct htt_rx_in_ord_msdu_desc_ext **msdu_desc)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_hw_params *hw = &ar->hw_params;
	u64 paddr;
	struct sk_buff *frag_buf;
	struct sk_buff *prev_frag_buf;
	u8 last_frag;
	struct htt_rx_in_ord_msdu_desc_ext *ind_desc = *msdu_desc;
	struct htt_rx_desc *rxd;
	int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);

	rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
	trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);

	skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
	skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
	skb_put(msdu, min(amsdu_len, ath10k_htt_rx_msdu_size(hw)));
	amsdu_len -= msdu->len;

	last_frag = ind_desc->reserved;
	if (last_frag) {
		if (amsdu_len) {
			ath10k_warn(ar, "invalid amsdu len %u, left %d",
				    __le16_to_cpu(ind_desc->msdu_len),
				    amsdu_len);
		}
		return 0;
	}

	ind_desc++;
	paddr = __le64_to_cpu(ind_desc->msdu_paddr);
	frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
	if (!frag_buf) {
#if defined(__linux__)
		ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%llx", paddr);
#elif defined(__FreeBSD__)
		ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%jx", (uintmax_t)paddr);
#endif
		return -ENOENT;
	}

	skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
	ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len);

	amsdu_len -= frag_buf->len;
	prev_frag_buf = frag_buf;
	last_frag = ind_desc->reserved;
	while (!last_frag) {
		ind_desc++;
		paddr = __le64_to_cpu(ind_desc->msdu_paddr);
		frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!frag_buf) {
#if defined(__linux__)
			ath10k_warn(ar, "failed to pop frag-n paddr: 0x%llx",
				    paddr);
#elif defined(__FreeBSD__)
			ath10k_warn(ar, "failed to pop frag-n paddr: 0x%jx",
				    (uintmax_t)paddr);
#endif
			prev_frag_buf->next = NULL;
			return -ENOENT;
		}

		skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
		last_frag = ind_desc->reserved;
		amsdu_len -= frag_buf->len;

		prev_frag_buf->next = frag_buf;
		prev_frag_buf = frag_buf;
	}

	if (amsdu_len) {
		ath10k_warn(ar, "invalid amsdu len %u, left %d",
			    __le16_to_cpu(ind_desc->msdu_len), amsdu_len);
	}

	*msdu_desc = ind_desc;

	prev_frag_buf->next = NULL;
	return 0;
}
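
/* Walk the MSDU descriptors of an in-order indication (32-bit target
 * addresses), pop the corresponding skbs and queue them on @list.  For
 * non-offloaded frames the HTT rx descriptor is stripped and the MSDU_DONE
 * attention bit is verified; in monitor mode chained buffers are reassembled
 * via ath10k_htt_rx_handle_amsdu_mon_32().
 */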
static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
					  struct htt_rx_in_ord_ind *ev,
					  struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
	struct htt_rx_desc *rxd;
	struct rx_attention *rxd_attention;
	struct sk_buff *msdu;
	int msdu_count, ret;
	bool is_offload;
	u32 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le32_to_cpu(msdu_desc->msdu_paddr);

		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		if (!is_offload && ar->monitor_arvif) {
			ret = ath10k_htt_rx_handle_amsdu_mon_32(htt, msdu,
								&msdu_desc);
			if (ret) {
				__skb_queue_purge(list);
				return ret;
			}
			__skb_queue_tail(list, msdu);
			msdu_desc++;
			continue;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
			rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);

			trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);

			skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
			skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd_attention->flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}

static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
					  struct htt_rx_in_ord_ind *ev,
					  struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
	struct htt_rx_desc *rxd;
	struct rx_attention *rxd_attention;
	struct sk_buff *msdu;
	int msdu_count, ret;
	bool is_offload;
	u64 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le64_to_cpu(msdu_desc->msdu_paddr);
		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		if (!is_offload && ar->monitor_arvif) {
			ret = ath10k_htt_rx_handle_amsdu_mon_64(htt, msdu,
								&msdu_desc);
			if (ret) {
				__skb_queue_purge(list);
				return ret;
			}
			__skb_queue_tail(list, msdu);
			msdu_desc++;
			continue;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
			rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);

			trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);

			skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
			skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd_attention->flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}
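
/* Allocate the rx ring state: the netbufs array, the DMA-coherent paddr ring,
 * the shared alloc index, the refill retry timer and the rx queues.  Nothing
 * is posted to the ring here; the initial fill happens later through
 * ath10k_htt_rx_ring_refill().
 */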
int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	dma_addr_t paddr;
	void *vaddr, *vaddr_ring;
	size_t size;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		return 0;

	htt->rx_confused = false;

	/* XXX: The fill level could be changed during runtime in response to
	 * the host processing latency. Is this really worth it?
	 */
	htt->rx_ring.size = HTT_RX_RING_SIZE;
	htt->rx_ring.size_mask = htt->rx_ring.size - 1;
	htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level;

	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn(ar, "htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.netbufs_ring =
		kcalloc(htt->rx_ring.size, sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	size = ath10k_htt_get_rx_ring_size(htt);

	vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
	if (!vaddr_ring)
		goto err_dma_ring;

	ath10k_htt_config_paddrs_ring(htt, vaddr_ring);
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_KERNEL);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	timer_setup(timer, ath10k_htt_rx_ring_refill_retry, 0);

	spin_lock_init(&htt->rx_ring.lock);
#if defined(__FreeBSD__)
	spin_lock_init(&htt->tx_fetch_ind_q.lock);
#endif

	htt->rx_ring.fill_cnt = 0;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	hash_init(htt->rx_ring.skb_table);

	skb_queue_head_init(&htt->rx_msdus_q);
	skb_queue_head_init(&htt->rx_in_ord_compl_q);
	skb_queue_head_init(&htt->tx_fetch_ind_q);
	atomic_set(&htt->num_mpdus_ready, 0);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  ath10k_htt_get_rx_ring_size(htt),
			  vaddr_ring,
			  htt->rx_ring.base_paddr);
err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
	return -ENOMEM;
}

static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
					  enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
		return IEEE80211_CCMP_256_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return IEEE80211_GCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

#define MICHAEL_MIC_LEN 8

static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar,
					enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
		return IEEE80211_CCMP_256_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return IEEE80211_GCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar,
					enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

struct amsdu_subframe_hdr {
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	__be16 len;
} __packed;

#define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)

static inline u8 ath10k_bw_to_mac80211_bw(u8 bw)
{
	u8 ret = 0;

	switch (bw) {
	case 0:
		ret = RATE_INFO_BW_20;
		break;
	case 1:
		ret = RATE_INFO_BW_40;
		break;
	case 2:
		ret = RATE_INFO_BW_80;
		break;
	case 3:
		ret = RATE_INFO_BW_160;
		break;
	}

	return ret;
}

static void ath10k_htt_rx_h_rates(struct ath10k *ar,
				  struct ieee80211_rx_status *status,
				  struct htt_rx_desc *rxd)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct rx_attention *rxd_attention;
	struct rx_mpdu_start *rxd_mpdu_start;
	struct rx_mpdu_end *rxd_mpdu_end;
	struct rx_msdu_start_common *rxd_msdu_start_common;
	struct rx_msdu_end_common *rxd_msdu_end_common;
	struct rx_ppdu_start *rxd_ppdu_start;
	struct ieee80211_supported_band *sband;
	u8 cck, rate, bw, sgi, mcs, nss;
	u8 *rxd_msdu_payload;
	u8 preamble = 0;
	u8 group_id;
	u32 info1, info2, info3;
	u32 stbc, nsts_su;

	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
	rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
	rxd_mpdu_end = ath10k_htt_rx_desc_get_mpdu_end(hw, rxd);
	rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
	rxd_ppdu_start = ath10k_htt_rx_desc_get_ppdu_start(hw, rxd);
	rxd_msdu_payload = ath10k_htt_rx_desc_get_msdu_payload(hw, rxd);

	info1 = __le32_to_cpu(rxd_ppdu_start->info1);
	info2 = __le32_to_cpu(rxd_ppdu_start->info2);
	info3 = __le32_to_cpu(rxd_ppdu_start->info3);

	preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);

	switch (preamble) {
	case HTT_RX_LEGACY:
		/* The band is required to get the legacy rate index. Since
		 * the band can't be undefined, check that freq is non-zero.
		 */
		if (!status->freq)
			return;

		cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
		rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
		rate &= ~RX_PPDU_START_RATE_FLAG;

		sband = &ar->mac.sbands[status->band];
		status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
		break;
	case HTT_RX_HT:
	case HTT_RX_HT_WITH_TXBF:
		/* HT-SIG - Table 20-11 in info2 and info3 */
		mcs = info2 & 0x1F;
		nss = mcs >> 3;
		bw = (info2 >> 7) & 1;
		sgi = (info3 >> 7) & 1;

		status->rate_idx = mcs;
		status->encoding = RX_ENC_HT;
		if (sgi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (bw)
			status->bw = RATE_INFO_BW_40;
		break;
	case HTT_RX_VHT:
	case HTT_RX_VHT_WITH_TXBF:
		/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
		 * TODO check this
		 */
		bw = info2 & 3;
		sgi = info3 & 1;
		stbc = (info2 >> 3) & 1;
		group_id = (info2 >> 4) & 0x3F;

		if (GROUP_ID_IS_SU_MIMO(group_id)) {
			mcs = (info3 >> 4) & 0x0F;
			nsts_su = ((info2 >> 10) & 0x07);
			if (stbc)
				nss = (nsts_su >> 2) + 1;
			else
				nss = (nsts_su + 1);
		} else {
			/* Hardware doesn't decode VHT-SIG-B into Rx descriptor
			 * so it's impossible to decode MCS. Also since
			 * firmware consumes Group Id Management frames host
			 * has no knowledge regarding group/user position
			 * mapping so it's impossible to pick the correct Nsts
			 * from VHT-SIG-A1.
			 *
			 * Bandwidth and SGI are valid so report the rateinfo
			 * on best-effort basis.
			 */
			mcs = 0;
			nss = 1;
		}

		if (mcs > 0x09) {
			ath10k_warn(ar, "invalid MCS received %u\n", mcs);
			ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
				    __le32_to_cpu(rxd_attention->flags),
				    __le32_to_cpu(rxd_mpdu_start->info0),
				    __le32_to_cpu(rxd_mpdu_start->info1),
				    __le32_to_cpu(rxd_msdu_start_common->info0),
				    __le32_to_cpu(rxd_msdu_start_common->info1),
				    rxd_ppdu_start->info0,
				    __le32_to_cpu(rxd_ppdu_start->info1),
				    __le32_to_cpu(rxd_ppdu_start->info2),
				    __le32_to_cpu(rxd_ppdu_start->info3),
				    __le32_to_cpu(rxd_ppdu_start->info4));

			ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
				    __le32_to_cpu(rxd_msdu_end_common->info0),
				    __le32_to_cpu(rxd_mpdu_end->info0));

			ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
					"rx desc msdu payload: ",
					rxd_msdu_payload, 50);
		}

		status->rate_idx = mcs;
		status->nss = nss;

		if (sgi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

		status->bw = ath10k_bw_to_mac80211_bw(bw);
		status->encoding = RX_ENC_VHT;
		break;
	default:
		break;
	}
}
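
/* Channel inference helpers: when the rx descriptor does not carry the
 * frequency directly, try to recover it from the peer's vif, the vdev, any
 * active channel context or the target operating channel; see
 * ath10k_htt_rx_h_channel() below for the precedence order.
 */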
static struct ieee80211_channel *
ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct rx_attention *rxd_attention;
	struct rx_msdu_end_common *rxd_msdu_end_common;
	struct rx_mpdu_start *rxd_mpdu_start;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;
	u16 peer_id;

	lockdep_assert_held(&ar->data_lock);

	if (!rxd)
		return NULL;

	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
	rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);

	if (rxd_attention->flags &
	    __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
		return NULL;

	if (!(rxd_msdu_end_common->info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
		return NULL;

	peer_id = MS(__le32_to_cpu(rxd_mpdu_start->info0),
		     RX_MPDU_START_INFO0_PEER_IDX);

	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer)
		return NULL;

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (WARN_ON_ONCE(!arvif))
		return NULL;

	if (ath10k_mac_vif_chan(arvif->vif, &def))
		return NULL;

	return def.chan;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
{
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;

	lockdep_assert_held(&ar->data_lock);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->vdev_id == vdev_id &&
		    ath10k_mac_vif_chan(arvif->vif, &def) == 0)
			return def.chan;
	}

	return NULL;
}

static void
ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
			      struct ieee80211_chanctx_conf *conf,
			      void *data)
{
	struct cfg80211_chan_def *def = data;

	*def = conf->def;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_any_channel(struct ath10k *ar)
{
	struct cfg80211_chan_def def = {};

	ieee80211_iter_chan_contexts_atomic(ar->hw,
					    ath10k_htt_rx_h_any_chan_iter,
					    &def);

	return def.chan;
}

static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd,
				    u32 vdev_id)
{
	struct ieee80211_channel *ch;

	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;
	if (!ch)
		ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
	if (!ch)
		ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
	if (!ch)
		ch = ath10k_htt_rx_h_any_channel(ar);
	if (!ch)
		ch = ar->tgt_oper_chan;
	spin_unlock_bh(&ar->data_lock);

	if (!ch)
		return false;

	status->band = ch->band;
	status->freq = ch->center_freq;

	return true;
}

static void ath10k_htt_rx_h_signal(struct ath10k *ar,
				   struct ieee80211_rx_status *status,
				   struct htt_rx_desc *rxd)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct rx_ppdu_start *rxd_ppdu_start = ath10k_htt_rx_desc_get_ppdu_start(hw, rxd);
	int i;

	for (i = 0; i < IEEE80211_MAX_CHAINS; i++) {
		status->chains &= ~BIT(i);

		if (rxd_ppdu_start->rssi_chains[i].pri20_mhz != 0x80) {
			status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR +
				rxd_ppdu_start->rssi_chains[i].pri20_mhz;

			status->chains |= BIT(i);
		}
	}

	/* FIXME: Get real NF */
	status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
			 rxd_ppdu_start->rssi_comb;
	status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}

static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct rx_ppdu_end_common *rxd_ppdu_end_common;

	rxd_ppdu_end_common = ath10k_htt_rx_desc_get_ppdu_end(hw, rxd);

	/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
	 * means all prior MSDUs in a PPDU are reported to mac80211 without the
	 * TSF. Is it worth holding frames until end of PPDU is known?
	 *
	 * FIXME: Can we get/compute 64bit TSF?
	 */
	status->mactime = __le32_to_cpu(rxd_ppdu_end_common->tsf_timestamp);
	status->flag |= RX_FLAG_MACTIME_END;
}

static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 u32 vdev_id)
{
	struct sk_buff *first;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_desc *rxd;
	struct rx_attention *rxd_attention;
	bool is_first_ppdu;
	bool is_last_ppdu;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
#if defined(__linux__)
				    (void *)first->data - hw->rx_desc_ops->rx_desc_size);
#elif defined(__FreeBSD__)
				    (u8 *)first->data - hw->rx_desc_ops->rx_desc_size);
#endif

	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);

	is_first_ppdu = !!(rxd_attention->flags &
			   __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
	is_last_ppdu = !!(rxd_attention->flags &
			  __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));

	if (is_first_ppdu) {
		/* New PPDU starts so clear out the old per-PPDU status. */
		status->freq = 0;
		status->rate_idx = 0;
		status->nss = 0;
		status->encoding = RX_ENC_LEGACY;
		status->bw = RATE_INFO_BW_20;

		status->flag &= ~RX_FLAG_MACTIME_END;
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		status->flag &= ~(RX_FLAG_AMPDU_IS_LAST);
		status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
		status->ampdu_reference = ar->ampdu_reference;

		ath10k_htt_rx_h_signal(ar, status, rxd);
		ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
		ath10k_htt_rx_h_rates(ar, status, rxd);
	}

	if (is_last_ppdu) {
		ath10k_htt_rx_h_mactime(ar, status, rxd);

		/* set ampdu last segment flag */
		status->flag |= RX_FLAG_AMPDU_IS_LAST;
		ar->ampdu_reference++;
	}
}

static const char * const tid_to_ac[] = {
	"BE",
	"BK",
	"BK",
	"BE",
	"VI",
	"VI",
	"VO",
	"VO",
};

static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
{
	u8 *qc;
	int tid;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return "";

	qc = ieee80211_get_qos_ctl(hdr);
	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
	if (tid < 8)
		snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
	else
		snprintf(out, size, "tid %d", tid);

	return out;
}

static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
				       struct ieee80211_rx_status *rx_status,
				       struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;

	status = IEEE80211_SKB_RXCB(skb);
	*status = *rx_status;

	skb_queue_tail(&ar->htt.rx_msdus_q, skb);
}

static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	char tid[32];

	status = IEEE80211_SKB_RXCB(skb);

	if (!(ar->filter_flags & FIF_FCSFAIL) &&
	    status->flag & RX_FLAG_FAILED_FCS_CRC) {
		ar->stats.rx_crc_err_drop++;
		dev_kfree_skb_any(skb);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_DATA,
		   "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
		   skb,
		   skb->len,
		   ieee80211_get_SA(hdr),
		   ath10k_get_tid(hdr, tid, sizeof(tid)),
		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
							"mcast" : "ucast",
		   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
		   (status->encoding == RX_ENC_HT) ? "ht" : "",
		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
			skb->data, skb->len);
	trace_ath10k_rx_hdr(ar, skb->data, skb->len);
	trace_ath10k_rx_payload(ar, skb->data, skb->len);

	ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
}

static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
				      struct ieee80211_hdr *hdr)
{
	int len = ieee80211_hdrlen(hdr->frame_control);

	if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
		      ar->running_fw->fw_file.fw_features))
		len = round_up(len, 4);

	return len;
}

static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					enum htt_rx_mpdu_encrypt_type enctype,
					bool is_decrypted,
					const u8 first_hdr[64])
{
	struct ieee80211_hdr *hdr;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_desc *rxd;
	struct rx_msdu_end_common *rxd_msdu_end_common;
	size_t hdr_len;
	size_t crypto_len;
	bool is_first;
	bool is_last;
	bool msdu_limit_err;
	int bytes_aligned = ar->hw_params.decap_align_bytes;
	u8 *qos;

	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
#if defined(__linux__)
				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
#elif defined(__FreeBSD__)
				    (u8 *)msdu->data - hw->rx_desc_ops->rx_desc_size);
#endif

	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
	is_first = !!(rxd_msdu_end_common->info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd_msdu_end_common->info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));

	/* Delivered decapped frame:
	 * [802.11 header]
	 * [crypto param] <-- can be trimmed if !fcs_err &&
	 *                    !decrypt_err && !peer_idx_invalid
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 * [payload]
	 * [FCS] <-- at end, needs to be trimmed
	 */

	/* Some hardware (QCA99x0 variants) limits the number of MSDUs it
	 * deaggregates from an A-MSDU, so that unwanted MSDU-deaggregation is
	 * avoided for error packets. If the limit is exceeded, the hw sends
	 * all remaining MSDUs as a single last MSDU with the msdu limit error
	 * bit set.
	 */
	msdu_limit_err = ath10k_htt_rx_desc_msdu_limit_error(hw, rxd);

	/* If an MSDU limit error occurred, don't warn: a partial raw MSDU
	 * without the first MSDU is expected in that case and is handled
	 * further below.
	 */
	/* This probably shouldn't happen but warn just in case */
	if (WARN_ON_ONCE(!is_first && !msdu_limit_err))
		return;

	/* This probably shouldn't happen but warn just in case */
	if (WARN_ON_ONCE(!(is_first && is_last) && !msdu_limit_err))
		return;

	skb_trim(msdu, msdu->len - FCS_LEN);

	/* Push original 80211 header */
	if (unlikely(msdu_limit_err)) {
#if defined(__linux__)
		hdr = (struct ieee80211_hdr *)first_hdr;
#elif defined(__FreeBSD__)
		hdr = __DECONST(struct ieee80211_hdr *, first_hdr);
#endif
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		if (ieee80211_is_data_qos(hdr->frame_control)) {
			qos = ieee80211_get_qos_ctl(hdr);
			qos[0] |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
		}

		if (crypto_len)
			memcpy(skb_push(msdu, crypto_len),
#if defined(__linux__)
			       (void *)hdr + round_up(hdr_len, bytes_aligned),
#elif defined(__FreeBSD__)
			       (u8 *)hdr + round_up(hdr_len, bytes_aligned),
#endif
			       crypto_len);

		memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
	}

	/* In most cases this will be true for sniffed frames. It makes sense
	 * to deliver them as-is without stripping the crypto param. This is
	 * necessary for software based decryption.
	 *
	 * If there's no error then the frame is decrypted. At least that is
	 * the case for frames that come in via fragmented rx indication.
	 */
	if (!is_decrypted)
		return;

	/* The payload is decrypted so strip crypto params. Start from tail
	 * since hdr is used to compute some stuff.
	 */

	hdr = (void *)msdu->data;

	/* Tail */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_mic_len(ar, enctype));

		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_icv_len(ar, enctype));
	} else {
		/* MIC */
		if (status->flag & RX_FLAG_MIC_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath10k_htt_rx_crypto_mic_len(ar, enctype));

		/* ICV */
		if (status->flag & RX_FLAG_ICV_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath10k_htt_rx_crypto_icv_len(ar, enctype));
	}

	/* MMIC */
	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);

	/* Head */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

#if defined(__linux__)
		memmove((void *)msdu->data + crypto_len,
#elif defined(__FreeBSD__)
		memmove((u8 *)msdu->data + crypto_len,
#endif
			(void *)msdu->data, hdr_len);
		skb_pull(msdu, crypto_len);
	}
}
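
/* Native-wifi decap: the target delivers a stripped-down 802.11 header
 * (3-addr, no QoS control), so the frame is rebuilt from the original header
 * saved in the rx descriptor while preserving the DA/SA reported by the
 * decapped header.
 */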
static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
					  struct sk_buff *msdu,
					  struct ieee80211_rx_status *status,
					  const u8 first_hdr[64],
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
#if defined(__linux__)
	struct ieee80211_hdr *hdr;
#elif defined(__FreeBSD__)
	const struct ieee80211_hdr *hdr;
	struct ieee80211_hdr *hdr2;
#endif
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int l3_pad_bytes;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 *
	 * Note: The nwifi header doesn't have QoS Control and is
	 * (always?) a 3addr frame.
	 *
	 * Note2: There's no A-MSDU subframe header. Even if it's part
	 * of an A-MSDU.
	 */

	/* pull decapped header and copy SA & DA */
#if defined(__linux__)
	rxd = HTT_RX_BUF_TO_RX_DESC(hw, (void *)msdu->data -
#elif defined(__FreeBSD__)
	rxd = HTT_RX_BUF_TO_RX_DESC(hw, (u8 *)msdu->data -
#endif
				    hw->rx_desc_ops->rx_desc_size);

	l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);

#if defined(__linux__)
	hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);

	hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
#elif defined(__FreeBSD__)
	hdr2 = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);

	hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr2);
	ether_addr_copy(da, ieee80211_get_DA(hdr2));
	ether_addr_copy(sa, ieee80211_get_SA(hdr2));
#endif
	skb_pull(msdu, hdr_len);

	/* push original 802.11 header */
#if defined(__linux__)
	hdr = (struct ieee80211_hdr *)first_hdr;
#elif defined(__FreeBSD__)
	hdr = (const struct ieee80211_hdr *)first_hdr;
#endif
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
#if defined(__linux__)
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
#elif defined(__FreeBSD__)
		       (const u8 *)hdr + round_up(hdr_len, bytes_aligned),
#endif
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
#if defined(__linux__)
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
#elif defined(__FreeBSD__)
	hdr2 = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr2), da);
	ether_addr_copy(ieee80211_get_SA(hdr2), sa);
#endif
}

static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
					  struct sk_buff *msdu,
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_desc *rxd;
	struct rx_msdu_end_common *rxd_msdu_end_common;
	u8 *rxd_rx_hdr_status;
	size_t hdr_len, crypto_len;
#if defined(__linux__)
	void *rfc1042;
#elif defined(__FreeBSD__)
	u8 *rfc1042;
#endif
	bool is_first, is_last, is_amsdu;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
#if defined(__linux__)
				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
#elif defined(__FreeBSD__)
				    (u8 *)msdu->data - hw->rx_desc_ops->rx_desc_size);
#endif

	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
	rxd_rx_hdr_status = ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);
	hdr = (void *)rxd_rx_hdr_status;

	is_first = !!(rxd_msdu_end_common->info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd_msdu_end_common->info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
	is_amsdu = !(is_first && is_last);

#if defined(__linux__)
	rfc1042 = hdr;
#elif defined(__FreeBSD__)
	rfc1042 = (void *)hdr;
#endif

	if (is_first) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		rfc1042 += round_up(hdr_len, bytes_aligned) +
			   round_up(crypto_len, bytes_aligned);
	}

	if (is_amsdu)
		rfc1042 += sizeof(struct amsdu_subframe_hdr);

	return rfc1042;
}

static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					const u8 first_hdr[64],
					enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
#if defined(__linux__)
	struct ieee80211_hdr *hdr;
#elif defined(__FreeBSD__)
	const struct ieee80211_hdr *hdr;
	struct ieee80211_hdr *hdr2;
#endif
	struct ethhdr *eth;
	size_t hdr_len;
	void *rfc1042;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int l3_pad_bytes;
	struct htt_rx_desc *rxd;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
	 * [payload]
	 */

	rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
	if (WARN_ON_ONCE(!rfc1042))
		return;

	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
#if defined(__linux__)
				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
#elif defined(__FreeBSD__)
				    (u8 *)msdu->data - hw->rx_desc_ops->rx_desc_size);
#endif

	l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);
	skb_pull(msdu, l3_pad_bytes);

	/* pull decapped header and copy SA & DA */
	eth = (struct ethhdr *)msdu->data;
	ether_addr_copy(da, eth->h_dest);
	ether_addr_copy(sa, eth->h_source);
	skb_pull(msdu, sizeof(struct ethhdr));

	/* push rfc1042/llc/snap */
	memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
	       sizeof(struct rfc1042_hdr));

	/* push original 802.11 header */
#if defined(__linux__)
	hdr = (struct ieee80211_hdr *)first_hdr;
#elif defined(__FreeBSD__)
	hdr = (const struct ieee80211_hdr *)first_hdr;
#endif
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
#if defined(__linux__)
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
#elif defined(__FreeBSD__)
		       (const u8 *)hdr + round_up(hdr_len, bytes_aligned),
#endif
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
#if defined(__linux__)
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
#elif defined(__FreeBSD__)
	hdr2 = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr2), da);
	ether_addr_copy(ieee80211_get_SA(hdr2), sa);
#endif
}
static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
					 struct sk_buff *msdu,
					 struct ieee80211_rx_status *status,
					 const u8 first_hdr[64],
					 enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
#if defined(__linux__)
	struct ieee80211_hdr *hdr;
#elif defined(__FreeBSD__)
	const struct ieee80211_hdr *hdr;
#endif
	size_t hdr_len;
	int l3_pad_bytes;
	struct htt_rx_desc *rxd;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [amsdu header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 * [payload]
	 */

	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
#if defined(__linux__)
				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
#elif defined(__FreeBSD__)
				    (u8 *)msdu->data - hw->rx_desc_ops->rx_desc_size);
#endif

	l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);

	skb_put(msdu, l3_pad_bytes);
	skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);

#if defined(__linux__)
	hdr = (struct ieee80211_hdr *)first_hdr;
#elif defined(__FreeBSD__)
	hdr = (const struct ieee80211_hdr *)first_hdr;
#endif
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
#if defined(__linux__)
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
#elif defined(__FreeBSD__)
		       (const u8 *)hdr + round_up(hdr_len, bytes_aligned),
#endif
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
}
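
/* Dispatch to the decap handler matching the format the target used for this
 * MSDU (raw, native-wifi, ethernet or 802.3/SNAP), as reported in the rx
 * descriptor's msdu_start word.
 */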
msdu's decapped header: 1881 * [amsdu header] <-- only if A-MSDU 1882 * [rfc1042/llc] 1883 */ 1884 1885 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 1886 #if defined(__linux__) 1887 (void *)msdu->data - hw->rx_desc_ops->rx_desc_size); 1888 #elif defined(__FreeBSD__) 1889 (u8 *)msdu->data - hw->rx_desc_ops->rx_desc_size); 1890 #endif 1891 1892 rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd); 1893 decap = MS(__le32_to_cpu(rxd_msdu_start_common->info1), 1894 RX_MSDU_START_INFO1_DECAP_FORMAT); 1895 1896 switch (decap) { 1897 case RX_MSDU_DECAP_RAW: 1898 ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype, 1899 is_decrypted, first_hdr); 1900 break; 1901 case RX_MSDU_DECAP_NATIVE_WIFI: 1902 ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr, 1903 enctype); 1904 break; 1905 case RX_MSDU_DECAP_ETHERNET2_DIX: 1906 ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype); 1907 break; 1908 case RX_MSDU_DECAP_8023_SNAP_LLC: 1909 ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr, 1910 enctype); 1911 break; 1912 } 1913 } 1914 1915 static int ath10k_htt_rx_get_csum_state(struct ath10k_hw_params *hw, struct sk_buff *skb) 1916 { 1917 struct htt_rx_desc *rxd; 1918 struct rx_attention *rxd_attention; 1919 struct rx_msdu_start_common *rxd_msdu_start_common; 1920 u32 flags, info; 1921 bool is_ip4, is_ip6; 1922 bool is_tcp, is_udp; 1923 bool ip_csum_ok, tcpudp_csum_ok; 1924 1925 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 1926 #if defined(__linux__) 1927 (void *)skb->data - hw->rx_desc_ops->rx_desc_size); 1928 #elif defined(__FreeBSD__) 1929 (u8 *)skb->data - hw->rx_desc_ops->rx_desc_size); 1930 #endif 1931 1932 rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd); 1933 rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd); 1934 flags = __le32_to_cpu(rxd_attention->flags); 1935 info = __le32_to_cpu(rxd_msdu_start_common->info1); 1936 1937 is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO); 1938 is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO); 1939 is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO); 1940 is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO); 1941 ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL); 1942 tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL); 1943 1944 if (!is_ip4 && !is_ip6) 1945 return CHECKSUM_NONE; 1946 if (!is_tcp && !is_udp) 1947 return CHECKSUM_NONE; 1948 if (!ip_csum_ok) 1949 return CHECKSUM_NONE; 1950 if (!tcpudp_csum_ok) 1951 return CHECKSUM_NONE; 1952 1953 return CHECKSUM_UNNECESSARY; 1954 } 1955 1956 static void ath10k_htt_rx_h_csum_offload(struct ath10k_hw_params *hw, 1957 struct sk_buff *msdu) 1958 { 1959 msdu->ip_summed = ath10k_htt_rx_get_csum_state(hw, msdu); 1960 } 1961 1962 static u64 ath10k_htt_rx_h_get_pn(struct ath10k *ar, struct sk_buff *skb, 1963 u16 offset, 1964 enum htt_rx_mpdu_encrypt_type enctype) 1965 { 1966 struct ieee80211_hdr *hdr; 1967 u64 pn = 0; 1968 u8 *ehdr; 1969 1970 hdr = (struct ieee80211_hdr *)(skb->data + offset); 1971 ehdr = skb->data + offset + ieee80211_hdrlen(hdr->frame_control); 1972 1973 if (enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2) { 1974 pn = ehdr[0]; 1975 pn |= (u64)ehdr[1] << 8; 1976 pn |= (u64)ehdr[4] << 16; 1977 pn |= (u64)ehdr[5] << 24; 1978 pn |= (u64)ehdr[6] << 32; 1979 pn |= (u64)ehdr[7] << 40; 1980 } 1981 return pn; 1982 } 1983 1984 static bool ath10k_htt_rx_h_frag_multicast_check(struct ath10k *ar, 1985 struct sk_buff *skb, 1986 u16 offset) 1987 { 1988 struct ieee80211_hdr *hdr; 1989 1990 hdr = (struct ieee80211_hdr *)(skb->data + offset); 1991 return 
!is_multicast_ether_addr(hdr->addr1); 1992 } 1993 1994 static bool ath10k_htt_rx_h_frag_pn_check(struct ath10k *ar, 1995 struct sk_buff *skb, 1996 u16 peer_id, 1997 u16 offset, 1998 enum htt_rx_mpdu_encrypt_type enctype) 1999 { 2000 struct ath10k_peer *peer; 2001 union htt_rx_pn_t *last_pn, new_pn = {0}; 2002 struct ieee80211_hdr *hdr; 2003 u8 tid, frag_number; 2004 u32 seq; 2005 2006 peer = ath10k_peer_find_by_id(ar, peer_id); 2007 if (!peer) { 2008 ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer for frag pn check\n"); 2009 return false; 2010 } 2011 2012 hdr = (struct ieee80211_hdr *)(skb->data + offset); 2013 if (ieee80211_is_data_qos(hdr->frame_control)) 2014 tid = ieee80211_get_tid(hdr); 2015 else 2016 tid = ATH10K_TXRX_NON_QOS_TID; 2017 2018 last_pn = &peer->frag_tids_last_pn[tid]; 2019 new_pn.pn48 = ath10k_htt_rx_h_get_pn(ar, skb, offset, enctype); 2020 frag_number = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; 2021 seq = (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4; 2022 2023 if (frag_number == 0) { 2024 last_pn->pn48 = new_pn.pn48; 2025 peer->frag_tids_seq[tid] = seq; 2026 } else { 2027 if (seq != peer->frag_tids_seq[tid]) 2028 return false; 2029 2030 if (new_pn.pn48 != last_pn->pn48 + 1) 2031 return false; 2032 2033 last_pn->pn48 = new_pn.pn48; 2034 } 2035 2036 return true; 2037 } 2038 2039 static void ath10k_htt_rx_h_mpdu(struct ath10k *ar, 2040 struct sk_buff_head *amsdu, 2041 struct ieee80211_rx_status *status, 2042 bool fill_crypt_header, 2043 u8 *rx_hdr, 2044 enum ath10k_pkt_rx_err *err, 2045 u16 peer_id, 2046 bool frag) 2047 { 2048 struct sk_buff *first; 2049 struct sk_buff *last; 2050 struct sk_buff *msdu, *temp; 2051 struct ath10k_hw_params *hw = &ar->hw_params; 2052 struct htt_rx_desc *rxd; 2053 struct rx_attention *rxd_attention; 2054 struct rx_mpdu_start *rxd_mpdu_start; 2055 2056 struct ieee80211_hdr *hdr; 2057 enum htt_rx_mpdu_encrypt_type enctype; 2058 u8 first_hdr[64]; 2059 u8 *qos; 2060 bool has_fcs_err; 2061 bool has_crypto_err; 2062 bool has_tkip_err; 2063 bool has_peer_idx_invalid; 2064 bool is_decrypted; 2065 bool is_mgmt; 2066 u32 attention; 2067 bool frag_pn_check = true, multicast_check = true; 2068 2069 if (skb_queue_empty(amsdu)) 2070 return; 2071 2072 first = skb_peek(amsdu); 2073 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 2074 #if defined(__linux__) 2075 (void *)first->data - hw->rx_desc_ops->rx_desc_size); 2076 #elif defined(__FreeBSD__) 2077 (u8 *)first->data - hw->rx_desc_ops->rx_desc_size); 2078 #endif 2079 2080 rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd); 2081 rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd); 2082 2083 is_mgmt = !!(rxd_attention->flags & 2084 __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE)); 2085 2086 enctype = MS(__le32_to_cpu(rxd_mpdu_start->info0), 2087 RX_MPDU_START_INFO0_ENCRYPT_TYPE); 2088 2089 /* First MSDU's Rx descriptor in an A-MSDU contains full 802.11 2090 * decapped header. It'll be used for undecapping of each MSDU. 2091 */ 2092 hdr = (void *)ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd); 2093 memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN); 2094 2095 if (rx_hdr) 2096 memcpy(rx_hdr, hdr, RX_HTT_HDR_STATUS_LEN); 2097 2098 /* Each A-MSDU subframe will use the original header as the base and be 2099 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl. 
2100 */ 2101 hdr = (void *)first_hdr; 2102 2103 if (ieee80211_is_data_qos(hdr->frame_control)) { 2104 qos = ieee80211_get_qos_ctl(hdr); 2105 qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; 2106 } 2107 2108 /* Some attention flags are valid only in the last MSDU. */ 2109 last = skb_peek_tail(amsdu); 2110 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 2111 #if defined(__linux__) 2112 (void *)last->data - hw->rx_desc_ops->rx_desc_size); 2113 #elif defined(__FreeBSD__) 2114 (u8 *)last->data - hw->rx_desc_ops->rx_desc_size); 2115 #endif 2116 2117 rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd); 2118 attention = __le32_to_cpu(rxd_attention->flags); 2119 2120 has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR); 2121 has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR); 2122 has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR); 2123 has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID); 2124 2125 /* Note: If hardware captures an encrypted frame that it can't decrypt, 2126 * e.g. due to fcs error, missing peer or invalid key data it will 2127 * report the frame as raw. 2128 */ 2129 is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE && 2130 !has_fcs_err && 2131 !has_crypto_err && 2132 !has_peer_idx_invalid); 2133 2134 /* Clear per-MPDU flags while leaving per-PPDU flags intact. */ 2135 status->flag &= ~(RX_FLAG_FAILED_FCS_CRC | 2136 RX_FLAG_MMIC_ERROR | 2137 RX_FLAG_DECRYPTED | 2138 RX_FLAG_IV_STRIPPED | 2139 RX_FLAG_ONLY_MONITOR | 2140 RX_FLAG_MMIC_STRIPPED); 2141 2142 if (has_fcs_err) 2143 status->flag |= RX_FLAG_FAILED_FCS_CRC; 2144 2145 if (has_tkip_err) 2146 status->flag |= RX_FLAG_MMIC_ERROR; 2147 2148 if (err) { 2149 if (has_fcs_err) 2150 *err = ATH10K_PKT_RX_ERR_FCS; 2151 else if (has_tkip_err) 2152 *err = ATH10K_PKT_RX_ERR_TKIP; 2153 else if (has_crypto_err) 2154 *err = ATH10K_PKT_RX_ERR_CRYPT; 2155 else if (has_peer_idx_invalid) 2156 *err = ATH10K_PKT_RX_ERR_PEER_IDX_INVAL; 2157 } 2158 2159 /* Firmware reports all necessary management frames via WMI already. 2160 * They are not reported to monitor interfaces at all so pass the ones 2161 * coming via HTT to monitor interfaces instead. This simplifies 2162 * matters a lot. 
2163 */ 2164 if (is_mgmt) 2165 status->flag |= RX_FLAG_ONLY_MONITOR; 2166 2167 if (is_decrypted) { 2168 status->flag |= RX_FLAG_DECRYPTED; 2169 2170 if (likely(!is_mgmt)) 2171 status->flag |= RX_FLAG_MMIC_STRIPPED; 2172 2173 if (fill_crypt_header) 2174 status->flag |= RX_FLAG_MIC_STRIPPED | 2175 RX_FLAG_ICV_STRIPPED; 2176 else 2177 status->flag |= RX_FLAG_IV_STRIPPED; 2178 } 2179 2180 skb_queue_walk(amsdu, msdu) { 2181 if (frag && !fill_crypt_header && is_decrypted && 2182 enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2) 2183 frag_pn_check = ath10k_htt_rx_h_frag_pn_check(ar, 2184 msdu, 2185 peer_id, 2186 0, 2187 enctype); 2188 2189 if (frag) 2190 multicast_check = ath10k_htt_rx_h_frag_multicast_check(ar, 2191 msdu, 2192 0); 2193 2194 if (!frag_pn_check || !multicast_check) { 2195 /* Discard the fragment with invalid PN or multicast DA 2196 */ 2197 temp = msdu->prev; 2198 __skb_unlink(msdu, amsdu); 2199 dev_kfree_skb_any(msdu); 2200 msdu = temp; 2201 frag_pn_check = true; 2202 multicast_check = true; 2203 continue; 2204 } 2205 2206 ath10k_htt_rx_h_csum_offload(&ar->hw_params, msdu); 2207 2208 if (frag && !fill_crypt_header && 2209 enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA) 2210 status->flag &= ~RX_FLAG_MMIC_STRIPPED; 2211 2212 ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype, 2213 is_decrypted); 2214 2215 /* Undecapping involves copying the original 802.11 header back 2216 * to sk_buff. If frame is protected and hardware has decrypted 2217 * it then remove the protected bit. 2218 */ 2219 if (!is_decrypted) 2220 continue; 2221 if (is_mgmt) 2222 continue; 2223 2224 if (fill_crypt_header) 2225 continue; 2226 2227 hdr = (void *)msdu->data; 2228 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); 2229 2230 if (frag && !fill_crypt_header && 2231 enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA) 2232 status->flag &= ~RX_FLAG_IV_STRIPPED & 2233 ~RX_FLAG_MMIC_STRIPPED; 2234 } 2235 } 2236 2237 static void ath10k_htt_rx_h_enqueue(struct ath10k *ar, 2238 struct sk_buff_head *amsdu, 2239 struct ieee80211_rx_status *status) 2240 { 2241 struct sk_buff *msdu; 2242 struct sk_buff *first_subframe; 2243 2244 first_subframe = skb_peek(amsdu); 2245 2246 while ((msdu = __skb_dequeue(amsdu))) { 2247 /* Setup per-MSDU flags */ 2248 if (skb_queue_empty(amsdu)) 2249 status->flag &= ~RX_FLAG_AMSDU_MORE; 2250 else 2251 status->flag |= RX_FLAG_AMSDU_MORE; 2252 2253 if (msdu == first_subframe) { 2254 first_subframe = NULL; 2255 status->flag &= ~RX_FLAG_ALLOW_SAME_PN; 2256 } else { 2257 status->flag |= RX_FLAG_ALLOW_SAME_PN; 2258 } 2259 2260 ath10k_htt_rx_h_queue_msdu(ar, status, msdu); 2261 } 2262 } 2263 2264 static int ath10k_unchain_msdu(struct sk_buff_head *amsdu, 2265 unsigned long *unchain_cnt) 2266 { 2267 struct sk_buff *skb, *first; 2268 int space; 2269 int total_len = 0; 2270 int amsdu_len = skb_queue_len(amsdu); 2271 2272 /* TODO: Might could optimize this by using 2273 * skb_try_coalesce or similar method to 2274 * decrease copying, or maybe get mac80211 to 2275 * provide a way to just receive a list of 2276 * skb? 2277 */ 2278 2279 first = __skb_dequeue(amsdu); 2280 2281 /* Allocate total length all at once. */ 2282 skb_queue_walk(amsdu, skb) 2283 total_len += skb->len; 2284 2285 space = total_len - skb_tailroom(first); 2286 if ((space > 0) && 2287 (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) { 2288 /* TODO: bump some rx-oom error stat */ 2289 /* put it back together so we can free the 2290 * whole list at once. 
2291 */ 2292 __skb_queue_head(amsdu, first); 2293 return -1; 2294 } 2295 2296 /* Walk list again, copying contents into 2297 * msdu_head 2298 */ 2299 while ((skb = __skb_dequeue(amsdu))) { 2300 skb_copy_from_linear_data(skb, skb_put(first, skb->len), 2301 skb->len); 2302 dev_kfree_skb_any(skb); 2303 } 2304 2305 __skb_queue_head(amsdu, first); 2306 2307 *unchain_cnt += amsdu_len - 1; 2308 2309 return 0; 2310 } 2311 2312 static void ath10k_htt_rx_h_unchain(struct ath10k *ar, 2313 struct sk_buff_head *amsdu, 2314 unsigned long *drop_cnt, 2315 unsigned long *unchain_cnt) 2316 { 2317 struct sk_buff *first; 2318 struct ath10k_hw_params *hw = &ar->hw_params; 2319 struct htt_rx_desc *rxd; 2320 struct rx_msdu_start_common *rxd_msdu_start_common; 2321 struct rx_frag_info_common *rxd_frag_info; 2322 enum rx_msdu_decap_format decap; 2323 2324 first = skb_peek(amsdu); 2325 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 2326 #if defined(__linux__) 2327 (void *)first->data - hw->rx_desc_ops->rx_desc_size); 2328 #elif defined(__FreeBSD__) 2329 (u8 *)first->data - hw->rx_desc_ops->rx_desc_size); 2330 #endif 2331 2332 rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd); 2333 rxd_frag_info = ath10k_htt_rx_desc_get_frag_info(hw, rxd); 2334 decap = MS(__le32_to_cpu(rxd_msdu_start_common->info1), 2335 RX_MSDU_START_INFO1_DECAP_FORMAT); 2336 2337 /* FIXME: Current unchaining logic can only handle simple case of raw 2338 * msdu chaining. If decapping is other than raw the chaining may be 2339 * more complex and this isn't handled by the current code. Don't even 2340 * try re-constructing such frames - it'll be pretty much garbage. 2341 */ 2342 if (decap != RX_MSDU_DECAP_RAW || 2343 skb_queue_len(amsdu) != 1 + rxd_frag_info->ring2_more_count) { 2344 *drop_cnt += skb_queue_len(amsdu); 2345 __skb_queue_purge(amsdu); 2346 return; 2347 } 2348 2349 ath10k_unchain_msdu(amsdu, unchain_cnt); 2350 } 2351 2352 static bool ath10k_htt_rx_validate_amsdu(struct ath10k *ar, 2353 struct sk_buff_head *amsdu) 2354 { 2355 u8 *subframe_hdr; 2356 struct sk_buff *first; 2357 bool is_first, is_last; 2358 struct ath10k_hw_params *hw = &ar->hw_params; 2359 struct htt_rx_desc *rxd; 2360 struct rx_msdu_end_common *rxd_msdu_end_common; 2361 struct rx_mpdu_start *rxd_mpdu_start; 2362 struct ieee80211_hdr *hdr; 2363 size_t hdr_len, crypto_len; 2364 enum htt_rx_mpdu_encrypt_type enctype; 2365 int bytes_aligned = ar->hw_params.decap_align_bytes; 2366 2367 first = skb_peek(amsdu); 2368 2369 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 2370 #if defined(__linux__) 2371 (void *)first->data - hw->rx_desc_ops->rx_desc_size); 2372 #elif defined(__FreeBSD__) 2373 (u8 *)first->data - hw->rx_desc_ops->rx_desc_size); 2374 #endif 2375 2376 rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd); 2377 rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd); 2378 hdr = (void *)ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd); 2379 2380 is_first = !!(rxd_msdu_end_common->info0 & 2381 __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)); 2382 is_last = !!(rxd_msdu_end_common->info0 & 2383 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU)); 2384 2385 /* Return in case of non-aggregated msdu */ 2386 if (is_first && is_last) 2387 return true; 2388 2389 /* First msdu flag is not set for the first msdu of the list */ 2390 if (!is_first) 2391 return false; 2392 2393 enctype = MS(__le32_to_cpu(rxd_mpdu_start->info0), 2394 RX_MPDU_START_INFO0_ENCRYPT_TYPE); 2395 2396 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2397 crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype); 2398 2399 
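/* Illustrative arithmetic (assumed typical values, not taken from the
 * driver): for a 3-address QoS data frame ieee80211_hdrlen() returns 26,
 * and with CCMP the crypto parameter length is 8, so with 4-byte decap
 * alignment the first A-MSDU subframe header would start at
 *
 *   subframe_hdr = (u8 *)hdr + round_up(26, 4) + 8;   (i.e. hdr + 36)
 *
 * The first 6 bytes there are the subframe DA, which is what gets compared
 * against rfc1042_header below.
 */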
subframe_hdr = (u8 *)hdr + round_up(hdr_len, bytes_aligned) + 2400 crypto_len; 2401 2402 /* Validate if the amsdu has a proper first subframe. 2403 * There are chances a single msdu can be received as amsdu when 2404 * the unauthenticated amsdu flag of a QoS header 2405 * gets flipped in non-SPP AMSDU's, in such cases the first 2406 * subframe has llc/snap header in place of a valid da. 2407 * return false if the da matches rfc1042 pattern 2408 */ 2409 if (ether_addr_equal(subframe_hdr, rfc1042_header)) 2410 return false; 2411 2412 return true; 2413 } 2414 2415 static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar, 2416 struct sk_buff_head *amsdu, 2417 struct ieee80211_rx_status *rx_status) 2418 { 2419 if (!rx_status->freq) { 2420 ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n"); 2421 return false; 2422 } 2423 2424 if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) { 2425 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n"); 2426 return false; 2427 } 2428 2429 if (!ath10k_htt_rx_validate_amsdu(ar, amsdu)) { 2430 ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid amsdu received\n"); 2431 return false; 2432 } 2433 2434 return true; 2435 } 2436 2437 static void ath10k_htt_rx_h_filter(struct ath10k *ar, 2438 struct sk_buff_head *amsdu, 2439 struct ieee80211_rx_status *rx_status, 2440 unsigned long *drop_cnt) 2441 { 2442 if (skb_queue_empty(amsdu)) 2443 return; 2444 2445 if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status)) 2446 return; 2447 2448 if (drop_cnt) 2449 *drop_cnt += skb_queue_len(amsdu); 2450 2451 __skb_queue_purge(amsdu); 2452 } 2453 2454 static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt) 2455 { 2456 struct ath10k *ar = htt->ar; 2457 struct ieee80211_rx_status *rx_status = &htt->rx_status; 2458 struct sk_buff_head amsdu; 2459 int ret; 2460 unsigned long drop_cnt = 0; 2461 unsigned long unchain_cnt = 0; 2462 unsigned long drop_cnt_filter = 0; 2463 unsigned long msdus_to_queue, num_msdus; 2464 enum ath10k_pkt_rx_err err = ATH10K_PKT_RX_ERR_MAX; 2465 u8 first_hdr[RX_HTT_HDR_STATUS_LEN]; 2466 2467 __skb_queue_head_init(&amsdu); 2468 2469 spin_lock_bh(&htt->rx_ring.lock); 2470 if (htt->rx_confused) { 2471 spin_unlock_bh(&htt->rx_ring.lock); 2472 return -EIO; 2473 } 2474 ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu); 2475 spin_unlock_bh(&htt->rx_ring.lock); 2476 2477 if (ret < 0) { 2478 ath10k_warn(ar, "rx ring became corrupted: %d\n", ret); 2479 __skb_queue_purge(&amsdu); 2480 /* FIXME: It's probably a good idea to reboot the 2481 * device instead of leaving it inoperable. 
2482 */ 2483 htt->rx_confused = true; 2484 return ret; 2485 } 2486 2487 num_msdus = skb_queue_len(&amsdu); 2488 2489 ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff); 2490 2491 /* only for ret = 1 indicates chained msdus */ 2492 if (ret > 0) 2493 ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt); 2494 2495 ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter); 2496 ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err, 0, 2497 false); 2498 msdus_to_queue = skb_queue_len(&amsdu); 2499 ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status); 2500 2501 ath10k_sta_update_rx_tid_stats(ar, first_hdr, num_msdus, err, 2502 unchain_cnt, drop_cnt, drop_cnt_filter, 2503 msdus_to_queue); 2504 2505 return 0; 2506 } 2507 2508 static void ath10k_htt_rx_mpdu_desc_pn_hl(struct htt_hl_rx_desc *rx_desc, 2509 union htt_rx_pn_t *pn, 2510 int pn_len_bits) 2511 { 2512 switch (pn_len_bits) { 2513 case 48: 2514 pn->pn48 = __le32_to_cpu(rx_desc->pn_31_0) + 2515 ((u64)(__le32_to_cpu(rx_desc->u0.pn_63_32) & 0xFFFF) << 32); 2516 break; 2517 case 24: 2518 pn->pn24 = __le32_to_cpu(rx_desc->pn_31_0); 2519 break; 2520 } 2521 } 2522 2523 static bool ath10k_htt_rx_pn_cmp48(union htt_rx_pn_t *new_pn, 2524 union htt_rx_pn_t *old_pn) 2525 { 2526 return ((new_pn->pn48 & 0xffffffffffffULL) <= 2527 (old_pn->pn48 & 0xffffffffffffULL)); 2528 } 2529 2530 static bool ath10k_htt_rx_pn_check_replay_hl(struct ath10k *ar, 2531 struct ath10k_peer *peer, 2532 struct htt_rx_indication_hl *rx) 2533 { 2534 bool last_pn_valid, pn_invalid = false; 2535 enum htt_txrx_sec_cast_type sec_index; 2536 enum htt_security_types sec_type; 2537 union htt_rx_pn_t new_pn = {0}; 2538 struct htt_hl_rx_desc *rx_desc; 2539 union htt_rx_pn_t *last_pn; 2540 u32 rx_desc_info, tid; 2541 int num_mpdu_ranges; 2542 2543 lockdep_assert_held(&ar->data_lock); 2544 2545 if (!peer) 2546 return false; 2547 2548 if (!(rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU)) 2549 return false; 2550 2551 num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1), 2552 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); 2553 2554 rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges]; 2555 rx_desc_info = __le32_to_cpu(rx_desc->info); 2556 2557 if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED)) 2558 return false; 2559 2560 tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID); 2561 last_pn_valid = peer->tids_last_pn_valid[tid]; 2562 last_pn = &peer->tids_last_pn[tid]; 2563 2564 if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST)) 2565 sec_index = HTT_TXRX_SEC_MCAST; 2566 else 2567 sec_index = HTT_TXRX_SEC_UCAST; 2568 2569 sec_type = peer->rx_pn[sec_index].sec_type; 2570 ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len); 2571 2572 if (sec_type != HTT_SECURITY_AES_CCMP && 2573 sec_type != HTT_SECURITY_TKIP && 2574 sec_type != HTT_SECURITY_TKIP_NOMIC) 2575 return false; 2576 2577 if (last_pn_valid) 2578 pn_invalid = ath10k_htt_rx_pn_cmp48(&new_pn, last_pn); 2579 else 2580 peer->tids_last_pn_valid[tid] = true; 2581 2582 if (!pn_invalid) 2583 last_pn->pn48 = new_pn.pn48; 2584 2585 return pn_invalid; 2586 } 2587 2588 static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt, 2589 struct htt_rx_indication_hl *rx, 2590 struct sk_buff *skb, 2591 enum htt_rx_pn_check_type check_pn_type, 2592 enum htt_rx_tkip_demic_type tkip_mic_type) 2593 { 2594 struct ath10k *ar = htt->ar; 2595 struct ath10k_peer *peer; 2596 struct htt_rx_indication_mpdu_range *mpdu_ranges; 2597 struct fw_rx_desc_hl *fw_desc; 2598 enum htt_txrx_sec_cast_type 
sec_index; 2599 enum htt_security_types sec_type; 2600 union htt_rx_pn_t new_pn = {0}; 2601 struct htt_hl_rx_desc *rx_desc; 2602 struct ieee80211_hdr *hdr; 2603 struct ieee80211_rx_status *rx_status; 2604 u16 peer_id; 2605 u8 rx_desc_len; 2606 int num_mpdu_ranges; 2607 size_t tot_hdr_len; 2608 struct ieee80211_channel *ch; 2609 bool pn_invalid, qos, first_msdu; 2610 u32 tid, rx_desc_info; 2611 2612 peer_id = __le16_to_cpu(rx->hdr.peer_id); 2613 tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID); 2614 2615 spin_lock_bh(&ar->data_lock); 2616 peer = ath10k_peer_find_by_id(ar, peer_id); 2617 spin_unlock_bh(&ar->data_lock); 2618 if (!peer && peer_id != HTT_INVALID_PEERID) 2619 ath10k_warn(ar, "Got RX ind from invalid peer: %u\n", peer_id); 2620 2621 if (!peer) 2622 return true; 2623 2624 num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1), 2625 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); 2626 mpdu_ranges = htt_rx_ind_get_mpdu_ranges_hl(rx); 2627 fw_desc = &rx->fw_desc; 2628 rx_desc_len = fw_desc->len; 2629 2630 if (fw_desc->u.bits.discard) { 2631 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt discard mpdu\n"); 2632 goto err; 2633 } 2634 2635 /* I have not yet seen any case where num_mpdu_ranges > 1. 2636 * qcacld does not seem handle that case either, so we introduce the 2637 * same limitiation here as well. 2638 */ 2639 if (num_mpdu_ranges > 1) 2640 ath10k_warn(ar, 2641 "Unsupported number of MPDU ranges: %d, ignoring all but the first\n", 2642 num_mpdu_ranges); 2643 2644 if (mpdu_ranges->mpdu_range_status != 2645 HTT_RX_IND_MPDU_STATUS_OK && 2646 mpdu_ranges->mpdu_range_status != 2647 HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR) { 2648 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt mpdu_range_status %d\n", 2649 mpdu_ranges->mpdu_range_status); 2650 goto err; 2651 } 2652 2653 rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges]; 2654 rx_desc_info = __le32_to_cpu(rx_desc->info); 2655 2656 if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST)) 2657 sec_index = HTT_TXRX_SEC_MCAST; 2658 else 2659 sec_index = HTT_TXRX_SEC_UCAST; 2660 2661 sec_type = peer->rx_pn[sec_index].sec_type; 2662 first_msdu = rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU; 2663 2664 ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len); 2665 2666 if (check_pn_type == HTT_RX_PN_CHECK && tid >= IEEE80211_NUM_TIDS) { 2667 spin_lock_bh(&ar->data_lock); 2668 pn_invalid = ath10k_htt_rx_pn_check_replay_hl(ar, peer, rx); 2669 spin_unlock_bh(&ar->data_lock); 2670 2671 if (pn_invalid) 2672 goto err; 2673 } 2674 2675 /* Strip off all headers before the MAC header before delivery to 2676 * mac80211 2677 */ 2678 tot_hdr_len = sizeof(struct htt_resp_hdr) + sizeof(rx->hdr) + 2679 sizeof(rx->ppdu) + sizeof(rx->prefix) + 2680 sizeof(rx->fw_desc) + 2681 sizeof(*mpdu_ranges) * num_mpdu_ranges + rx_desc_len; 2682 2683 skb_pull(skb, tot_hdr_len); 2684 2685 hdr = (struct ieee80211_hdr *)skb->data; 2686 qos = ieee80211_is_data_qos(hdr->frame_control); 2687 2688 rx_status = IEEE80211_SKB_RXCB(skb); 2689 memset(rx_status, 0, sizeof(*rx_status)); 2690 2691 if (rx->ppdu.combined_rssi == 0) { 2692 /* SDIO firmware does not provide signal */ 2693 rx_status->signal = 0; 2694 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; 2695 } else { 2696 rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR + 2697 rx->ppdu.combined_rssi; 2698 rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL; 2699 } 2700 2701 spin_lock_bh(&ar->data_lock); 2702 ch = ar->scan_channel; 2703 if (!ch) 2704 ch = ar->rx_channel; 2705 if (!ch) 2706 ch = ath10k_htt_rx_h_any_channel(ar); 2707 if (!ch) 
2708 ch = ar->tgt_oper_chan; 2709 spin_unlock_bh(&ar->data_lock); 2710 2711 if (ch) { 2712 rx_status->band = ch->band; 2713 rx_status->freq = ch->center_freq; 2714 } 2715 if (rx->fw_desc.flags & FW_RX_DESC_FLAGS_LAST_MSDU) 2716 rx_status->flag &= ~RX_FLAG_AMSDU_MORE; 2717 else 2718 rx_status->flag |= RX_FLAG_AMSDU_MORE; 2719 2720 /* Not entirely sure about this, but all frames from the chipset has 2721 * the protected flag set even though they have already been decrypted. 2722 * Unmasking this flag is necessary in order for mac80211 not to drop 2723 * the frame. 2724 * TODO: Verify this is always the case or find out a way to check 2725 * if there has been hw decryption. 2726 */ 2727 if (ieee80211_has_protected(hdr->frame_control)) { 2728 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); 2729 rx_status->flag |= RX_FLAG_DECRYPTED | 2730 RX_FLAG_IV_STRIPPED | 2731 RX_FLAG_MMIC_STRIPPED; 2732 2733 if (tid < IEEE80211_NUM_TIDS && 2734 first_msdu && 2735 check_pn_type == HTT_RX_PN_CHECK && 2736 (sec_type == HTT_SECURITY_AES_CCMP || 2737 sec_type == HTT_SECURITY_TKIP || 2738 sec_type == HTT_SECURITY_TKIP_NOMIC)) { 2739 u8 offset, *ivp, i; 2740 s8 keyidx = 0; 2741 __le64 pn48 = cpu_to_le64(new_pn.pn48); 2742 2743 hdr = (struct ieee80211_hdr *)skb->data; 2744 offset = ieee80211_hdrlen(hdr->frame_control); 2745 hdr->frame_control |= __cpu_to_le16(IEEE80211_FCTL_PROTECTED); 2746 rx_status->flag &= ~RX_FLAG_IV_STRIPPED; 2747 2748 memmove(skb->data - IEEE80211_CCMP_HDR_LEN, 2749 skb->data, offset); 2750 skb_push(skb, IEEE80211_CCMP_HDR_LEN); 2751 ivp = skb->data + offset; 2752 memset(skb->data + offset, 0, IEEE80211_CCMP_HDR_LEN); 2753 /* Ext IV */ 2754 ivp[IEEE80211_WEP_IV_LEN - 1] |= ATH10K_IEEE80211_EXTIV; 2755 2756 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) { 2757 if (peer->keys[i] && 2758 peer->keys[i]->flags & IEEE80211_KEY_FLAG_PAIRWISE) 2759 keyidx = peer->keys[i]->keyidx; 2760 } 2761 2762 /* Key ID */ 2763 ivp[IEEE80211_WEP_IV_LEN - 1] |= keyidx << 6; 2764 2765 if (sec_type == HTT_SECURITY_AES_CCMP) { 2766 rx_status->flag |= RX_FLAG_MIC_STRIPPED; 2767 /* pn 0, pn 1 */ 2768 memcpy(skb->data + offset, &pn48, 2); 2769 /* pn 1, pn 3 , pn 34 , pn 5 */ 2770 memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4); 2771 } else { 2772 rx_status->flag |= RX_FLAG_ICV_STRIPPED; 2773 /* TSC 0 */ 2774 memcpy(skb->data + offset + 2, &pn48, 1); 2775 /* TSC 1 */ 2776 memcpy(skb->data + offset, ((u8 *)&pn48) + 1, 1); 2777 /* TSC 2 , TSC 3 , TSC 4 , TSC 5*/ 2778 memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4); 2779 } 2780 } 2781 } 2782 2783 if (tkip_mic_type == HTT_RX_TKIP_MIC) 2784 rx_status->flag &= ~RX_FLAG_IV_STRIPPED & 2785 ~RX_FLAG_MMIC_STRIPPED; 2786 2787 if (mpdu_ranges->mpdu_range_status == HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR) 2788 rx_status->flag |= RX_FLAG_MMIC_ERROR; 2789 2790 if (!qos && tid < IEEE80211_NUM_TIDS) { 2791 u8 offset; 2792 __le16 qos_ctrl = 0; 2793 2794 hdr = (struct ieee80211_hdr *)skb->data; 2795 offset = ieee80211_hdrlen(hdr->frame_control); 2796 2797 hdr->frame_control |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA); 2798 memmove(skb->data - IEEE80211_QOS_CTL_LEN, skb->data, offset); 2799 skb_push(skb, IEEE80211_QOS_CTL_LEN); 2800 qos_ctrl = cpu_to_le16(tid); 2801 memcpy(skb->data + offset, &qos_ctrl, IEEE80211_QOS_CTL_LEN); 2802 } 2803 2804 if (ar->napi.dev) 2805 ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi); 2806 else 2807 ieee80211_rx_ni(ar->hw, skb); 2808 2809 /* We have delivered the skb to the upper layers (mac80211) so we 2810 * must not free it. 
2811 */ 2812 return false; 2813 err: 2814 /* Tell the caller that it must free the skb since we have not 2815 * consumed it 2816 */ 2817 return true; 2818 } 2819 2820 static int ath10k_htt_rx_frag_tkip_decap_nomic(struct sk_buff *skb, 2821 u16 head_len, 2822 u16 hdr_len) 2823 { 2824 u8 *ivp, *orig_hdr; 2825 2826 orig_hdr = skb->data; 2827 ivp = orig_hdr + hdr_len + head_len; 2828 2829 /* the ExtIV bit is always set to 1 for TKIP */ 2830 if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV)) 2831 return -EINVAL; 2832 2833 memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len); 2834 skb_pull(skb, IEEE80211_TKIP_IV_LEN); 2835 skb_trim(skb, skb->len - ATH10K_IEEE80211_TKIP_MICLEN); 2836 return 0; 2837 } 2838 2839 static int ath10k_htt_rx_frag_tkip_decap_withmic(struct sk_buff *skb, 2840 u16 head_len, 2841 u16 hdr_len) 2842 { 2843 u8 *ivp, *orig_hdr; 2844 2845 orig_hdr = skb->data; 2846 ivp = orig_hdr + hdr_len + head_len; 2847 2848 /* the ExtIV bit is always set to 1 for TKIP */ 2849 if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV)) 2850 return -EINVAL; 2851 2852 memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len); 2853 skb_pull(skb, IEEE80211_TKIP_IV_LEN); 2854 skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN); 2855 return 0; 2856 } 2857 2858 static int ath10k_htt_rx_frag_ccmp_decap(struct sk_buff *skb, 2859 u16 head_len, 2860 u16 hdr_len) 2861 { 2862 u8 *ivp, *orig_hdr; 2863 2864 orig_hdr = skb->data; 2865 ivp = orig_hdr + hdr_len + head_len; 2866 2867 /* the ExtIV bit is always set to 1 for CCMP */ 2868 if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV)) 2869 return -EINVAL; 2870 2871 skb_trim(skb, skb->len - IEEE80211_CCMP_MIC_LEN); 2872 memmove(orig_hdr + IEEE80211_CCMP_HDR_LEN, orig_hdr, head_len + hdr_len); 2873 skb_pull(skb, IEEE80211_CCMP_HDR_LEN); 2874 return 0; 2875 } 2876 2877 static int ath10k_htt_rx_frag_wep_decap(struct sk_buff *skb, 2878 u16 head_len, 2879 u16 hdr_len) 2880 { 2881 u8 *orig_hdr; 2882 2883 orig_hdr = skb->data; 2884 2885 memmove(orig_hdr + IEEE80211_WEP_IV_LEN, 2886 orig_hdr, head_len + hdr_len); 2887 skb_pull(skb, IEEE80211_WEP_IV_LEN); 2888 skb_trim(skb, skb->len - IEEE80211_WEP_ICV_LEN); 2889 return 0; 2890 } 2891 2892 static bool ath10k_htt_rx_proc_rx_frag_ind_hl(struct ath10k_htt *htt, 2893 struct htt_rx_fragment_indication *rx, 2894 struct sk_buff *skb) 2895 { 2896 struct ath10k *ar = htt->ar; 2897 enum htt_rx_tkip_demic_type tkip_mic = HTT_RX_NON_TKIP_MIC; 2898 enum htt_txrx_sec_cast_type sec_index; 2899 struct htt_rx_indication_hl *rx_hl; 2900 enum htt_security_types sec_type; 2901 u32 tid, frag, seq, rx_desc_info; 2902 union htt_rx_pn_t new_pn = {0}; 2903 struct htt_hl_rx_desc *rx_desc; 2904 u16 peer_id, sc, hdr_space; 2905 union htt_rx_pn_t *last_pn; 2906 struct ieee80211_hdr *hdr; 2907 int ret, num_mpdu_ranges; 2908 struct ath10k_peer *peer; 2909 struct htt_resp *resp; 2910 size_t tot_hdr_len; 2911 2912 resp = (struct htt_resp *)(skb->data + HTT_RX_FRAG_IND_INFO0_HEADER_LEN); 2913 skb_pull(skb, HTT_RX_FRAG_IND_INFO0_HEADER_LEN); 2914 skb_trim(skb, skb->len - FCS_LEN); 2915 2916 peer_id = __le16_to_cpu(rx->peer_id); 2917 rx_hl = (struct htt_rx_indication_hl *)(&resp->rx_ind_hl); 2918 2919 spin_lock_bh(&ar->data_lock); 2920 peer = ath10k_peer_find_by_id(ar, peer_id); 2921 if (!peer) { 2922 ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer: %u\n", peer_id); 2923 goto err; 2924 } 2925 2926 num_mpdu_ranges = MS(__le32_to_cpu(rx_hl->hdr.info1), 2927 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); 2928 
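/* Buffer layout sketch (inferred from the length computation below, not
 * taken from a spec): the HL fragment indication is expected to look like
 *
 *   [htt_resp_hdr][rx ind hdr][ppdu][prefix][fw_desc]
 *   [mpdu_ranges[0 .. num_mpdu_ranges - 1]][HL rx desc, fw_desc.len bytes]
 *   [802.11 header][payload ...]
 *
 * so rx_desc sits tot_hdr_len bytes into the skb and the 802.11 header
 * follows another fw_desc.len bytes after that.
 */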
2929 tot_hdr_len = sizeof(struct htt_resp_hdr) + 2930 sizeof(rx_hl->hdr) + 2931 sizeof(rx_hl->ppdu) + 2932 sizeof(rx_hl->prefix) + 2933 sizeof(rx_hl->fw_desc) + 2934 sizeof(struct htt_rx_indication_mpdu_range) * num_mpdu_ranges; 2935 2936 tid = MS(rx_hl->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID); 2937 rx_desc = (struct htt_hl_rx_desc *)(skb->data + tot_hdr_len); 2938 rx_desc_info = __le32_to_cpu(rx_desc->info); 2939 2940 hdr = (struct ieee80211_hdr *)((u8 *)rx_desc + rx_hl->fw_desc.len); 2941 2942 if (is_multicast_ether_addr(hdr->addr1)) { 2943 /* Discard the fragment with multicast DA */ 2944 goto err; 2945 } 2946 2947 if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED)) { 2948 spin_unlock_bh(&ar->data_lock); 2949 return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb, 2950 HTT_RX_NON_PN_CHECK, 2951 HTT_RX_NON_TKIP_MIC); 2952 } 2953 2954 if (ieee80211_has_retry(hdr->frame_control)) 2955 goto err; 2956 2957 hdr_space = ieee80211_hdrlen(hdr->frame_control); 2958 sc = __le16_to_cpu(hdr->seq_ctrl); 2959 seq = (sc & IEEE80211_SCTL_SEQ) >> 4; 2960 frag = sc & IEEE80211_SCTL_FRAG; 2961 2962 sec_index = MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST) ? 2963 HTT_TXRX_SEC_MCAST : HTT_TXRX_SEC_UCAST; 2964 sec_type = peer->rx_pn[sec_index].sec_type; 2965 ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len); 2966 2967 switch (sec_type) { 2968 case HTT_SECURITY_TKIP: 2969 tkip_mic = HTT_RX_TKIP_MIC; 2970 ret = ath10k_htt_rx_frag_tkip_decap_withmic(skb, 2971 tot_hdr_len + 2972 rx_hl->fw_desc.len, 2973 hdr_space); 2974 if (ret) 2975 goto err; 2976 break; 2977 case HTT_SECURITY_TKIP_NOMIC: 2978 ret = ath10k_htt_rx_frag_tkip_decap_nomic(skb, 2979 tot_hdr_len + 2980 rx_hl->fw_desc.len, 2981 hdr_space); 2982 if (ret) 2983 goto err; 2984 break; 2985 case HTT_SECURITY_AES_CCMP: 2986 ret = ath10k_htt_rx_frag_ccmp_decap(skb, 2987 tot_hdr_len + rx_hl->fw_desc.len, 2988 hdr_space); 2989 if (ret) 2990 goto err; 2991 break; 2992 case HTT_SECURITY_WEP128: 2993 case HTT_SECURITY_WEP104: 2994 case HTT_SECURITY_WEP40: 2995 ret = ath10k_htt_rx_frag_wep_decap(skb, 2996 tot_hdr_len + rx_hl->fw_desc.len, 2997 hdr_space); 2998 if (ret) 2999 goto err; 3000 break; 3001 default: 3002 break; 3003 } 3004 3005 resp = (struct htt_resp *)(skb->data); 3006 3007 if (sec_type != HTT_SECURITY_AES_CCMP && 3008 sec_type != HTT_SECURITY_TKIP && 3009 sec_type != HTT_SECURITY_TKIP_NOMIC) { 3010 spin_unlock_bh(&ar->data_lock); 3011 return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb, 3012 HTT_RX_NON_PN_CHECK, 3013 HTT_RX_NON_TKIP_MIC); 3014 } 3015 3016 last_pn = &peer->frag_tids_last_pn[tid]; 3017 3018 if (frag == 0) { 3019 if (ath10k_htt_rx_pn_check_replay_hl(ar, peer, &resp->rx_ind_hl)) 3020 goto err; 3021 3022 last_pn->pn48 = new_pn.pn48; 3023 peer->frag_tids_seq[tid] = seq; 3024 } else if (sec_type == HTT_SECURITY_AES_CCMP) { 3025 if (seq != peer->frag_tids_seq[tid]) 3026 goto err; 3027 3028 if (new_pn.pn48 != last_pn->pn48 + 1) 3029 goto err; 3030 3031 last_pn->pn48 = new_pn.pn48; 3032 last_pn = &peer->tids_last_pn[tid]; 3033 last_pn->pn48 = new_pn.pn48; 3034 } 3035 3036 spin_unlock_bh(&ar->data_lock); 3037 3038 return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb, 3039 HTT_RX_NON_PN_CHECK, tkip_mic); 3040 3041 err: 3042 spin_unlock_bh(&ar->data_lock); 3043 3044 /* Tell the caller that it must free the skb since we have not 3045 * consumed it 3046 */ 3047 return true; 3048 } 3049 3050 static void ath10k_htt_rx_proc_rx_ind_ll(struct ath10k_htt *htt, 3051 struct htt_rx_indication 
*rx) 3052 { 3053 struct ath10k *ar = htt->ar; 3054 struct htt_rx_indication_mpdu_range *mpdu_ranges; 3055 int num_mpdu_ranges; 3056 int i, mpdu_count = 0; 3057 u16 peer_id; 3058 u8 tid; 3059 3060 num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1), 3061 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); 3062 peer_id = __le16_to_cpu(rx->hdr.peer_id); 3063 tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID); 3064 3065 mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx); 3066 3067 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ", 3068 rx, struct_size(rx, mpdu_ranges, num_mpdu_ranges)); 3069 3070 for (i = 0; i < num_mpdu_ranges; i++) 3071 mpdu_count += mpdu_ranges[i].mpdu_count; 3072 3073 atomic_add(mpdu_count, &htt->num_mpdus_ready); 3074 3075 ath10k_sta_update_rx_tid_stats_ampdu(ar, peer_id, tid, mpdu_ranges, 3076 num_mpdu_ranges); 3077 } 3078 3079 static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar, 3080 struct sk_buff *skb) 3081 { 3082 struct ath10k_htt *htt = &ar->htt; 3083 struct htt_resp *resp = (struct htt_resp *)skb->data; 3084 struct htt_tx_done tx_done = {}; 3085 int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS); 3086 __le16 msdu_id, *msdus; 3087 bool rssi_enabled = false; 3088 u8 msdu_count = 0, num_airtime_records, tid; 3089 int i, htt_pad = 0; 3090 struct htt_data_tx_compl_ppdu_dur *ppdu_info; 3091 struct ath10k_peer *peer; 3092 u16 ppdu_info_offset = 0, peer_id; 3093 u32 tx_duration; 3094 3095 switch (status) { 3096 case HTT_DATA_TX_STATUS_NO_ACK: 3097 tx_done.status = HTT_TX_COMPL_STATE_NOACK; 3098 break; 3099 case HTT_DATA_TX_STATUS_OK: 3100 tx_done.status = HTT_TX_COMPL_STATE_ACK; 3101 break; 3102 case HTT_DATA_TX_STATUS_DISCARD: 3103 case HTT_DATA_TX_STATUS_POSTPONE: 3104 case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL: 3105 tx_done.status = HTT_TX_COMPL_STATE_DISCARD; 3106 break; 3107 default: 3108 ath10k_warn(ar, "unhandled tx completion status %d\n", status); 3109 tx_done.status = HTT_TX_COMPL_STATE_DISCARD; 3110 break; 3111 } 3112 3113 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n", 3114 resp->data_tx_completion.num_msdus); 3115 3116 msdu_count = resp->data_tx_completion.num_msdus; 3117 msdus = resp->data_tx_completion.msdus; 3118 rssi_enabled = ath10k_is_rssi_enable(&ar->hw_params, resp); 3119 3120 if (rssi_enabled) 3121 htt_pad = ath10k_tx_data_rssi_get_pad_bytes(&ar->hw_params, 3122 resp); 3123 3124 for (i = 0; i < msdu_count; i++) { 3125 msdu_id = msdus[i]; 3126 tx_done.msdu_id = __le16_to_cpu(msdu_id); 3127 3128 if (rssi_enabled) { 3129 /* Total no of MSDUs should be even, 3130 * if odd MSDUs are sent firmware fills 3131 * last msdu id with 0xffff 3132 */ 3133 if (msdu_count & 0x01) { 3134 msdu_id = msdus[msdu_count + i + 1 + htt_pad]; 3135 tx_done.ack_rssi = __le16_to_cpu(msdu_id); 3136 } else { 3137 msdu_id = msdus[msdu_count + i + htt_pad]; 3138 tx_done.ack_rssi = __le16_to_cpu(msdu_id); 3139 } 3140 } 3141 3142 /* kfifo_put: In practice firmware shouldn't fire off per-CE 3143 * interrupt and main interrupt (MSI/-X range case) for the same 3144 * HTC service so it should be safe to use kfifo_put w/o lock. 3145 * 3146 * From kfifo_put() documentation: 3147 * Note that with only one concurrent reader and one concurrent 3148 * writer, you don't need extra locking to use these macro. 
3149 */ 3150 if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) { 3151 ath10k_txrx_tx_unref(htt, &tx_done); 3152 } else if (!kfifo_put(&htt->txdone_fifo, tx_done)) { 3153 ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n", 3154 tx_done.msdu_id, tx_done.status); 3155 ath10k_txrx_tx_unref(htt, &tx_done); 3156 } 3157 } 3158 3159 if (!(resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_PPDU_DURATION_PRESENT)) 3160 return; 3161 3162 ppdu_info_offset = (msdu_count & 0x01) ? msdu_count + 1 : msdu_count; 3163 3164 if (rssi_enabled) 3165 ppdu_info_offset += ppdu_info_offset; 3166 3167 if (resp->data_tx_completion.flags2 & 3168 (HTT_TX_CMPL_FLAG_PPID_PRESENT | HTT_TX_CMPL_FLAG_PA_PRESENT)) 3169 ppdu_info_offset += 2; 3170 3171 ppdu_info = (struct htt_data_tx_compl_ppdu_dur *)&msdus[ppdu_info_offset]; 3172 num_airtime_records = FIELD_GET(HTT_TX_COMPL_PPDU_DUR_INFO0_NUM_ENTRIES_MASK, 3173 __le32_to_cpu(ppdu_info->info0)); 3174 3175 for (i = 0; i < num_airtime_records; i++) { 3176 struct htt_data_tx_ppdu_dur *ppdu_dur; 3177 u32 info0; 3178 3179 ppdu_dur = &ppdu_info->ppdu_dur[i]; 3180 info0 = __le32_to_cpu(ppdu_dur->info0); 3181 3182 peer_id = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_PEER_ID_MASK, 3183 info0); 3184 rcu_read_lock(); 3185 spin_lock_bh(&ar->data_lock); 3186 3187 peer = ath10k_peer_find_by_id(ar, peer_id); 3188 if (!peer || !peer->sta) { 3189 spin_unlock_bh(&ar->data_lock); 3190 rcu_read_unlock(); 3191 continue; 3192 } 3193 3194 tid = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_TID_MASK, info0) & 3195 IEEE80211_QOS_CTL_TID_MASK; 3196 tx_duration = __le32_to_cpu(ppdu_dur->tx_duration); 3197 3198 ieee80211_sta_register_airtime(peer->sta, tid, tx_duration, 0); 3199 3200 spin_unlock_bh(&ar->data_lock); 3201 rcu_read_unlock(); 3202 } 3203 } 3204 3205 static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp) 3206 { 3207 struct htt_rx_addba *ev = &resp->rx_addba; 3208 struct ath10k_peer *peer; 3209 struct ath10k_vif *arvif; 3210 u16 info0, tid, peer_id; 3211 3212 info0 = __le16_to_cpu(ev->info0); 3213 tid = MS(info0, HTT_RX_BA_INFO0_TID); 3214 peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID); 3215 3216 ath10k_dbg(ar, ATH10K_DBG_HTT, 3217 "htt rx addba tid %u peer_id %u size %u\n", 3218 tid, peer_id, ev->window_size); 3219 3220 spin_lock_bh(&ar->data_lock); 3221 peer = ath10k_peer_find_by_id(ar, peer_id); 3222 if (!peer) { 3223 ath10k_warn(ar, "received addba event for invalid peer_id: %u\n", 3224 peer_id); 3225 spin_unlock_bh(&ar->data_lock); 3226 return; 3227 } 3228 3229 arvif = ath10k_get_arvif(ar, peer->vdev_id); 3230 if (!arvif) { 3231 ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n", 3232 peer->vdev_id); 3233 spin_unlock_bh(&ar->data_lock); 3234 return; 3235 } 3236 3237 ath10k_dbg(ar, ATH10K_DBG_HTT, 3238 "htt rx start rx ba session sta %pM tid %u size %u\n", 3239 peer->addr, tid, ev->window_size); 3240 3241 ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid); 3242 spin_unlock_bh(&ar->data_lock); 3243 } 3244 3245 static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp) 3246 { 3247 struct htt_rx_delba *ev = &resp->rx_delba; 3248 struct ath10k_peer *peer; 3249 struct ath10k_vif *arvif; 3250 u16 info0, tid, peer_id; 3251 3252 info0 = __le16_to_cpu(ev->info0); 3253 tid = MS(info0, HTT_RX_BA_INFO0_TID); 3254 peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID); 3255 3256 ath10k_dbg(ar, ATH10K_DBG_HTT, 3257 "htt rx delba tid %u peer_id %u\n", 3258 tid, peer_id); 3259 3260 spin_lock_bh(&ar->data_lock); 3261 peer = ath10k_peer_find_by_id(ar, peer_id); 3262 if 
(!peer) { 3263 ath10k_warn(ar, "received addba event for invalid peer_id: %u\n", 3264 peer_id); 3265 spin_unlock_bh(&ar->data_lock); 3266 return; 3267 } 3268 3269 arvif = ath10k_get_arvif(ar, peer->vdev_id); 3270 if (!arvif) { 3271 ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n", 3272 peer->vdev_id); 3273 spin_unlock_bh(&ar->data_lock); 3274 return; 3275 } 3276 3277 ath10k_dbg(ar, ATH10K_DBG_HTT, 3278 "htt rx stop rx ba session sta %pM tid %u\n", 3279 peer->addr, tid); 3280 3281 ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid); 3282 spin_unlock_bh(&ar->data_lock); 3283 } 3284 3285 static int ath10k_htt_rx_extract_amsdu(struct ath10k_hw_params *hw, 3286 struct sk_buff_head *list, 3287 struct sk_buff_head *amsdu) 3288 { 3289 struct sk_buff *msdu; 3290 struct htt_rx_desc *rxd; 3291 struct rx_msdu_end_common *rxd_msdu_end_common; 3292 3293 if (skb_queue_empty(list)) 3294 return -ENOBUFS; 3295 3296 if (WARN_ON(!skb_queue_empty(amsdu))) 3297 return -EINVAL; 3298 3299 while ((msdu = __skb_dequeue(list))) { 3300 __skb_queue_tail(amsdu, msdu); 3301 3302 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 3303 #if defined(__linux__) 3304 (void *)msdu->data - 3305 #elif defined(__FreeBSD__) 3306 (u8 *)msdu->data - 3307 #endif 3308 hw->rx_desc_ops->rx_desc_size); 3309 3310 rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd); 3311 if (rxd_msdu_end_common->info0 & 3312 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU)) 3313 break; 3314 } 3315 3316 msdu = skb_peek_tail(amsdu); 3317 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 3318 #if defined(__linux__) 3319 (void *)msdu->data - hw->rx_desc_ops->rx_desc_size); 3320 #elif defined(__FreeBSD__) 3321 (u8 *)msdu->data - hw->rx_desc_ops->rx_desc_size); 3322 #endif 3323 3324 rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd); 3325 if (!(rxd_msdu_end_common->info0 & 3326 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) { 3327 skb_queue_splice_init(amsdu, list); 3328 return -EAGAIN; 3329 } 3330 3331 return 0; 3332 } 3333 3334 static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status, 3335 struct sk_buff *skb) 3336 { 3337 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 3338 3339 if (!ieee80211_has_protected(hdr->frame_control)) 3340 return; 3341 3342 /* Offloaded frames are already decrypted but firmware insists they are 3343 * protected in the 802.11 header. Strip the flag. Otherwise mac80211 3344 * will drop the frame. 3345 */ 3346 3347 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); 3348 status->flag |= RX_FLAG_DECRYPTED | 3349 RX_FLAG_IV_STRIPPED | 3350 RX_FLAG_MMIC_STRIPPED; 3351 } 3352 3353 static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar, 3354 struct sk_buff_head *list) 3355 { 3356 struct ath10k_htt *htt = &ar->htt; 3357 struct ieee80211_rx_status *status = &htt->rx_status; 3358 struct htt_rx_offload_msdu *rx; 3359 struct sk_buff *msdu; 3360 size_t offset; 3361 3362 while ((msdu = __skb_dequeue(list))) { 3363 /* Offloaded frames don't have Rx descriptor. Instead they have 3364 * a short meta information header. 3365 */ 3366 3367 rx = (void *)msdu->data; 3368 3369 skb_put(msdu, sizeof(*rx)); 3370 skb_pull(msdu, sizeof(*rx)); 3371 3372 if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) { 3373 ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n"); 3374 dev_kfree_skb_any(msdu); 3375 continue; 3376 } 3377 3378 skb_put(msdu, __le16_to_cpu(rx->msdu_len)); 3379 3380 /* Offloaded rx header length isn't multiple of 2 nor 4 so the 3381 * actual payload is unaligned. Align the frame. 
Otherwise 3382 * mac80211 complains. This shouldn't reduce performance much 3383 * because these offloaded frames are rare. 3384 */ 3385 offset = 4 - ((unsigned long)msdu->data & 3); 3386 skb_put(msdu, offset); 3387 memmove(msdu->data + offset, msdu->data, msdu->len); 3388 skb_pull(msdu, offset); 3389 3390 /* FIXME: The frame is NWifi. Re-construct QoS Control 3391 * if possible later. 3392 */ 3393 3394 memset(status, 0, sizeof(*status)); 3395 status->flag |= RX_FLAG_NO_SIGNAL_VAL; 3396 3397 ath10k_htt_rx_h_rx_offload_prot(status, msdu); 3398 ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id); 3399 ath10k_htt_rx_h_queue_msdu(ar, status, msdu); 3400 } 3401 } 3402 3403 static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb) 3404 { 3405 struct ath10k_htt *htt = &ar->htt; 3406 struct htt_resp *resp = (void *)skb->data; 3407 struct ieee80211_rx_status *status = &htt->rx_status; 3408 struct sk_buff_head list; 3409 struct sk_buff_head amsdu; 3410 u16 peer_id; 3411 u16 msdu_count; 3412 u8 vdev_id; 3413 u8 tid; 3414 bool offload; 3415 bool frag; 3416 int ret; 3417 3418 lockdep_assert_held(&htt->rx_ring.lock); 3419 3420 if (htt->rx_confused) 3421 return -EIO; 3422 3423 skb_pull(skb, sizeof(resp->hdr)); 3424 skb_pull(skb, sizeof(resp->rx_in_ord_ind)); 3425 3426 peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id); 3427 msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count); 3428 vdev_id = resp->rx_in_ord_ind.vdev_id; 3429 tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID); 3430 offload = !!(resp->rx_in_ord_ind.info & 3431 HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK); 3432 frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK); 3433 3434 ath10k_dbg(ar, ATH10K_DBG_HTT, 3435 "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n", 3436 vdev_id, peer_id, tid, offload, frag, msdu_count); 3437 3438 if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) { 3439 ath10k_warn(ar, "dropping invalid in order rx indication\n"); 3440 return -EINVAL; 3441 } 3442 3443 /* The event can deliver more than 1 A-MSDU. Each A-MSDU is later 3444 * extracted and processed. 3445 */ 3446 __skb_queue_head_init(&list); 3447 if (ar->hw_params.target_64bit) 3448 ret = ath10k_htt_rx_pop_paddr64_list(htt, &resp->rx_in_ord_ind, 3449 &list); 3450 else 3451 ret = ath10k_htt_rx_pop_paddr32_list(htt, &resp->rx_in_ord_ind, 3452 &list); 3453 3454 if (ret < 0) { 3455 ath10k_warn(ar, "failed to pop paddr list: %d\n", ret); 3456 htt->rx_confused = true; 3457 return -EIO; 3458 } 3459 3460 /* Offloaded frames are very different and need to be handled 3461 * separately. 3462 */ 3463 if (offload) 3464 ath10k_htt_rx_h_rx_offload(ar, &list); 3465 3466 while (!skb_queue_empty(&list)) { 3467 __skb_queue_head_init(&amsdu); 3468 ret = ath10k_htt_rx_extract_amsdu(&ar->hw_params, &list, &amsdu); 3469 switch (ret) { 3470 case 0: 3471 /* Note: The in-order indication may report interleaved 3472 * frames from different PPDUs meaning reported rx rate 3473 * to mac80211 isn't accurate/reliable. It's still 3474 * better to report something than nothing though. This 3475 * should still give an idea about rx rate to the user. 3476 */ 3477 ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id); 3478 ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL); 3479 ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL, 3480 NULL, peer_id, frag); 3481 ath10k_htt_rx_h_enqueue(ar, &amsdu, status); 3482 break; 3483 case -EAGAIN: 3484 fallthrough; 3485 default: 3486 /* Should not happen. 
 */
			ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
			htt->rx_confused = true;
			__skb_queue_purge(&list);
			return -EIO;
		}
	}
	return ret;
}

static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
						   const __le32 *resp_ids,
						   int num_resp_ids)
{
	int i;
	u32 resp_id;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
		   num_resp_ids);

	for (i = 0; i < num_resp_ids; i++) {
		resp_id = le32_to_cpu(resp_ids[i]);

		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
			   resp_id);

		/* TODO: free resp_id */
	}
}

static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_hw *hw = ar->hw;
	struct ieee80211_txq *txq;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_fetch_record *record;
	size_t len;
	size_t max_num_bytes;
	size_t max_num_msdus;
	size_t num_bytes;
	size_t num_msdus;
	const __le32 *resp_ids;
	u16 num_records;
	u16 num_resp_ids;
	u16 peer_id;
	u8 tid;
	int ret;
	int i;
	bool may_tx;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
		return;
	}

	num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
	num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);

	len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
	len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %u num resps %u seq %u\n",
		   num_records, num_resp_ids,
		   le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));

	if (!ar->htt.tx_q_state.enabled) {
		ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
		return;
	}

	if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
		ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
		return;
	}

	rcu_read_lock();

	for (i = 0; i < num_records; i++) {
		record = &resp->tx_fetch_ind.records[i];
		peer_id = MS(le16_to_cpu(record->info),
			     HTT_TX_FETCH_RECORD_INFO_PEER_ID);
		tid = MS(le16_to_cpu(record->info),
			 HTT_TX_FETCH_RECORD_INFO_TID);
		max_num_msdus = le16_to_cpu(record->num_msdus);
		max_num_bytes = le32_to_cpu(record->num_bytes);

		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %u tid %u msdus %zu bytes %zu\n",
			   i, peer_id, tid, max_num_msdus, max_num_bytes);

		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
			ath10k_warn(ar, "received out of range peer_id %u tid %u\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->data_lock);
		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
		spin_unlock_bh(&ar->data_lock);

		/* It is okay to release the lock and use txq because RCU read
		 * lock is held.
		 */

		if (unlikely(!txq)) {
			ath10k_warn(ar, "failed to lookup txq for peer_id %u tid %u\n",
				    peer_id, tid);
			continue;
		}

		num_msdus = 0;
		num_bytes = 0;

		ieee80211_txq_schedule_start(hw, txq->ac);
		may_tx = ieee80211_txq_may_transmit(hw, txq);
		while (num_msdus < max_num_msdus &&
		       num_bytes < max_num_bytes) {
			if (!may_tx)
				break;

			ret = ath10k_mac_tx_push_txq(hw, txq);
			if (ret < 0)
				break;

			num_msdus++;
			num_bytes += ret;
		}
		ieee80211_return_txq(hw, txq, false);
		ieee80211_txq_schedule_end(hw, txq->ac);

		record->num_msdus = cpu_to_le16(num_msdus);
		record->num_bytes = cpu_to_le32(num_bytes);

		ath10k_htt_tx_txq_recalc(hw, txq);
	}

	rcu_read_unlock();

	resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);

	ret = ath10k_htt_tx_fetch_resp(ar,
				       resp->tx_fetch_ind.token,
				       resp->tx_fetch_ind.fetch_seq_num,
				       resp->tx_fetch_ind.records,
				       num_records);
	if (unlikely(ret)) {
		ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
			    le32_to_cpu(resp->tx_fetch_ind.token), ret);
		/* FIXME: request fw restart */
	}

	ath10k_htt_tx_txq_sync(ar);
}

static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
					   struct sk_buff *skb)
{
	const struct htt_resp *resp = (void *)skb->data;
	size_t len;
	int num_resp_ids;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
		return;
	}

	num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
	len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
		return;
	}

	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
					       resp->tx_fetch_confirm.resp_ids,
					       num_resp_ids);
}

static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
					     struct sk_buff *skb)
{
	const struct htt_resp *resp = (void *)skb->data;
	const struct htt_tx_mode_switch_record *record;
	struct ieee80211_txq *txq;
	struct ath10k_txq *artxq;
	size_t len;
	size_t num_records;
	enum htt_tx_mode_switch_mode mode;
	bool enable;
	u16 info0;
	u16 info1;
	u16 threshold;
	u16 peer_id;
	u8 tid;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
		return;
	}

	info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
	info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);

	enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
	num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
	mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
	threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx tx mode switch ind info0 0x%04hx info1 0x%04x enable %d num records %zd mode %d threshold %u\n",
		   info0, info1, enable, num_records, mode, threshold);

	len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n");
		return;
	}

	switch (mode) {
	case HTT_TX_MODE_SWITCH_PUSH:
	case HTT_TX_MODE_SWITCH_PUSH_PULL:
		break;
	default:
		ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n",
			    mode);
		return;
	}

	if (!enable)
		return;

	ar->htt.tx_q_state.enabled = enable;
	ar->htt.tx_q_state.mode = mode;
	ar->htt.tx_q_state.num_push_allowed = threshold;

	rcu_read_lock();

	for (i = 0; i < num_records; i++) {
		record = &resp->tx_mode_switch_ind.records[i];
		info0 = le16_to_cpu(record->info0);
		peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
		tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);

		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
			ath10k_warn(ar, "received out of range peer_id %u tid %u\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->data_lock);
		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
		spin_unlock_bh(&ar->data_lock);

		/* It is okay to release the lock and use txq because RCU read
		 * lock is held.
		 */

		if (unlikely(!txq)) {
			ath10k_warn(ar, "failed to lookup txq for peer_id %u tid %u\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->htt.tx_lock);
		artxq = (void *)txq->drv_priv;
		artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
		spin_unlock_bh(&ar->htt.tx_lock);
	}

	rcu_read_unlock();

	ath10k_mac_tx_push_pending(ar);
}

void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	bool release;

	release = ath10k_htt_t2h_msg_handler(ar, skb);

	/* Free the indication buffer */
	if (release)
		dev_kfree_skb_any(skb);
}

static inline s8 ath10k_get_legacy_rate_idx(struct ath10k *ar, u8 rate)
{
	static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12,
					  18, 24, 36, 48, 54};
	int i;

	for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) {
		if (rate == legacy_rates[i])
			return i;
	}

	ath10k_warn(ar, "Invalid legacy rate %d peer stats", rate);
	return -EINVAL;
}

static void
ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
				    struct ath10k_sta *arsta,
				    struct ath10k_per_peer_tx_stats *pstats,
				    s8 legacy_rate_idx)
{
	struct rate_info *txrate = &arsta->txrate;
	struct ath10k_htt_tx_stats *tx_stats;
	int idx, ht_idx, gi, mcs, bw, nss;
	unsigned long flags;

	if (!arsta->tx_stats)
		return;

	tx_stats = arsta->tx_stats;
	flags = txrate->flags;
	gi = test_bit(ATH10K_RATE_INFO_FLAGS_SGI_BIT, &flags);
	mcs = ATH10K_HW_MCS_RATE(pstats->ratecode);
	bw = txrate->bw;
	nss = txrate->nss;
	ht_idx = mcs + (nss - 1) * 8;
	idx = mcs * 8 + 8 * 10 * (nss - 1);
	idx += bw * 2 + gi;

#define STATS_OP_FMT(name) tx_stats->stats[ATH10K_STATS_TYPE_##name]

	if (txrate->flags & RATE_INFO_FLAGS_VHT_MCS) {
		STATS_OP_FMT(SUCC).vht[0][mcs] += pstats->succ_bytes;
		STATS_OP_FMT(SUCC).vht[1][mcs] += pstats->succ_pkts;
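		/* In each of these stats tables, row [0] accumulates byte
		 * counts and row [1] accumulates packet counts.
		 */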
		STATS_OP_FMT(FAIL).vht[0][mcs] += pstats->failed_bytes;
		STATS_OP_FMT(FAIL).vht[1][mcs] += pstats->failed_pkts;
		STATS_OP_FMT(RETRY).vht[0][mcs] += pstats->retry_bytes;
		STATS_OP_FMT(RETRY).vht[1][mcs] += pstats->retry_pkts;
	} else if (txrate->flags & RATE_INFO_FLAGS_MCS) {
		STATS_OP_FMT(SUCC).ht[0][ht_idx] += pstats->succ_bytes;
		STATS_OP_FMT(SUCC).ht[1][ht_idx] += pstats->succ_pkts;
		STATS_OP_FMT(FAIL).ht[0][ht_idx] += pstats->failed_bytes;
		STATS_OP_FMT(FAIL).ht[1][ht_idx] += pstats->failed_pkts;
		STATS_OP_FMT(RETRY).ht[0][ht_idx] += pstats->retry_bytes;
		STATS_OP_FMT(RETRY).ht[1][ht_idx] += pstats->retry_pkts;
	} else {
		mcs = legacy_rate_idx;

		STATS_OP_FMT(SUCC).legacy[0][mcs] += pstats->succ_bytes;
		STATS_OP_FMT(SUCC).legacy[1][mcs] += pstats->succ_pkts;
		STATS_OP_FMT(FAIL).legacy[0][mcs] += pstats->failed_bytes;
		STATS_OP_FMT(FAIL).legacy[1][mcs] += pstats->failed_pkts;
		STATS_OP_FMT(RETRY).legacy[0][mcs] += pstats->retry_bytes;
		STATS_OP_FMT(RETRY).legacy[1][mcs] += pstats->retry_pkts;
	}

	if (ATH10K_HW_AMPDU(pstats->flags)) {
		tx_stats->ba_fails += ATH10K_HW_BA_FAIL(pstats->flags);

		if (txrate->flags & RATE_INFO_FLAGS_MCS) {
			STATS_OP_FMT(AMPDU).ht[0][ht_idx] +=
				pstats->succ_bytes + pstats->retry_bytes;
			STATS_OP_FMT(AMPDU).ht[1][ht_idx] +=
				pstats->succ_pkts + pstats->retry_pkts;
		} else {
			STATS_OP_FMT(AMPDU).vht[0][mcs] +=
				pstats->succ_bytes + pstats->retry_bytes;
			STATS_OP_FMT(AMPDU).vht[1][mcs] +=
				pstats->succ_pkts + pstats->retry_pkts;
		}
		STATS_OP_FMT(AMPDU).bw[0][bw] +=
			pstats->succ_bytes + pstats->retry_bytes;
		STATS_OP_FMT(AMPDU).nss[0][nss - 1] +=
			pstats->succ_bytes + pstats->retry_bytes;
		STATS_OP_FMT(AMPDU).gi[0][gi] +=
			pstats->succ_bytes + pstats->retry_bytes;
		STATS_OP_FMT(AMPDU).rate_table[0][idx] +=
			pstats->succ_bytes + pstats->retry_bytes;
		STATS_OP_FMT(AMPDU).bw[1][bw] +=
			pstats->succ_pkts + pstats->retry_pkts;
		STATS_OP_FMT(AMPDU).nss[1][nss - 1] +=
			pstats->succ_pkts + pstats->retry_pkts;
		STATS_OP_FMT(AMPDU).gi[1][gi] +=
			pstats->succ_pkts + pstats->retry_pkts;
		STATS_OP_FMT(AMPDU).rate_table[1][idx] +=
			pstats->succ_pkts + pstats->retry_pkts;
	} else {
		tx_stats->ack_fails +=
				ATH10K_HW_BA_FAIL(pstats->flags);
	}

	STATS_OP_FMT(SUCC).bw[0][bw] += pstats->succ_bytes;
	STATS_OP_FMT(SUCC).nss[0][nss - 1] += pstats->succ_bytes;
	STATS_OP_FMT(SUCC).gi[0][gi] += pstats->succ_bytes;

	STATS_OP_FMT(SUCC).bw[1][bw] += pstats->succ_pkts;
	STATS_OP_FMT(SUCC).nss[1][nss - 1] += pstats->succ_pkts;
	STATS_OP_FMT(SUCC).gi[1][gi] += pstats->succ_pkts;

	STATS_OP_FMT(FAIL).bw[0][bw] += pstats->failed_bytes;
	STATS_OP_FMT(FAIL).nss[0][nss - 1] += pstats->failed_bytes;
	STATS_OP_FMT(FAIL).gi[0][gi] += pstats->failed_bytes;

	STATS_OP_FMT(FAIL).bw[1][bw] += pstats->failed_pkts;
	STATS_OP_FMT(FAIL).nss[1][nss - 1] += pstats->failed_pkts;
	STATS_OP_FMT(FAIL).gi[1][gi] += pstats->failed_pkts;

	STATS_OP_FMT(RETRY).bw[0][bw] += pstats->retry_bytes;
	STATS_OP_FMT(RETRY).nss[0][nss - 1] += pstats->retry_bytes;
	STATS_OP_FMT(RETRY).gi[0][gi] += pstats->retry_bytes;

	STATS_OP_FMT(RETRY).bw[1][bw] += pstats->retry_pkts;
	STATS_OP_FMT(RETRY).nss[1][nss - 1] += pstats->retry_pkts;
	STATS_OP_FMT(RETRY).gi[1][gi] += pstats->retry_pkts;

	if (txrate->flags >= RATE_INFO_FLAGS_MCS) {
		STATS_OP_FMT(SUCC).rate_table[0][idx] += pstats->succ_bytes;
		STATS_OP_FMT(SUCC).rate_table[1][idx] += pstats->succ_pkts;
		STATS_OP_FMT(FAIL).rate_table[0][idx] += pstats->failed_bytes;
		STATS_OP_FMT(FAIL).rate_table[1][idx] += pstats->failed_pkts;
		STATS_OP_FMT(RETRY).rate_table[0][idx] += pstats->retry_bytes;
		STATS_OP_FMT(RETRY).rate_table[1][idx] += pstats->retry_pkts;
	}

	tx_stats->tx_duration += pstats->duration;
}

static void
ath10k_update_per_peer_tx_stats(struct ath10k *ar,
				struct ieee80211_sta *sta,
				struct ath10k_per_peer_tx_stats *peer_stats)
{
	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
	struct ieee80211_chanctx_conf *conf = NULL;
	u8 rate = 0, sgi;
	s8 rate_idx = 0;
	bool skip_auto_rate;
	struct rate_info txrate;

	lockdep_assert_held(&ar->data_lock);

	txrate.flags = ATH10K_HW_PREAMBLE(peer_stats->ratecode);
	txrate.bw = ATH10K_HW_BW(peer_stats->flags);
	txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode);
	txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode);
	sgi = ATH10K_HW_GI(peer_stats->flags);
	skip_auto_rate = ATH10K_FW_SKIPPED_RATE_CTRL(peer_stats->flags);

	/* Firmware's rate control skips broadcast/management frames,
	 * if host has configured fixed rates and in some other special cases.
	 */
	if (skip_auto_rate)
		return;

	if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) {
		ath10k_warn(ar, "Invalid VHT mcs %d peer stats", txrate.mcs);
		return;
	}

	if (txrate.flags == WMI_RATE_PREAMBLE_HT &&
	    (txrate.mcs > 7 || txrate.nss < 1)) {
		ath10k_warn(ar, "Invalid HT mcs %d nss %d peer stats",
			    txrate.mcs, txrate.nss);
		return;
	}

	memset(&arsta->txrate, 0, sizeof(arsta->txrate));
	memset(&arsta->tx_info.status, 0, sizeof(arsta->tx_info.status));
	if (txrate.flags == WMI_RATE_PREAMBLE_CCK ||
	    txrate.flags == WMI_RATE_PREAMBLE_OFDM) {
		rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode);
		/* This is hacky, FW sends CCK rate 5.5Mbps as 6 */
		if (rate == 6 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
			rate = 5;
		rate_idx = ath10k_get_legacy_rate_idx(ar, rate);
		if (rate_idx < 0)
			return;
		arsta->txrate.legacy = rate;
	} else if (txrate.flags == WMI_RATE_PREAMBLE_HT) {
		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
		arsta->txrate.mcs = txrate.mcs + 8 * (txrate.nss - 1);
	} else {
		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
		arsta->txrate.mcs = txrate.mcs;
	}

	switch (txrate.flags) {
	case WMI_RATE_PREAMBLE_OFDM:
		if (arsta->arvif && arsta->arvif->vif)
			conf = rcu_dereference(arsta->arvif->vif->chanctx_conf);
		if (conf && conf->def.chan->band == NL80211_BAND_5GHZ)
			arsta->tx_info.status.rates[0].idx = rate_idx - 4;
		break;
	case WMI_RATE_PREAMBLE_CCK:
		arsta->tx_info.status.rates[0].idx = rate_idx;
		if (sgi)
			arsta->tx_info.status.rates[0].flags |=
				(IEEE80211_TX_RC_USE_SHORT_PREAMBLE |
				 IEEE80211_TX_RC_SHORT_GI);
		break;
	case WMI_RATE_PREAMBLE_HT:
		arsta->tx_info.status.rates[0].idx =
				txrate.mcs + ((txrate.nss - 1) * 8);
		if (sgi)
			arsta->tx_info.status.rates[0].flags |=
					IEEE80211_TX_RC_SHORT_GI;
		arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_MCS;
		break;
	case WMI_RATE_PREAMBLE_VHT:
		ieee80211_rate_set_vht(&arsta->tx_info.status.rates[0],
				       txrate.mcs, txrate.nss);
		if (sgi)
			arsta->tx_info.status.rates[0].flags |=
				IEEE80211_TX_RC_SHORT_GI;
		arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_VHT_MCS;
		break;
	}

	arsta->txrate.nss = txrate.nss;
	arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw);
	arsta->last_tx_bitrate = cfg80211_calculate_bitrate(&arsta->txrate);
	if (sgi)
		arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;

	switch (arsta->txrate.bw) {
	case RATE_INFO_BW_40:
		arsta->tx_info.status.rates[0].flags |=
			IEEE80211_TX_RC_40_MHZ_WIDTH;
		break;
	case RATE_INFO_BW_80:
		arsta->tx_info.status.rates[0].flags |=
			IEEE80211_TX_RC_80_MHZ_WIDTH;
		break;
	}

	if (peer_stats->succ_pkts) {
		arsta->tx_info.flags = IEEE80211_TX_STAT_ACK;
		arsta->tx_info.status.rates[0].count = 1;
		ieee80211_tx_rate_update(ar->hw, sta, &arsta->tx_info);
	}

	if (ar->htt.disable_tx_comp) {
		arsta->tx_failed += peer_stats->failed_pkts;
		ath10k_dbg(ar, ATH10K_DBG_HTT, "tx failed %d\n",
			   arsta->tx_failed);
	}

	arsta->tx_retries += peer_stats->retry_pkts;
	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx retries %d", arsta->tx_retries);

	if (ath10k_debug_is_extd_tx_stats_enabled(ar))
		ath10k_accumulate_per_peer_tx_stats(ar, arsta, peer_stats,
						    rate_idx);
}

static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
					struct sk_buff *skb)
{
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
	struct htt_per_peer_tx_stats_ind *tx_stats;
	struct ieee80211_sta *sta;
	struct ath10k_peer *peer;
	int peer_id, i;
	u8 ppdu_len, num_ppdu;

	num_ppdu = resp->peer_tx_stats.num_ppdu;
	ppdu_len = resp->peer_tx_stats.ppdu_len * sizeof(__le32);

	if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) {
		ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len);
		return;
	}

	tx_stats = (struct htt_per_peer_tx_stats_ind *)
			(resp->peer_tx_stats.payload);
	peer_id = __le16_to_cpu(tx_stats->peer_id);

	rcu_read_lock();
	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer || !peer->sta) {
		ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n",
			    peer_id);
		goto out;
	}

	sta = peer->sta;
	for (i = 0; i < num_ppdu; i++) {
		tx_stats = (struct htt_per_peer_tx_stats_ind *)
			(resp->peer_tx_stats.payload + i * ppdu_len);

		p_tx_stats->succ_bytes = __le32_to_cpu(tx_stats->succ_bytes);
		p_tx_stats->retry_bytes = __le32_to_cpu(tx_stats->retry_bytes);
		p_tx_stats->failed_bytes =
				__le32_to_cpu(tx_stats->failed_bytes);
		p_tx_stats->ratecode = tx_stats->ratecode;
		p_tx_stats->flags = tx_stats->flags;
		p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts);
		p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts);
		p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts);
		p_tx_stats->duration = __le16_to_cpu(tx_stats->tx_duration);

		ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
	}

out:
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();
}

static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data)
{
	struct ath10k_pktlog_hdr *hdr = (struct ath10k_pktlog_hdr *)data;
	struct ath10k_per_peer_tx_stats
		*p_tx_stats = &ar->peer_tx_stats;
	struct ath10k_10_2_peer_tx_stats *tx_stats;
	struct ieee80211_sta *sta;
	struct ath10k_peer *peer;
	u16 log_type = __le16_to_cpu(hdr->log_type);
	u32 peer_id = 0, i;

	if (log_type != ATH_PKTLOG_TYPE_TX_STAT)
		return;

	tx_stats = (struct ath10k_10_2_peer_tx_stats *)((hdr->payload) +
		    ATH10K_10_2_TX_STATS_OFFSET);

	if (!tx_stats->tx_ppdu_cnt)
		return;

	peer_id = tx_stats->peer_id;

	rcu_read_lock();
	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer || !peer->sta) {
		ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
			    peer_id);
		goto out;
	}

	sta = peer->sta;
	for (i = 0; i < tx_stats->tx_ppdu_cnt; i++) {
		p_tx_stats->succ_bytes =
			__le16_to_cpu(tx_stats->success_bytes[i]);
		p_tx_stats->retry_bytes =
			__le16_to_cpu(tx_stats->retry_bytes[i]);
		p_tx_stats->failed_bytes =
			__le16_to_cpu(tx_stats->failed_bytes[i]);
		p_tx_stats->ratecode = tx_stats->ratecode[i];
		p_tx_stats->flags = tx_stats->flags[i];
		p_tx_stats->succ_pkts = tx_stats->success_pkts[i];
		p_tx_stats->retry_pkts = tx_stats->retry_pkts[i];
		p_tx_stats->failed_pkts = tx_stats->failed_pkts[i];

		ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
	}
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();

	return;

out:
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();
}

static int ath10k_htt_rx_pn_len(enum htt_security_types sec_type)
{
	switch (sec_type) {
	case HTT_SECURITY_TKIP:
	case HTT_SECURITY_TKIP_NOMIC:
	case HTT_SECURITY_AES_CCMP:
		return 48;
	default:
		return 0;
	}
}

static void ath10k_htt_rx_sec_ind_handler(struct ath10k *ar,
					  struct htt_security_indication *ev)
{
	enum htt_txrx_sec_cast_type sec_index;
	enum htt_security_types sec_type;
	struct ath10k_peer *peer;

	spin_lock_bh(&ar->data_lock);

	peer = ath10k_peer_find_by_id(ar, __le16_to_cpu(ev->peer_id));
	if (!peer) {
		ath10k_warn(ar, "failed to find peer id %d for security indication",
			    __le16_to_cpu(ev->peer_id));
		goto out;
	}

	sec_type = MS(ev->flags, HTT_SECURITY_TYPE);

	if (ev->flags & HTT_SECURITY_IS_UNICAST)
		sec_index = HTT_TXRX_SEC_UCAST;
	else
		sec_index = HTT_TXRX_SEC_MCAST;

	peer->rx_pn[sec_index].sec_type = sec_type;
	peer->rx_pn[sec_index].pn_len = ath10k_htt_rx_pn_len(sec_type);

	memset(peer->tids_last_pn_valid, 0, sizeof(peer->tids_last_pn_valid));
	memset(peer->tids_last_pn, 0, sizeof(peer->tids_last_pn));

out:
	spin_unlock_bh(&ar->data_lock);
}

bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	enum htt_t2h_msg_type type;

	/* confirm alignment */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn(ar, "unaligned htt message, expect trouble\n");

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
		   resp->hdr.msg_type);

	if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X",
			   resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
		return true;
	}
	type =
		ar->htt.t2h_msg_types[resp->hdr.msg_type];

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IND:
		if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) {
			ath10k_htt_rx_proc_rx_ind_ll(htt, &resp->rx_ind);
		} else {
			skb_queue_tail(&htt->rx_indication_head, skb);
			return false;
		}
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		struct ath10k_htt *htt = &ar->htt;
		struct ath10k_htc *htc = &ar->htc;
		struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid];
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
		int info = __le32_to_cpu(resp->mgmt_tx_completion.info);

		tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			tx_done.status = HTT_TX_COMPL_STATE_ACK;
			if (test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
				     ar->wmi.svc_map) &&
			    (resp->mgmt_tx_completion.flags &
			     HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI)) {
				tx_done.ack_rssi =
					FIELD_GET(HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK,
						  info);
			}
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.status = HTT_TX_COMPL_STATE_NOACK;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
			break;
		}

		if (htt->disable_tx_comp) {
			spin_lock_bh(&htc->tx_lock);
			ep->tx_credits++;
			spin_unlock_bh(&htc->tx_lock);
		}

		status = ath10k_txrx_tx_unref(htt, &tx_done);
		if (!status) {
			spin_lock_bh(&htt->tx_lock);
			ath10k_htt_tx_mgmt_dec_pending(htt);
			spin_unlock_bh(&htt->tx_lock);
		}
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
		ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct ath10k *ar = htt->ar;
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_htt_rx_sec_ind_handler(ar, ev);
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			   __le16_to_cpu(ev->peer_id),
			   !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			   MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		atomic_inc(&htt->num_mpdus_ready);

		return ath10k_htt_rx_proc_rx_frag_ind(htt,
						      &resp->rx_frag_ind,
						      skb);
	}
	case HTT_T2H_MSG_TYPE_TEST:
		break;
	case HTT_T2H_MSG_TYPE_STATS_CONF:
		trace_ath10k_htt_stats(ar, skb->data, skb->len);
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
		/* Firmware can return tx frames if it's unable to fully
		 * process them and suspects host may be able to fix it.
		 * ath10k sends all tx frames as already inspected so this
		 * shouldn't happen unless fw has a bug.
		 */
		ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
		break;
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
		ath10k_htt_rx_addba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_RX_DELBA:
		ath10k_htt_rx_delba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG: {
		trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
					skb->len -
					offsetof(struct htt_resp,
						 pktlog_msg.payload));

		if (ath10k_peer_stats_enabled(ar))
			ath10k_fetch_10_2_tx_stats(ar,
						   resp->pktlog_msg.payload);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
		/* Ignore this event because mac80211 takes care of Rx
		 * aggregation reordering.
		 */
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
		skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
		return false;
	}
	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND: {
		struct ath10k_htt *htt = &ar->htt;
		struct ath10k_htc *htc = &ar->htc;
		struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid];
		u32 msg_word = __le32_to_cpu(*(__le32 *)resp);
		int htt_credit_delta;

		htt_credit_delta = HTT_TX_CREDIT_DELTA_ABS_GET(msg_word);
		if (HTT_TX_CREDIT_SIGN_BIT_GET(msg_word))
			htt_credit_delta = -htt_credit_delta;

		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt credit update delta %d\n",
			   htt_credit_delta);

		if (htt->disable_tx_comp) {
			spin_lock_bh(&htc->tx_lock);
			ep->tx_credits += htt_credit_delta;
			spin_unlock_bh(&htc->tx_lock);
			ath10k_dbg(ar, ATH10K_DBG_HTT,
				   "htt credit total %d\n",
				   ep->tx_credits);
			ep->ep_ops.ep_tx_credits(htc->ar);
		}
		break;
	}
	case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
		u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
		u32 freq = __le32_to_cpu(resp->chan_change.freq);

		ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq);
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt chan change freq %u phymode %s\n",
			   freq, ath10k_wmi_phymode_str(phymode));
		break;
	}
	case HTT_T2H_MSG_TYPE_AGGR_CONF:
		break;
	case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
		struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);

		if (!tx_fetch_ind) {
			ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
			break;
		}
		skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
		ath10k_htt_rx_tx_fetch_confirm(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
		ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_PEER_STATS:
		ath10k_htt_fetch_peer_stats(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_EN_STATS:
	default:
		ath10k_warn(ar, "htt event (%d) not handled\n",
			    resp->hdr.msg_type);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		break;
	}
	return true;
}
EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);

void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
					     struct sk_buff *skb)
{
	trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);

static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
{
	struct sk_buff *skb;

	while (quota < budget) {
		if (skb_queue_empty(&ar->htt.rx_msdus_q))
			break;

		skb = skb_dequeue(&ar->htt.rx_msdus_q);
		if (!skb)
			break;
		ath10k_process_rx(ar, skb);
		quota++;
	}

	return quota;
}

int ath10k_htt_rx_hl_indication(struct ath10k *ar, int budget)
{
	struct htt_resp *resp;
	struct ath10k_htt *htt = &ar->htt;
	struct sk_buff *skb;
	bool release;
	int quota;

	for (quota = 0; quota < budget; quota++) {
		skb = skb_dequeue(&htt->rx_indication_head);
		if (!skb)
			break;

		resp = (struct htt_resp *)skb->data;

		release = ath10k_htt_rx_proc_rx_ind_hl(htt,
						       &resp->rx_ind_hl,
						       skb,
						       HTT_RX_PN_CHECK,
						       HTT_RX_NON_TKIP_MIC);

		if (release)
			dev_kfree_skb_any(skb);

		ath10k_dbg(ar, ATH10K_DBG_HTT, "rx indication poll pending count:%d\n",
			   skb_queue_len(&htt->rx_indication_head));
	}
	return quota;
}
EXPORT_SYMBOL(ath10k_htt_rx_hl_indication);

int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {};
	struct sk_buff_head tx_ind_q;
	struct sk_buff *skb;
	unsigned long flags;
	int quota = 0, done, ret;
	bool resched_napi = false;

	__skb_queue_head_init(&tx_ind_q);

	/* Process pending frames before dequeuing more data
	 * from hardware.
	 */
	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
	if (quota == budget) {
		resched_napi = true;
		goto exit;
	}

	while ((skb = skb_dequeue(&htt->rx_in_ord_compl_q))) {
		spin_lock_bh(&htt->rx_ring.lock);
		ret = ath10k_htt_rx_in_ord_ind(ar, skb);
		spin_unlock_bh(&htt->rx_ring.lock);

		dev_kfree_skb_any(skb);
		if (ret == -EIO) {
			resched_napi = true;
			goto exit;
		}
	}

	while (atomic_read(&htt->num_mpdus_ready)) {
		ret = ath10k_htt_rx_handle_amsdu(htt);
		if (ret == -EIO) {
			resched_napi = true;
			goto exit;
		}
		atomic_dec(&htt->num_mpdus_ready);
	}

	/* Deliver received data after processing data from hardware */
	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);

	/* From NAPI documentation:
	 * The napi poll() function may also process TX completions, in which
	 * case if it processes the entire TX ring then it should count that
	 * work as the rest of the budget.
	 */
	if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
		quota = budget;

	/* kfifo_get: called only within txrx_tasklet so it's neatly serialized.
	 * From kfifo_get() documentation:
	 *  Note that with only one concurrent reader and one concurrent writer,
	 *  you don't need extra locking to use these macro.
	 */
	while (kfifo_get(&htt->txdone_fifo, &tx_done))
		ath10k_txrx_tx_unref(htt, &tx_done);

	ath10k_mac_tx_push_pending(ar);

	spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
	skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
	spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);

	while ((skb = __skb_dequeue(&tx_ind_q))) {
		ath10k_htt_rx_tx_fetch_ind(ar, skb);
		dev_kfree_skb_any(skb);
	}

exit:
	ath10k_htt_rx_msdu_buff_replenish(htt);
	/* In case of rx failure or more data to read, report budget
	 * to reschedule NAPI poll
	 */
	done = resched_napi ?
	       budget : quota;

	return done;
}
EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);

static const struct ath10k_htt_rx_ops htt_rx_ops_32 = {
	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32,
	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32,
	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32,
	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32,
	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32,
};

static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64,
	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64,
	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64,
	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64,
	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
};

static const struct ath10k_htt_rx_ops htt_rx_ops_hl = {
	.htt_rx_proc_rx_frag_ind = ath10k_htt_rx_proc_rx_frag_ind_hl,
};

void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;

	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		htt->rx_ops = &htt_rx_ops_hl;
	else if (ar->hw_params.target_64bit)
		htt->rx_ops = &htt_rx_ops_64;
	else
		htt->rx_ops = &htt_rx_ops_32;
}