1da8fa4e3SBjoern A. Zeeb // SPDX-License-Identifier: ISC
2da8fa4e3SBjoern A. Zeeb /*
3da8fa4e3SBjoern A. Zeeb * Copyright (c) 2005-2011 Atheros Communications Inc.
4da8fa4e3SBjoern A. Zeeb * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
5da8fa4e3SBjoern A. Zeeb * Copyright (c) 2018, The Linux Foundation. All rights reserved.
6da8fa4e3SBjoern A. Zeeb */
7da8fa4e3SBjoern A. Zeeb
8da8fa4e3SBjoern A. Zeeb #include "core.h"
9da8fa4e3SBjoern A. Zeeb #include "htc.h"
10da8fa4e3SBjoern A. Zeeb #include "htt.h"
11da8fa4e3SBjoern A. Zeeb #include "txrx.h"
12da8fa4e3SBjoern A. Zeeb #include "debug.h"
13da8fa4e3SBjoern A. Zeeb #include "trace.h"
14da8fa4e3SBjoern A. Zeeb #include "mac.h"
15da8fa4e3SBjoern A. Zeeb
16da8fa4e3SBjoern A. Zeeb #include <linux/log2.h>
17da8fa4e3SBjoern A. Zeeb #include <linux/bitfield.h>
18da8fa4e3SBjoern A. Zeeb
19da8fa4e3SBjoern A. Zeeb /* when under memory pressure rx ring refill may fail and needs a retry */
20da8fa4e3SBjoern A. Zeeb #define HTT_RX_RING_REFILL_RETRY_MS 50
21da8fa4e3SBjoern A. Zeeb
22da8fa4e3SBjoern A. Zeeb #define HTT_RX_RING_REFILL_RESCHED_MS 5
23da8fa4e3SBjoern A. Zeeb
24da8fa4e3SBjoern A. Zeeb /* shortcut to interpret a raw memory buffer as a rx descriptor */
25da8fa4e3SBjoern A. Zeeb #define HTT_RX_BUF_TO_RX_DESC(hw, buf) ath10k_htt_rx_desc_from_raw_buffer(hw, buf)
26da8fa4e3SBjoern A. Zeeb
27da8fa4e3SBjoern A. Zeeb static int ath10k_htt_rx_get_csum_state(struct ath10k_hw_params *hw, struct sk_buff *skb);
28da8fa4e3SBjoern A. Zeeb
29da8fa4e3SBjoern A. Zeeb static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k * ar,u64 paddr)30da8fa4e3SBjoern A. Zeeb ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)
31da8fa4e3SBjoern A. Zeeb {
32da8fa4e3SBjoern A. Zeeb struct ath10k_skb_rxcb *rxcb;
33da8fa4e3SBjoern A. Zeeb
34da8fa4e3SBjoern A. Zeeb hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
35da8fa4e3SBjoern A. Zeeb if (rxcb->paddr == paddr)
36da8fa4e3SBjoern A. Zeeb return ATH10K_RXCB_SKB(rxcb);
37da8fa4e3SBjoern A. Zeeb
38da8fa4e3SBjoern A. Zeeb WARN_ON_ONCE(1);
39da8fa4e3SBjoern A. Zeeb return NULL;
40da8fa4e3SBjoern A. Zeeb }
41da8fa4e3SBjoern A. Zeeb
/* Release every rx buffer currently staged in the rx ring.
 *
 * In full-rx-reorder mode (in_ord_rx) buffers are tracked in the
 * skb_table hash keyed by DMA address; otherwise they sit in
 * netbufs_ring[] indexed by ring slot. Each skb is DMA-unmapped before
 * being freed. Afterwards the bookkeeping (fill_cnt, hash table,
 * netbufs_ring) is reset to an empty state. Callers in this file take
 * rx_ring.lock around this function.
 */
static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct ath10k_skb_rxcb *rxcb;
	struct hlist_node *n;
	int i;

	if (htt->rx_ring.in_ord_rx) {
		hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
			skb = ATH10K_RXCB_SKB(rxcb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			hash_del(&rxcb->hlist);
			dev_kfree_skb_any(skb);
		}
	} else {
		for (i = 0; i < htt->rx_ring.size; i++) {
			skb = htt->rx_ring.netbufs_ring[i];
			/* slots may be empty (already popped) */
			if (!skb)
				continue;

			rxcb = ATH10K_SKB_RXCB(skb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
		}
	}

	htt->rx_ring.fill_cnt = 0;
	hash_init(htt->rx_ring.skb_table);
	memset(htt->rx_ring.netbufs_ring, 0,
	       htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
}
77da8fa4e3SBjoern A. Zeeb
/* Byte size of the paddr ring for targets using 32-bit rx addressing.
 *
 * NOTE(review): sizeof() is applied to the paddrs_ring_32 member
 * itself; if that member is a pointer (declared in htt.h, not visible
 * here) this yields the pointer size rather than the element size -
 * verify against the struct definition.
 */
static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt)
{
	return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_32);
}
82da8fa4e3SBjoern A. Zeeb
/* Byte size of the paddr ring for targets using 64-bit rx addressing.
 *
 * NOTE(review): as with the 32-bit variant, sizeof() is taken on the
 * member itself, not on a dereferenced element - confirm against the
 * declaration in htt.h.
 */
static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt)
{
	return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_64);
}
87da8fa4e3SBjoern A. Zeeb
/* Record the CPU virtual address of the (32-bit addressing) paddr ring. */
static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt,
					     void *vaddr)
{
	htt->rx_ring.paddrs_ring_32 = vaddr;
}
93da8fa4e3SBjoern A. Zeeb
/* Record the CPU virtual address of the (64-bit addressing) paddr ring. */
static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt,
					     void *vaddr)
{
	htt->rx_ring.paddrs_ring_64 = vaddr;
}
99da8fa4e3SBjoern A. Zeeb
/* Publish one rx buffer DMA address at ring slot @idx, converted to the
 * little-endian 32-bit format the target expects.
 */
static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt,
					  dma_addr_t paddr, int idx)
{
	htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr);
}
105da8fa4e3SBjoern A. Zeeb
/* Publish one rx buffer DMA address at ring slot @idx, converted to the
 * little-endian 64-bit format the target expects.
 */
static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt,
					  dma_addr_t paddr, int idx)
{
	htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr);
}
111da8fa4e3SBjoern A. Zeeb
/* Clear the DMA address at ring slot @idx (32-bit addressing ring). */
static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx)
{
	htt->rx_ring.paddrs_ring_32[idx] = 0;
}
116da8fa4e3SBjoern A. Zeeb
/* Clear the DMA address at ring slot @idx (64-bit addressing ring). */
static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx)
{
	htt->rx_ring.paddrs_ring_64[idx] = 0;
}
121da8fa4e3SBjoern A. Zeeb
ath10k_htt_get_vaddr_ring_32(struct ath10k_htt * htt)122da8fa4e3SBjoern A. Zeeb static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt)
123da8fa4e3SBjoern A. Zeeb {
124da8fa4e3SBjoern A. Zeeb return (void *)htt->rx_ring.paddrs_ring_32;
125da8fa4e3SBjoern A. Zeeb }
126da8fa4e3SBjoern A. Zeeb
ath10k_htt_get_vaddr_ring_64(struct ath10k_htt * htt)127da8fa4e3SBjoern A. Zeeb static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt)
128da8fa4e3SBjoern A. Zeeb {
129da8fa4e3SBjoern A. Zeeb return (void *)htt->rx_ring.paddrs_ring_64;
130da8fa4e3SBjoern A. Zeeb }
131da8fa4e3SBjoern A. Zeeb
/* Fill up to @num free slots of the rx ring with freshly allocated,
 * DMA-mapped skbs, starting at the firmware-shared alloc index.
 *
 * Returns 0 on success or -ENOMEM when allocation/mapping failed or
 * the shared index looked corrupt; in every case the slots filled so
 * far are still published to the firmware via alloc_idx.vaddr. Caller
 * must hold rx_ring.lock (asserted by ath10k_htt_rx_ring_fill_n()).
 */
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct ath10k_hw_params *hw = &htt->ar->hw_params;
	struct htt_rx_desc *rx_desc;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	/* The Full Rx Reorder firmware has no way of telling the host
	 * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
	 * To keep things simple make sure ring is always half empty. This
	 * guarantees there'll be no replenishment overruns possible.
	 */
	BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);

	/* alloc_idx.vaddr is shared with the device: it holds the index of
	 * the next slot the host will fill.
	 */
	idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	if (idx < 0 || idx >= htt->rx_ring.size) {
		ath10k_err(htt->ar, "rx ring index is not valid, firmware malfunctioning?\n");
		/* clamp into range before it is written back at "fail" */
		idx &= htt->rx_ring.size_mask;
		ret = -ENOMEM;
		goto fail;
	}

	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		/* advance skb->data to the next HTT_RX_DESC_ALIGN boundary;
		 * the extra HTT_RX_DESC_ALIGN bytes allocated above make
		 * room for this adjustment
		 */
		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = HTT_RX_BUF_TO_RX_DESC(hw, skb->data);
		ath10k_htt_rx_desc_get_attention(hw, rx_desc)->flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		rxcb = ATH10K_SKB_RXCB(skb);
		rxcb->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		ath10k_htt_set_paddrs_ring(htt, paddr, idx);
		htt->rx_ring.fill_cnt++;

		/* full-rx-reorder mode additionally tracks buffers by DMA
		 * address so they can be popped out of order later
		 */
		if (htt->rx_ring.in_ord_rx) {
			hash_add(htt->rx_ring.skb_table,
				 &ATH10K_SKB_RXCB(skb)->hlist,
				 paddr);
		}

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	/*
	 * Make sure the rx buffer is updated before available buffer
	 * index to avoid any potential rx ring corruption.
	 */
	mb();
	*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
	return ret;
}
209da8fa4e3SBjoern A. Zeeb
/* Lock-asserting wrapper around __ath10k_htt_rx_ring_fill_n(); caller
 * must hold rx_ring.lock.
 */
static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}
215da8fa4e3SBjoern A. Zeeb
/* Top up the rx ring toward its fill level in bounded chunks of at most
 * ATH10K_HTT_MAX_NUM_REFILL buffers, re-arming the refill_retry timer
 * when more work remains or allocation failed.
 */
static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is RX may take up significant amount of CPU cycles and starve
	 * other tasks, e.g. TX on an ethernet device while acting as a bridge
	 * with ath10k wlan interface. This ended up with very poor performance
	 * once CPU the host system was overwhelmed with RX on ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turns makes use of the fact tasklets are
	 * processed in FIFO order. This means actual RX processing can starve
	 * out refilling. If there's not enough buffers on RX ring FW will not
	 * report RX until it is refilled with enough buffers. This
	 * automatically balances load wrt to CPU power.
	 *
	 * This probably comes at a cost of lower maximum throughput but
	 * improves the average and stability.
	 */
	spin_lock_bh(&htt->rx_ring.lock);
	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
	num_deficit -= num_to_fill;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	} else if (num_deficit > 0) {
		/* more buffers still needed - reschedule sooner */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}
256da8fa4e3SBjoern A. Zeeb
/* Timer callback: retry rx ring replenishment after an earlier attempt
 * left the ring short (armed by ath10k_htt_rx_msdu_buff_replenish()).
 */
static void ath10k_htt_rx_ring_refill_retry(struct timer_list *t)
{
	/* recover the htt context embedding this timer */
	struct ath10k_htt *htt = from_timer(htt, t, rx_ring.refill_retry_timer);

	ath10k_htt_rx_msdu_buff_replenish(htt);
}
263da8fa4e3SBjoern A. Zeeb
/* Fill the rx ring all the way up to its configured fill level.
 *
 * High-latency (HL) devices have no host rx ring, so this is a no-op
 * for them. On failure every buffer staged so far is released again,
 * leaving the ring empty and consistent. Returns 0 or a negative errno
 * from the fill attempt.
 */
int ath10k_htt_rx_ring_refill(struct ath10k *ar)
{
	struct ath10k_htt *htt = &ar->htt;
	int ret;

	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		return 0;

	spin_lock_bh(&htt->rx_ring.lock);
	ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
					      htt->rx_ring.fill_cnt));

	if (ret)
		ath10k_htt_rx_ring_free(htt);

	spin_unlock_bh(&htt->rx_ring.lock);

	return ret;
}
283da8fa4e3SBjoern A. Zeeb
/* Tear down all HTT rx state: stop the refill retry timer, drop any
 * queued rx/tx-fetch skbs, free the ring buffers (under rx_ring.lock),
 * then release the DMA-coherent paddr ring and shared alloc index.
 * Pointers are NULLed after freeing so a later access fails loudly
 * rather than touching freed memory. No-op for high-latency devices,
 * which never allocated any of this.
 */
void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
	if (htt->ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		return;

	del_timer_sync(&htt->rx_ring.refill_retry_timer);

	skb_queue_purge(&htt->rx_msdus_q);
	skb_queue_purge(&htt->rx_in_ord_compl_q);
	skb_queue_purge(&htt->tx_fetch_ind_q);

	spin_lock_bh(&htt->rx_ring.lock);
	ath10k_htt_rx_ring_free(htt);
	spin_unlock_bh(&htt->rx_ring.lock);

	dma_free_coherent(htt->ar->dev,
			  ath10k_htt_get_rx_ring_size(htt),
			  ath10k_htt_get_vaddr_ring(htt),
			  htt->rx_ring.base_paddr);

	ath10k_htt_config_paddrs_ring(htt, NULL);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);
	htt->rx_ring.alloc_idx.vaddr = NULL;

	kfree(htt->rx_ring.netbufs_ring);
	htt->rx_ring.netbufs_ring = NULL;
}
315da8fa4e3SBjoern A. Zeeb
/* Pop the next rx skb from the ring at the software read index.
 *
 * Advances sw_rd_idx (with wrap-around via size_mask), decrements
 * fill_cnt, clears the slot's paddr entry and DMA-unmaps the buffer
 * for CPU access. Returns NULL (with a warning) if the ring is empty.
 * Caller must hold rx_ring.lock.
 */
static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int idx;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_ring.fill_cnt == 0) {
		ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
		return NULL;
	}

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];
	htt->rx_ring.netbufs_ring[idx] = NULL;
	ath10k_htt_reset_paddrs_ring(htt, idx);

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev,
			 ATH10K_SKB_RXCB(msdu)->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}
348da8fa4e3SBjoern A. Zeeb
349da8fa4e3SBjoern A. Zeeb /* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
ath10k_htt_rx_amsdu_pop(struct ath10k_htt * htt,struct sk_buff_head * amsdu)350da8fa4e3SBjoern A. Zeeb static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
351da8fa4e3SBjoern A. Zeeb struct sk_buff_head *amsdu)
352da8fa4e3SBjoern A. Zeeb {
353da8fa4e3SBjoern A. Zeeb struct ath10k *ar = htt->ar;
354da8fa4e3SBjoern A. Zeeb struct ath10k_hw_params *hw = &ar->hw_params;
355da8fa4e3SBjoern A. Zeeb int msdu_len, msdu_chaining = 0;
356da8fa4e3SBjoern A. Zeeb struct sk_buff *msdu;
357da8fa4e3SBjoern A. Zeeb struct htt_rx_desc *rx_desc;
358da8fa4e3SBjoern A. Zeeb struct rx_attention *rx_desc_attention;
359da8fa4e3SBjoern A. Zeeb struct rx_frag_info_common *rx_desc_frag_info_common;
360da8fa4e3SBjoern A. Zeeb struct rx_msdu_start_common *rx_desc_msdu_start_common;
361da8fa4e3SBjoern A. Zeeb struct rx_msdu_end_common *rx_desc_msdu_end_common;
362da8fa4e3SBjoern A. Zeeb
363da8fa4e3SBjoern A. Zeeb lockdep_assert_held(&htt->rx_ring.lock);
364da8fa4e3SBjoern A. Zeeb
365da8fa4e3SBjoern A. Zeeb for (;;) {
366da8fa4e3SBjoern A. Zeeb int last_msdu, msdu_len_invalid, msdu_chained;
367da8fa4e3SBjoern A. Zeeb
368da8fa4e3SBjoern A. Zeeb msdu = ath10k_htt_rx_netbuf_pop(htt);
369da8fa4e3SBjoern A. Zeeb if (!msdu) {
370da8fa4e3SBjoern A. Zeeb __skb_queue_purge(amsdu);
371da8fa4e3SBjoern A. Zeeb return -ENOENT;
372da8fa4e3SBjoern A. Zeeb }
373da8fa4e3SBjoern A. Zeeb
374da8fa4e3SBjoern A. Zeeb __skb_queue_tail(amsdu, msdu);
375da8fa4e3SBjoern A. Zeeb
376da8fa4e3SBjoern A. Zeeb rx_desc = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
377da8fa4e3SBjoern A. Zeeb rx_desc_attention = ath10k_htt_rx_desc_get_attention(hw, rx_desc);
378da8fa4e3SBjoern A. Zeeb rx_desc_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw,
379da8fa4e3SBjoern A. Zeeb rx_desc);
380da8fa4e3SBjoern A. Zeeb rx_desc_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rx_desc);
381da8fa4e3SBjoern A. Zeeb rx_desc_frag_info_common = ath10k_htt_rx_desc_get_frag_info(hw, rx_desc);
382da8fa4e3SBjoern A. Zeeb
383da8fa4e3SBjoern A. Zeeb /* FIXME: we must report msdu payload since this is what caller
384da8fa4e3SBjoern A. Zeeb * expects now
385da8fa4e3SBjoern A. Zeeb */
386da8fa4e3SBjoern A. Zeeb skb_put(msdu, hw->rx_desc_ops->rx_desc_msdu_payload_offset);
387da8fa4e3SBjoern A. Zeeb skb_pull(msdu, hw->rx_desc_ops->rx_desc_msdu_payload_offset);
388da8fa4e3SBjoern A. Zeeb
389da8fa4e3SBjoern A. Zeeb /*
390da8fa4e3SBjoern A. Zeeb * Sanity check - confirm the HW is finished filling in the
391da8fa4e3SBjoern A. Zeeb * rx data.
392da8fa4e3SBjoern A. Zeeb * If the HW and SW are working correctly, then it's guaranteed
393da8fa4e3SBjoern A. Zeeb * that the HW's MAC DMA is done before this point in the SW.
394da8fa4e3SBjoern A. Zeeb * To prevent the case that we handle a stale Rx descriptor,
395da8fa4e3SBjoern A. Zeeb * just assert for now until we have a way to recover.
396da8fa4e3SBjoern A. Zeeb */
397da8fa4e3SBjoern A. Zeeb if (!(__le32_to_cpu(rx_desc_attention->flags)
398da8fa4e3SBjoern A. Zeeb & RX_ATTENTION_FLAGS_MSDU_DONE)) {
399da8fa4e3SBjoern A. Zeeb __skb_queue_purge(amsdu);
400da8fa4e3SBjoern A. Zeeb return -EIO;
401da8fa4e3SBjoern A. Zeeb }
402da8fa4e3SBjoern A. Zeeb
403da8fa4e3SBjoern A. Zeeb msdu_len_invalid = !!(__le32_to_cpu(rx_desc_attention->flags)
404da8fa4e3SBjoern A. Zeeb & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
405da8fa4e3SBjoern A. Zeeb RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
406da8fa4e3SBjoern A. Zeeb msdu_len = MS(__le32_to_cpu(rx_desc_msdu_start_common->info0),
407da8fa4e3SBjoern A. Zeeb RX_MSDU_START_INFO0_MSDU_LENGTH);
408da8fa4e3SBjoern A. Zeeb msdu_chained = rx_desc_frag_info_common->ring2_more_count;
409da8fa4e3SBjoern A. Zeeb
410da8fa4e3SBjoern A. Zeeb if (msdu_len_invalid)
411da8fa4e3SBjoern A. Zeeb msdu_len = 0;
412da8fa4e3SBjoern A. Zeeb
413da8fa4e3SBjoern A. Zeeb skb_trim(msdu, 0);
414da8fa4e3SBjoern A. Zeeb skb_put(msdu, min(msdu_len, ath10k_htt_rx_msdu_size(hw)));
415da8fa4e3SBjoern A. Zeeb msdu_len -= msdu->len;
416da8fa4e3SBjoern A. Zeeb
417da8fa4e3SBjoern A. Zeeb /* Note: Chained buffers do not contain rx descriptor */
418da8fa4e3SBjoern A. Zeeb while (msdu_chained--) {
419da8fa4e3SBjoern A. Zeeb msdu = ath10k_htt_rx_netbuf_pop(htt);
420da8fa4e3SBjoern A. Zeeb if (!msdu) {
421da8fa4e3SBjoern A. Zeeb __skb_queue_purge(amsdu);
422da8fa4e3SBjoern A. Zeeb return -ENOENT;
423da8fa4e3SBjoern A. Zeeb }
424da8fa4e3SBjoern A. Zeeb
425da8fa4e3SBjoern A. Zeeb __skb_queue_tail(amsdu, msdu);
426da8fa4e3SBjoern A. Zeeb skb_trim(msdu, 0);
427da8fa4e3SBjoern A. Zeeb skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
428da8fa4e3SBjoern A. Zeeb msdu_len -= msdu->len;
429da8fa4e3SBjoern A. Zeeb msdu_chaining = 1;
430da8fa4e3SBjoern A. Zeeb }
431da8fa4e3SBjoern A. Zeeb
432da8fa4e3SBjoern A. Zeeb last_msdu = __le32_to_cpu(rx_desc_msdu_end_common->info0) &
433da8fa4e3SBjoern A. Zeeb RX_MSDU_END_INFO0_LAST_MSDU;
434da8fa4e3SBjoern A. Zeeb
435da8fa4e3SBjoern A. Zeeb /* FIXME: why are we skipping the first part of the rx_desc? */
436*07724ba6SBjoern A. Zeeb #if defined(__linux__)
437*07724ba6SBjoern A. Zeeb trace_ath10k_htt_rx_desc(ar, (void *)rx_desc + sizeof(u32),
438*07724ba6SBjoern A. Zeeb #elif defined(__FreeBSD__)
439*07724ba6SBjoern A. Zeeb trace_ath10k_htt_rx_desc(ar, (u8 *)rx_desc + sizeof(u32),
440*07724ba6SBjoern A. Zeeb #endif
441da8fa4e3SBjoern A. Zeeb hw->rx_desc_ops->rx_desc_size - sizeof(u32));
442da8fa4e3SBjoern A. Zeeb
443da8fa4e3SBjoern A. Zeeb if (last_msdu)
444da8fa4e3SBjoern A. Zeeb break;
445da8fa4e3SBjoern A. Zeeb }
446da8fa4e3SBjoern A. Zeeb
447da8fa4e3SBjoern A. Zeeb if (skb_queue_empty(amsdu))
448da8fa4e3SBjoern A. Zeeb msdu_chaining = -1;
449da8fa4e3SBjoern A. Zeeb
450da8fa4e3SBjoern A. Zeeb /*
451da8fa4e3SBjoern A. Zeeb * Don't refill the ring yet.
452da8fa4e3SBjoern A. Zeeb *
453da8fa4e3SBjoern A. Zeeb * First, the elements popped here are still in use - it is not
454da8fa4e3SBjoern A. Zeeb * safe to overwrite them until the matching call to
455da8fa4e3SBjoern A. Zeeb * mpdu_desc_list_next. Second, for efficiency it is preferable to
456da8fa4e3SBjoern A. Zeeb * refill the rx ring with 1 PPDU's worth of rx buffers (something
457da8fa4e3SBjoern A. Zeeb * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
458da8fa4e3SBjoern A. Zeeb * (something like 3 buffers). Consequently, we'll rely on the txrx
459da8fa4e3SBjoern A. Zeeb * SW to tell us when it is done pulling all the PPDU's rx buffers
460da8fa4e3SBjoern A. Zeeb * out of the rx ring, and then refill it just once.
461da8fa4e3SBjoern A. Zeeb */
462da8fa4e3SBjoern A. Zeeb
463da8fa4e3SBjoern A. Zeeb return msdu_chaining;
464da8fa4e3SBjoern A. Zeeb }
465da8fa4e3SBjoern A. Zeeb
/* Remove and return the rx skb matching @paddr from the in-order skb
 * hash, DMA-unmapping it for CPU access and decrementing fill_cnt.
 * Returns NULL if no such buffer is tracked. Caller must hold
 * rx_ring.lock.
 */
static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
					       u64 paddr)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
	if (!msdu)
		return NULL;

	rxcb = ATH10K_SKB_RXCB(msdu);
	hash_del(&rxcb->hlist);
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev, rxcb->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}
491da8fa4e3SBjoern A. Zeeb
ath10k_htt_append_frag_list(struct sk_buff * skb_head,struct sk_buff * frag_list,unsigned int frag_len)492da8fa4e3SBjoern A. Zeeb static inline void ath10k_htt_append_frag_list(struct sk_buff *skb_head,
493da8fa4e3SBjoern A. Zeeb struct sk_buff *frag_list,
494da8fa4e3SBjoern A. Zeeb unsigned int frag_len)
495da8fa4e3SBjoern A. Zeeb {
496da8fa4e3SBjoern A. Zeeb skb_shinfo(skb_head)->frag_list = frag_list;
497da8fa4e3SBjoern A. Zeeb skb_head->data_len = frag_len;
498da8fa4e3SBjoern A. Zeeb skb_head->len += skb_head->data_len;
499da8fa4e3SBjoern A. Zeeb }
500da8fa4e3SBjoern A. Zeeb
/* Reassemble a multi-buffer A-MSDU for in-order rx (32-bit descriptor
 * variant).
 *
 * @msdu holds the first buffer (including the rx descriptor, which is
 * stripped here); subsequent fragments are looked up by the DMA
 * address in the following in-order descriptors, popped from the ring
 * and linked onto @msdu via its frag_list. On success *msdu_desc is
 * advanced to the last descriptor consumed. Returns 0 on success or
 * -ENOENT if a fragment could not be found (the frag chain built so
 * far is left NULL-terminated).
 */
static int ath10k_htt_rx_handle_amsdu_mon_32(struct ath10k_htt *htt,
					     struct sk_buff *msdu,
					     struct htt_rx_in_ord_msdu_desc **msdu_desc)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_hw_params *hw = &ar->hw_params;
	u32 paddr;
	struct sk_buff *frag_buf;
	struct sk_buff *prev_frag_buf;
	u8 last_frag;
	struct htt_rx_in_ord_msdu_desc *ind_desc = *msdu_desc;
	struct htt_rx_desc *rxd;
	int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);

	rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
	trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);

	/* strip the rx descriptor, then claim as much payload as fits in
	 * this first buffer
	 */
	skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
	skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
	skb_put(msdu, min(amsdu_len, ath10k_htt_rx_msdu_size(hw)));
	amsdu_len -= msdu->len;

	/* the descriptor's reserved field carries the last-fragment flag */
	last_frag = ind_desc->reserved;
	if (last_frag) {
		if (amsdu_len) {
			ath10k_warn(ar, "invalid amsdu len %u, left %d",
				    __le16_to_cpu(ind_desc->msdu_len),
				    amsdu_len);
		}
		return 0;
	}

	/* second fragment starts the frag_list chain on @msdu */
	ind_desc++;
	paddr = __le32_to_cpu(ind_desc->msdu_paddr);
	frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
	if (!frag_buf) {
		ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%x", paddr);
		return -ENOENT;
	}

	skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
	ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len);

	amsdu_len -= frag_buf->len;
	prev_frag_buf = frag_buf;
	last_frag = ind_desc->reserved;
	/* remaining fragments are linked sibling-to-sibling via skb->next */
	while (!last_frag) {
		ind_desc++;
		paddr = __le32_to_cpu(ind_desc->msdu_paddr);
		frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!frag_buf) {
			ath10k_warn(ar, "failed to pop frag-n paddr: 0x%x",
				    paddr);
			/* terminate the partial chain before bailing out */
			prev_frag_buf->next = NULL;
			return -ENOENT;
		}

		skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
		last_frag = ind_desc->reserved;
		amsdu_len -= frag_buf->len;

		prev_frag_buf->next = frag_buf;
		prev_frag_buf = frag_buf;
	}

	if (amsdu_len) {
		ath10k_warn(ar, "invalid amsdu len %u, left %d",
			    __le16_to_cpu(ind_desc->msdu_len), amsdu_len);
	}

	*msdu_desc = ind_desc;

	prev_frag_buf->next = NULL;
	return 0;
}
576da8fa4e3SBjoern A. Zeeb
/* Reassemble one monitor-mode A-MSDU whose payload spans several rx
 * buffers (64-bit physical-address descriptor variant).
 *
 * @msdu holds the first buffer, still prefixed by the hardware rx
 * descriptor; *msdu_desc points at its in-order indication descriptor.
 * Follow-up buffers are popped from the rx ring by physical address and
 * chained onto @msdu's frag list until a descriptor marks the last
 * fragment.  On success *msdu_desc is advanced to the last descriptor
 * consumed so the caller can continue after it.
 *
 * Returns 0 on success or -ENOENT if a fragment buffer cannot be found
 * in the rx ring skb table.
 */
static int
ath10k_htt_rx_handle_amsdu_mon_64(struct ath10k_htt *htt,
				  struct sk_buff *msdu,
				  struct htt_rx_in_ord_msdu_desc_ext **msdu_desc)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_hw_params *hw = &ar->hw_params;
	u64 paddr;
	struct sk_buff *frag_buf;
	struct sk_buff *prev_frag_buf;
	u8 last_frag;
	struct htt_rx_in_ord_msdu_desc_ext *ind_desc = *msdu_desc;
	struct htt_rx_desc *rxd;
	int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);

	rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
	trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);

	/* Strip the hw rx descriptor off the head buffer, then expose up
	 * to one buffer's worth of payload.
	 */
	skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
	skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
	skb_put(msdu, min(amsdu_len, ath10k_htt_rx_msdu_size(hw)));
	amsdu_len -= msdu->len;

	/* NOTE(review): the descriptor's "reserved" field is read as a
	 * last-fragment flag here - presumably firmware repurposes it;
	 * confirm against the HTT interface definition.
	 */
	last_frag = ind_desc->reserved;
	if (last_frag) {
		/* Single-buffer msdu; any unaccounted length is bogus. */
		if (amsdu_len) {
			ath10k_warn(ar, "invalid amsdu len %u, left %d",
				    __le16_to_cpu(ind_desc->msdu_len),
				    amsdu_len);
		}
		return 0;
	}

	/* First continuation fragment starts the frag list. */
	ind_desc++;
	paddr = __le64_to_cpu(ind_desc->msdu_paddr);
	frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
	if (!frag_buf) {
#if defined(__linux__)
		ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%llx", paddr);
#elif defined(__FreeBSD__)
		ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%jx", (uintmax_t)paddr);
#endif
		return -ENOENT;
	}

	skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
	ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len);

	amsdu_len -= frag_buf->len;
	prev_frag_buf = frag_buf;
	last_frag = ind_desc->reserved;
	/* Keep popping ring buffers until a descriptor flags the last one. */
	while (!last_frag) {
		ind_desc++;
		paddr = __le64_to_cpu(ind_desc->msdu_paddr);
		frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!frag_buf) {
#if defined(__linux__)
			ath10k_warn(ar, "failed to pop frag-n paddr: 0x%llx",
				    paddr);
#elif defined(__FreeBSD__)
			ath10k_warn(ar, "failed to pop frag-n paddr: 0x%jx",
				    (uintmax_t)paddr);
#endif
			/* Terminate the partially built chain before bailing. */
			prev_frag_buf->next = NULL;
			return -ENOENT;
		}

		skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
		last_frag = ind_desc->reserved;
		amsdu_len -= frag_buf->len;

		prev_frag_buf->next = frag_buf;
		prev_frag_buf = frag_buf;
	}

	/* All fragments consumed but the indicated length didn't add up. */
	if (amsdu_len) {
		ath10k_warn(ar, "invalid amsdu len %u, left %d",
			    __le16_to_cpu(ind_desc->msdu_len), amsdu_len);
	}

	*msdu_desc = ind_desc;

	prev_frag_buf->next = NULL;
	return 0;
}
662da8fa4e3SBjoern A. Zeeb
/* Pop all rx buffers referenced by a 32-bit-paddr in-order indication
 * event and append them to @list.
 *
 * For non-offload frames while a monitor vif is active, the full A-MSDU
 * is reassembled via ath10k_htt_rx_handle_amsdu_mon_32(); otherwise each
 * buffer has its hw rx descriptor stripped and is trimmed to the msdu
 * length given in the event descriptor.
 *
 * Must be called with htt->rx_ring.lock held.  Returns 0 on success,
 * -ENOENT if a buffer is missing from the ring (@list is purged), or
 * -EIO if a buffer's MSDU_DONE attention bit is not set.
 */
static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
					  struct htt_rx_in_ord_ind *ev,
					  struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
	struct htt_rx_desc *rxd;
	struct rx_attention *rxd_attention;
	struct sk_buff *msdu;
	int msdu_count, ret;
	bool is_offload;
	u32 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le32_to_cpu(msdu_desc->msdu_paddr);

		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		/* Monitor path: pull in all fragments of the A-MSDU; the
		 * helper advances msdu_desc past consumed descriptors.
		 */
		if (!is_offload && ar->monitor_arvif) {
			ret = ath10k_htt_rx_handle_amsdu_mon_32(htt, msdu,
								&msdu_desc);
			if (ret) {
				__skb_queue_purge(list);
				return ret;
			}
			__skb_queue_tail(list, msdu);
			msdu_desc++;
			continue;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
			rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);

			trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);

			/* Drop the hw rx descriptor and expose the payload. */
			skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
			skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			/* NOTE(review): unlike the -ENOENT paths, @list is
			 * not purged here - presumably the caller cleans up
			 * on error; verify against the in-ord-ind handler.
			 */
			if (!(__le32_to_cpu(rxd_attention->flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}
727da8fa4e3SBjoern A. Zeeb
/* Pop all rx buffers referenced by a 64-bit-paddr in-order indication
 * event and append them to @list.
 *
 * 64-bit counterpart of ath10k_htt_rx_pop_paddr32_list(): for
 * non-offload frames while a monitor vif is active the full A-MSDU is
 * reassembled via ath10k_htt_rx_handle_amsdu_mon_64(); otherwise each
 * buffer has its hw rx descriptor stripped and is trimmed to the msdu
 * length given in the event descriptor.
 *
 * Must be called with htt->rx_ring.lock held.  Returns 0 on success,
 * -ENOENT if a buffer is missing from the ring (@list is purged), or
 * -EIO if a buffer's MSDU_DONE attention bit is not set.
 */
static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
					  struct htt_rx_in_ord_ind *ev,
					  struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
	struct htt_rx_desc *rxd;
	struct rx_attention *rxd_attention;
	struct sk_buff *msdu;
	int msdu_count, ret;
	bool is_offload;
	u64 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le64_to_cpu(msdu_desc->msdu_paddr);
		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		/* Monitor path: pull in all fragments of the A-MSDU; the
		 * helper advances msdu_desc past consumed descriptors.
		 */
		if (!is_offload && ar->monitor_arvif) {
			ret = ath10k_htt_rx_handle_amsdu_mon_64(htt, msdu,
								&msdu_desc);
			if (ret) {
				__skb_queue_purge(list);
				return ret;
			}
			__skb_queue_tail(list, msdu);
			msdu_desc++;
			continue;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
			rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);

			trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);

			/* Drop the hw rx descriptor and expose the payload. */
			skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
			skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			/* NOTE(review): unlike the -ENOENT paths, @list is
			 * not purged here - presumably the caller cleans up
			 * on error; verify against the in-ord-ind handler.
			 */
			if (!(__le32_to_cpu(rxd_attention->flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}
791da8fa4e3SBjoern A. Zeeb
/* Allocate and initialize the host rx ring: the netbuf pointer array,
 * the DMA-coherent paddr ring shared with firmware, the alloc-index
 * word, plus the refill retry timer, locks and rx queues.
 *
 * High-latency (HL) devices do not use a host rx ring, so this is a
 * no-op for them.
 *
 * Returns 0 on success, -EINVAL for a bad ring size, or -ENOMEM on
 * allocation failure (all partial allocations are unwound).
 */
int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	dma_addr_t paddr;
	void *vaddr, *vaddr_ring;
	size_t size;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		return 0;

	htt->rx_confused = false;

	/* XXX: The fill level could be changed during runtime in response to
	 * the host processing latency. Is this really worth it?
	 */
	htt->rx_ring.size = HTT_RX_RING_SIZE;
	htt->rx_ring.size_mask = htt->rx_ring.size - 1;
	htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level;

	/* size_mask is used as an index mask, so the size must be a
	 * power of two.
	 */
	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn(ar, "htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.netbufs_ring =
		kcalloc(htt->rx_ring.size, sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	size = ath10k_htt_get_rx_ring_size(htt);

	/* Ring of buffer physical addresses, read by firmware via DMA. */
	vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
	if (!vaddr_ring)
		goto err_dma_ring;

	ath10k_htt_config_paddrs_ring(htt, vaddr_ring);
	htt->rx_ring.base_paddr = paddr;

	/* Single shared word holding the host's ring alloc index. */
	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_KERNEL);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	/* NOTE(review): msdu_payld is set to size_mask here and reset to 0
	 * a few lines below - looks redundant; confirm which value the
	 * ring fill path expects at startup.
	 */
	htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	timer_setup(timer, ath10k_htt_rx_ring_refill_retry, 0);

	spin_lock_init(&htt->rx_ring.lock);
#if defined(__FreeBSD__)
	spin_lock_init(&htt->tx_fetch_ind_q.lock);
#endif

	htt->rx_ring.fill_cnt = 0;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	hash_init(htt->rx_ring.skb_table);

	skb_queue_head_init(&htt->rx_msdus_q);
	skb_queue_head_init(&htt->rx_in_ord_compl_q);
	skb_queue_head_init(&htt->tx_fetch_ind_q);
	atomic_set(&htt->num_mpdus_ready, 0);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  ath10k_htt_get_rx_ring_size(htt),
			  vaddr_ring,
			  htt->rx_ring.base_paddr);
	ath10k_htt_config_paddrs_ring(htt, NULL);
err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);
	htt->rx_ring.netbufs_ring = NULL;
err_netbuf:
	return -ENOMEM;
}
876da8fa4e3SBjoern A. Zeeb
ath10k_htt_rx_crypto_param_len(struct ath10k * ar,enum htt_rx_mpdu_encrypt_type type)877da8fa4e3SBjoern A. Zeeb static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
878da8fa4e3SBjoern A. Zeeb enum htt_rx_mpdu_encrypt_type type)
879da8fa4e3SBjoern A. Zeeb {
880da8fa4e3SBjoern A. Zeeb switch (type) {
881da8fa4e3SBjoern A. Zeeb case HTT_RX_MPDU_ENCRYPT_NONE:
882da8fa4e3SBjoern A. Zeeb return 0;
883da8fa4e3SBjoern A. Zeeb case HTT_RX_MPDU_ENCRYPT_WEP40:
884da8fa4e3SBjoern A. Zeeb case HTT_RX_MPDU_ENCRYPT_WEP104:
885da8fa4e3SBjoern A. Zeeb return IEEE80211_WEP_IV_LEN;
886da8fa4e3SBjoern A. Zeeb case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
887da8fa4e3SBjoern A. Zeeb case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
888da8fa4e3SBjoern A. Zeeb return IEEE80211_TKIP_IV_LEN;
889da8fa4e3SBjoern A. Zeeb case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
890da8fa4e3SBjoern A. Zeeb return IEEE80211_CCMP_HDR_LEN;
891da8fa4e3SBjoern A. Zeeb case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
892da8fa4e3SBjoern A. Zeeb return IEEE80211_CCMP_256_HDR_LEN;
893da8fa4e3SBjoern A. Zeeb case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
894da8fa4e3SBjoern A. Zeeb case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
895da8fa4e3SBjoern A. Zeeb return IEEE80211_GCMP_HDR_LEN;
896da8fa4e3SBjoern A. Zeeb case HTT_RX_MPDU_ENCRYPT_WEP128:
897da8fa4e3SBjoern A. Zeeb case HTT_RX_MPDU_ENCRYPT_WAPI:
898da8fa4e3SBjoern A. Zeeb break;
899da8fa4e3SBjoern A. Zeeb }
900da8fa4e3SBjoern A. Zeeb
901da8fa4e3SBjoern A. Zeeb ath10k_warn(ar, "unsupported encryption type %d\n", type);
902da8fa4e3SBjoern A. Zeeb return 0;
903da8fa4e3SBjoern A. Zeeb }
904da8fa4e3SBjoern A. Zeeb
905da8fa4e3SBjoern A. Zeeb #define MICHAEL_MIC_LEN 8
906da8fa4e3SBjoern A. Zeeb
ath10k_htt_rx_crypto_mic_len(struct ath10k * ar,enum htt_rx_mpdu_encrypt_type type)907da8fa4e3SBjoern A. Zeeb static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar,
908da8fa4e3SBjoern A. Zeeb enum htt_rx_mpdu_encrypt_type type)
909da8fa4e3SBjoern A. Zeeb {
910da8fa4e3SBjoern A. Zeeb switch (type) {
911da8fa4e3SBjoern A. Zeeb case HTT_RX_MPDU_ENCRYPT_NONE:
912da8fa4e3SBjoern A. Zeeb case HTT_RX_MPDU_ENCRYPT_WEP40:
913da8fa4e3SBjoern A. Zeeb case HTT_RX_MPDU_ENCRYPT_WEP104:
914da8fa4e3SBjoern A. Zeeb case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
915da8fa4e3SBjoern A. Zeeb case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
916da8fa4e3SBjoern A. Zeeb return 0;
917da8fa4e3SBjoern A. Zeeb case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
918da8fa4e3SBjoern A. Zeeb return IEEE80211_CCMP_MIC_LEN;
919da8fa4e3SBjoern A. Zeeb case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
920da8fa4e3SBjoern A. Zeeb return IEEE80211_CCMP_256_MIC_LEN;
921da8fa4e3SBjoern A. Zeeb case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
922da8fa4e3SBjoern A. Zeeb case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
923da8fa4e3SBjoern A. Zeeb return IEEE80211_GCMP_MIC_LEN;
924da8fa4e3SBjoern A. Zeeb case HTT_RX_MPDU_ENCRYPT_WEP128:
925da8fa4e3SBjoern A. Zeeb case HTT_RX_MPDU_ENCRYPT_WAPI:
926da8fa4e3SBjoern A. Zeeb break;
927da8fa4e3SBjoern A. Zeeb }
928da8fa4e3SBjoern A. Zeeb
929da8fa4e3SBjoern A. Zeeb ath10k_warn(ar, "unsupported encryption type %d\n", type);
930da8fa4e3SBjoern A. Zeeb return 0;
931da8fa4e3SBjoern A. Zeeb }
932da8fa4e3SBjoern A. Zeeb
ath10k_htt_rx_crypto_icv_len(struct ath10k * ar,enum htt_rx_mpdu_encrypt_type type)933da8fa4e3SBjoern A. Zeeb static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar,
934da8fa4e3SBjoern A. Zeeb enum htt_rx_mpdu_encrypt_type type)
935da8fa4e3SBjoern A. Zeeb {
936da8fa4e3SBjoern A. Zeeb switch (type) {
937da8fa4e3SBjoern A. Zeeb case HTT_RX_MPDU_ENCRYPT_NONE:
938da8fa4e3SBjoern A. Zeeb case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
939da8fa4e3SBjoern A. Zeeb case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
940da8fa4e3SBjoern A. Zeeb case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
941da8fa4e3SBjoern A. Zeeb case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
942da8fa4e3SBjoern A. Zeeb return 0;
943da8fa4e3SBjoern A. Zeeb case HTT_RX_MPDU_ENCRYPT_WEP40:
944da8fa4e3SBjoern A. Zeeb case HTT_RX_MPDU_ENCRYPT_WEP104:
945da8fa4e3SBjoern A. Zeeb return IEEE80211_WEP_ICV_LEN;
946da8fa4e3SBjoern A. Zeeb case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
947da8fa4e3SBjoern A. Zeeb case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
948da8fa4e3SBjoern A. Zeeb return IEEE80211_TKIP_ICV_LEN;
949da8fa4e3SBjoern A. Zeeb case HTT_RX_MPDU_ENCRYPT_WEP128:
950da8fa4e3SBjoern A. Zeeb case HTT_RX_MPDU_ENCRYPT_WAPI:
951da8fa4e3SBjoern A. Zeeb break;
952da8fa4e3SBjoern A. Zeeb }
953da8fa4e3SBjoern A. Zeeb
954da8fa4e3SBjoern A. Zeeb ath10k_warn(ar, "unsupported encryption type %d\n", type);
955da8fa4e3SBjoern A. Zeeb return 0;
956da8fa4e3SBjoern A. Zeeb }
957da8fa4e3SBjoern A. Zeeb
/* 802.3-style subframe header prepended to each A-MSDU subframe:
 * destination and source MAC addresses followed by a big-endian
 * length field.
 */
struct amsdu_subframe_hdr {
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	__be16 len;
} __packed;
963da8fa4e3SBjoern A. Zeeb
964da8fa4e3SBjoern A. Zeeb #define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)
965da8fa4e3SBjoern A. Zeeb
/* Map the hardware bandwidth code from the PPDU start info words to
 * mac80211's RATE_INFO_BW_* values (0 => 20 MHz ... 3 => 160 MHz).
 * Unknown codes map to 0.
 */
static inline u8 ath10k_bw_to_mac80211_bw(u8 bw)
{
	switch (bw) {
	case 0:
		return RATE_INFO_BW_20;
	case 1:
		return RATE_INFO_BW_40;
	case 2:
		return RATE_INFO_BW_80;
	case 3:
		return RATE_INFO_BW_160;
	default:
		return 0;
	}
}
987da8fa4e3SBjoern A. Zeeb
/* Parse the PPDU start info words from the hw rx descriptor and fill in
 * rate information in @status: legacy rate index, or HT/VHT MCS, NSS,
 * bandwidth and short-GI flags, depending on the preamble type.
 */
static void ath10k_htt_rx_h_rates(struct ath10k *ar,
				  struct ieee80211_rx_status *status,
				  struct htt_rx_desc *rxd)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct rx_attention *rxd_attention;
	struct rx_mpdu_start *rxd_mpdu_start;
	struct rx_mpdu_end *rxd_mpdu_end;
	struct rx_msdu_start_common *rxd_msdu_start_common;
	struct rx_msdu_end_common *rxd_msdu_end_common;
	struct rx_ppdu_start *rxd_ppdu_start;
	struct ieee80211_supported_band *sband;
	u8 cck, rate, bw, sgi, mcs, nss;
	u8 *rxd_msdu_payload;
	u8 preamble = 0;
	u8 group_id;
	u32 info1, info2, info3;
	u32 stbc, nsts_su;

	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
	rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
	rxd_mpdu_end = ath10k_htt_rx_desc_get_mpdu_end(hw, rxd);
	rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
	rxd_ppdu_start = ath10k_htt_rx_desc_get_ppdu_start(hw, rxd);
	rxd_msdu_payload = ath10k_htt_rx_desc_get_msdu_payload(hw, rxd);

	info1 = __le32_to_cpu(rxd_ppdu_start->info1);
	info2 = __le32_to_cpu(rxd_ppdu_start->info2);
	info3 = __le32_to_cpu(rxd_ppdu_start->info3);

	preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);

	switch (preamble) {
	case HTT_RX_LEGACY:
		/* To get legacy rate index band is required. Since band can't
		 * be undefined check if freq is non-zero.
		 */
		if (!status->freq)
			return;

		cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
		rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
		rate &= ~RX_PPDU_START_RATE_FLAG;

		sband = &ar->mac.sbands[status->band];
		status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
		break;
	case HTT_RX_HT:
	case HTT_RX_HT_WITH_TXBF:
		/* HT-SIG - Table 20-11 in info2 and info3 */
		mcs = info2 & 0x1F;
		nss = mcs >> 3;
		bw = (info2 >> 7) & 1;
		sgi = (info3 >> 7) & 1;

		status->rate_idx = mcs;
		status->encoding = RX_ENC_HT;
		if (sgi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (bw)
			status->bw = RATE_INFO_BW_40;
		break;
	case HTT_RX_VHT:
	case HTT_RX_VHT_WITH_TXBF:
		/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
		 * TODO check this
		 */
		bw = info2 & 3;
		sgi = info3 & 1;
		stbc = (info2 >> 3) & 1;
		group_id = (info2 >> 4) & 0x3F;

		if (GROUP_ID_IS_SU_MIMO(group_id)) {
			mcs = (info3 >> 4) & 0x0F;
			/* With STBC the NSTS field encodes half the spatial
			 * stream count.
			 */
			nsts_su = ((info2 >> 10) & 0x07);
			if (stbc)
				nss = (nsts_su >> 2) + 1;
			else
				nss = (nsts_su + 1);
		} else {
			/* Hardware doesn't decode VHT-SIG-B into Rx descriptor
			 * so it's impossible to decode MCS. Also since
			 * firmware consumes Group Id Management frames host
			 * has no knowledge regarding group/user position
			 * mapping so it's impossible to pick the correct Nsts
			 * from VHT-SIG-A1.
			 *
			 * Bandwidth and SGI are valid so report the rateinfo
			 * on best-effort basis.
			 */
			mcs = 0;
			nss = 1;
		}

		if (mcs > 0x09) {
			/* Dump all descriptor words to aid debugging the
			 * bogus MCS.
			 * NOTE(review): info0 is printed without
			 * __le32_to_cpu() unlike its neighbors - confirm
			 * whether that is intentional.
			 */
			ath10k_warn(ar, "invalid MCS received %u\n", mcs);
			ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
				    __le32_to_cpu(rxd_attention->flags),
				    __le32_to_cpu(rxd_mpdu_start->info0),
				    __le32_to_cpu(rxd_mpdu_start->info1),
				    __le32_to_cpu(rxd_msdu_start_common->info0),
				    __le32_to_cpu(rxd_msdu_start_common->info1),
				    rxd_ppdu_start->info0,
				    __le32_to_cpu(rxd_ppdu_start->info1),
				    __le32_to_cpu(rxd_ppdu_start->info2),
				    __le32_to_cpu(rxd_ppdu_start->info3),
				    __le32_to_cpu(rxd_ppdu_start->info4));

			ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
				    __le32_to_cpu(rxd_msdu_end_common->info0),
				    __le32_to_cpu(rxd_mpdu_end->info0));

			ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
					"rx desc msdu payload: ",
					rxd_msdu_payload, 50);
		}

		status->rate_idx = mcs;
		status->nss = nss;

		if (sgi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

		status->bw = ath10k_bw_to_mac80211_bw(bw);
		status->encoding = RX_ENC_VHT;
		break;
	default:
		break;
	}
}
1119da8fa4e3SBjoern A. Zeeb
/* Resolve the operating channel of the peer a frame was received from,
 * using the peer index carried in the rx descriptor's mpdu_start word.
 *
 * Returns NULL when @rxd is absent, the peer index is flagged invalid,
 * the msdu is not the first of its mpdu, or no matching peer/vif/channel
 * context can be found.  Must be called with ar->data_lock held.
 */
static struct ieee80211_channel *
ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct rx_attention *rxd_attention;
	struct rx_msdu_end_common *rxd_msdu_end_common;
	struct rx_mpdu_start *rxd_mpdu_start;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;
	u16 peer_id;

	lockdep_assert_held(&ar->data_lock);

	if (!rxd)
		return NULL;

	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
	rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);

	if (rxd_attention->flags &
	    __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
		return NULL;

	/* The peer index is only valid on the first msdu of an mpdu. */
	if (!(rxd_msdu_end_common->info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
		return NULL;

	peer_id = MS(__le32_to_cpu(rxd_mpdu_start->info0),
		     RX_MPDU_START_INFO0_PEER_IDX);

	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer)
		return NULL;

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (WARN_ON_ONCE(!arvif))
		return NULL;

	if (ath10k_mac_vif_chan(arvif->vif, &def))
		return NULL;

	return def.chan;
}
1165da8fa4e3SBjoern A. Zeeb
/* Return the channel of the vif matching @vdev_id, or NULL if no such
 * vif exists or it has no channel context assigned.  Must be called
 * with ar->data_lock held.
 */
static struct ieee80211_channel *
ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
{
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;

	lockdep_assert_held(&ar->data_lock);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->vdev_id == vdev_id &&
		    ath10k_mac_vif_chan(arvif->vif, &def) == 0)
			return def.chan;
	}

	return NULL;
}
1182da8fa4e3SBjoern A. Zeeb
1183da8fa4e3SBjoern A. Zeeb static void
ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw * hw,struct ieee80211_chanctx_conf * conf,void * data)1184da8fa4e3SBjoern A. Zeeb ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
1185da8fa4e3SBjoern A. Zeeb struct ieee80211_chanctx_conf *conf,
1186da8fa4e3SBjoern A. Zeeb void *data)
1187da8fa4e3SBjoern A. Zeeb {
1188da8fa4e3SBjoern A. Zeeb struct cfg80211_chan_def *def = data;
1189da8fa4e3SBjoern A. Zeeb
1190da8fa4e3SBjoern A. Zeeb *def = conf->def;
1191da8fa4e3SBjoern A. Zeeb }
1192da8fa4e3SBjoern A. Zeeb
/* Last-resort channel lookup: pick the channel from any currently active
 * channel context via an atomic iteration.  Returns NULL when no channel
 * context is active (def stays zero-initialized, so def.chan is NULL).
 */
static struct ieee80211_channel *
ath10k_htt_rx_h_any_channel(struct ath10k *ar)
{
	struct cfg80211_chan_def def = {};

	ieee80211_iter_chan_contexts_atomic(ar->hw,
					    ath10k_htt_rx_h_any_chan_iter,
					    &def);

	return def.chan;
}
1204da8fa4e3SBjoern A. Zeeb
/* Determine the channel a received frame arrived on and fill in
 * status->band / status->freq.  The firmware does not report this
 * directly, so fall back through progressively less specific sources:
 * scan channel, current rx channel, the peer's vif channel (derived from
 * the rx descriptor), the vdev's channel, any active channel context,
 * and finally the target's operating channel.
 *
 * Returns true if a channel was found and @status was updated,
 * false otherwise (status is left untouched in that case).
 */
static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd,
				    u32 vdev_id)
{
	struct ieee80211_channel *ch;

	/* data_lock guards the channel pointers and is lockdep-asserted by
	 * the peer/vdev lookup helpers called below.
	 */
	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;
	if (!ch)
		ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
	if (!ch)
		ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
	if (!ch)
		ch = ath10k_htt_rx_h_any_channel(ar);
	if (!ch)
		ch = ar->tgt_oper_chan;
	spin_unlock_bh(&ar->data_lock);

	if (!ch)
		return false;

	status->band = ch->band;
	status->freq = ch->center_freq;

	return true;
}
1234da8fa4e3SBjoern A. Zeeb
/* Extract per-chain and combined RSSI from the PPDU-start descriptor and
 * convert it to an absolute signal level (dBm) by adding the default
 * noise floor.
 */
static void ath10k_htt_rx_h_signal(struct ath10k *ar,
				   struct ieee80211_rx_status *status,
				   struct htt_rx_desc *rxd)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct rx_ppdu_start *rxd_ppdu_start = ath10k_htt_rx_desc_get_ppdu_start(hw, rxd);
	int i;

	for (i = 0; i < IEEE80211_MAX_CHAINS ; i++) {
		status->chains &= ~BIT(i);

		/* NOTE(review): 0x80 appears to be the hw sentinel for "no
		 * RSSI reported on this chain" - inferred from this check,
		 * confirm against hw documentation.
		 */
		if (rxd_ppdu_start->rssi_chains[i].pri20_mhz != 0x80) {
			status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR +
						  rxd_ppdu_start->rssi_chains[i].pri20_mhz;

			status->chains |= BIT(i);
		}
	}

	/* FIXME: Get real NF */
	status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
			 rxd_ppdu_start->rssi_comb;
	status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}
1259da8fa4e3SBjoern A. Zeeb
/* Copy the 32-bit TSF timestamp from the PPDU-end descriptor into the rx
 * status and flag it as an end-of-frame timestamp.
 */
static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct rx_ppdu_end_common *rxd_ppdu_end_common;

	rxd_ppdu_end_common = ath10k_htt_rx_desc_get_ppdu_end(hw, rxd);

	/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
	 * means all prior MSDUs in a PPDU are reported to mac80211 without the
	 * TSF. Is it worth holding frames until end of PPDU is known?
	 *
	 * FIXME: Can we get/compute 64bit TSF?
	 */
	status->mactime = __le32_to_cpu(rxd_ppdu_end_common->tsf_timestamp);
	status->flag |= RX_FLAG_MACTIME_END;
}
1278da8fa4e3SBjoern A. Zeeb
/* Update the shared rx status with per-PPDU information taken from the
 * first MSDU of @amsdu.  On the first MPDU of a PPDU the stale per-PPDU
 * fields are cleared and channel/signal/rate are (re)derived; on the last
 * MPDU the TSF is recorded and the A-MPDU reference counter is advanced.
 */
static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 u32 vdev_id)
{
	struct sk_buff *first;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_desc *rxd;
	struct rx_attention *rxd_attention;
	bool is_first_ppdu;
	bool is_last_ppdu;

	if (skb_queue_empty(amsdu))
		return;

	/* The rx descriptor lives immediately before the frame data. */
	first = skb_peek(amsdu);
	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
#if defined(__linux__)
				    (void *)first->data - hw->rx_desc_ops->rx_desc_size);
#elif defined(__FreeBSD__)
				    (u8 *)first->data - hw->rx_desc_ops->rx_desc_size);
#endif

	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);

	is_first_ppdu = !!(rxd_attention->flags &
			   __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
	is_last_ppdu = !!(rxd_attention->flags &
			  __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));

	if (is_first_ppdu) {
		/* New PPDU starts so clear out the old per-PPDU status. */
		status->freq = 0;
		status->rate_idx = 0;
		status->nss = 0;
		status->encoding = RX_ENC_LEGACY;
		status->bw = RATE_INFO_BW_20;

		status->flag &= ~RX_FLAG_MACTIME_END;
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		status->flag &= ~(RX_FLAG_AMPDU_IS_LAST);
		status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
		status->ampdu_reference = ar->ampdu_reference;

		ath10k_htt_rx_h_signal(ar, status, rxd);
		ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
		ath10k_htt_rx_h_rates(ar, status, rxd);
	}

	if (is_last_ppdu) {
		ath10k_htt_rx_h_mactime(ar, status, rxd);

		/* set ampdu last segment flag */
		status->flag |= RX_FLAG_AMPDU_IS_LAST;
		ar->ampdu_reference++;
	}
}
1337da8fa4e3SBjoern A. Zeeb
/* TID (0-7) to access-category label, used only for debug prints in
 * ath10k_get_tid().
 */
static const char * const tid_to_ac[] = {
	"BE",
	"BK",
	"BK",
	"BE",
	"VI",
	"VI",
	"VO",
	"VO",
};
1348da8fa4e3SBjoern A. Zeeb
/* Format a frame's TID (and access category, when the TID maps to one)
 * into @out for debug logging.  Returns @out, or an empty string for
 * non-QoS frames which carry no TID.
 */
static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
{
	int tid;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return "";

	tid = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;

	if (tid >= 8)
		snprintf(out, size, "tid %d", tid);
	else
		snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);

	return out;
}
1366da8fa4e3SBjoern A. Zeeb
/* Stamp @skb's control block with the accumulated rx status and queue it
 * for delivery to mac80211 (drained later via ar->htt.rx_msdus_q).
 */
static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
				       struct ieee80211_rx_status *rx_status,
				       struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;

	status = IEEE80211_SKB_RXCB(skb);
	*status = *rx_status;

	skb_queue_tail(&ar->htt.rx_msdus_q, skb);
}
1378da8fa4e3SBjoern A. Zeeb
/* Final delivery of one rx MSDU to mac80211.  Drops CRC-failed frames
 * unless the FCS-fail filter flag is set, emits debug/trace output and
 * hands the skb to ieee80211_rx_napi().  Consumes @skb on all paths.
 */
static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	char tid[32];

	status = IEEE80211_SKB_RXCB(skb);

	/* Drop CRC-errored frames unless the user asked for them (FIF_FCSFAIL,
	 * e.g. monitor mode); account the drop.
	 */
	if (!(ar->filter_flags & FIF_FCSFAIL) &&
	    status->flag & RX_FLAG_FAILED_FCS_CRC) {
		ar->stats.rx_crc_err_drop++;
		dev_kfree_skb_any(skb);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_DATA,
		   "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
		   skb,
		   skb->len,
		   ieee80211_get_SA(hdr),
		   ath10k_get_tid(hdr, tid, sizeof(tid)),
		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
							"mcast" : "ucast",
		   IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl)),
		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
		   (status->encoding == RX_ENC_HT) ? "ht" : "",
		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
			skb->data, skb->len);
	trace_ath10k_rx_hdr(ar, skb->data, skb->len);
	trace_ath10k_rx_payload(ar, skb->data, skb->len);

	ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
}
1424da8fa4e3SBjoern A. Zeeb
ath10k_htt_rx_nwifi_hdrlen(struct ath10k * ar,struct ieee80211_hdr * hdr)1425da8fa4e3SBjoern A. Zeeb static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
1426da8fa4e3SBjoern A. Zeeb struct ieee80211_hdr *hdr)
1427da8fa4e3SBjoern A. Zeeb {
1428da8fa4e3SBjoern A. Zeeb int len = ieee80211_hdrlen(hdr->frame_control);
1429da8fa4e3SBjoern A. Zeeb
1430da8fa4e3SBjoern A. Zeeb if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
1431da8fa4e3SBjoern A. Zeeb ar->running_fw->fw_file.fw_features))
1432da8fa4e3SBjoern A. Zeeb len = round_up(len, 4);
1433da8fa4e3SBjoern A. Zeeb
1434da8fa4e3SBjoern A. Zeeb return len;
1435da8fa4e3SBjoern A. Zeeb }
1436da8fa4e3SBjoern A. Zeeb
/* Undo "raw" rx decap: the frame still carries its full 802.11 header,
 * crypto params, A-MSDU subframe header (if any) and FCS.  Strip the FCS
 * always and, when the hw already decrypted the frame, strip the crypto
 * material (IV/MIC/ICV/MMIC) indicated by the RX_FLAG_*_STRIPPED flags.
 * For msdu-limit-error frames the original 802.11 header (and crypto
 * param) from @first_hdr is re-attached first.
 */
static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					enum htt_rx_mpdu_encrypt_type enctype,
					bool is_decrypted,
					const u8 first_hdr[64])
{
	struct ieee80211_hdr *hdr;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_desc *rxd;
	struct rx_msdu_end_common *rxd_msdu_end_common;
	size_t hdr_len;
	size_t crypto_len;
	bool is_first;
	bool is_last;
	bool msdu_limit_err;
	int bytes_aligned = ar->hw_params.decap_align_bytes;
	u8 *qos;

	/* The rx descriptor sits immediately before the frame data. */
	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
#if defined(__linux__)
				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
#elif defined(__FreeBSD__)
				    (u8 *)msdu->data - hw->rx_desc_ops->rx_desc_size);
#endif

	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
	is_first = !!(rxd_msdu_end_common->info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd_msdu_end_common->info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));

	/* Delivered decapped frame:
	 * [802.11 header]
	 * [crypto param] <-- can be trimmed if !fcs_err &&
	 *                    !decrypt_err && !peer_idx_invalid
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 * [payload]
	 * [FCS] <-- at end, needs to be trimmed
	 */

	/* Some hardwares(QCA99x0 variants) limit number of msdus in a-msdu when
	 * deaggregate, so that unwanted MSDU-deaggregation is avoided for
	 * error packets. If limit exceeds, hw sends all remaining MSDUs as
	 * a single last MSDU with this msdu limit error set.
	 */
	msdu_limit_err = ath10k_htt_rx_desc_msdu_limit_error(hw, rxd);

	/* If MSDU limit error happens, then don't warn on, the partial raw MSDU
	 * without first MSDU is expected in that case, and handled later here.
	 */
	/* This probably shouldn't happen but warn just in case */
	if (WARN_ON_ONCE(!is_first && !msdu_limit_err))
		return;

	/* This probably shouldn't happen but warn just in case */
	if (WARN_ON_ONCE(!(is_first && is_last) && !msdu_limit_err))
		return;

	skb_trim(msdu, msdu->len - FCS_LEN);

	/* Push original 80211 header */
	if (unlikely(msdu_limit_err)) {
#if defined(__linux__)
		hdr = (struct ieee80211_hdr *)first_hdr;
#elif defined(__FreeBSD__)
		hdr = __DECONST(struct ieee80211_hdr *, first_hdr);
#endif
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		/* Mark the re-attached header as A-MSDU so mac80211 parses
		 * the subframe headers.
		 */
		if (ieee80211_is_data_qos(hdr->frame_control)) {
			qos = ieee80211_get_qos_ctl(hdr);
			qos[0] |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
		}

		/* Crypto param follows the (alignment-padded) header in the
		 * saved first_hdr copy.
		 */
		if (crypto_len)
			memcpy(skb_push(msdu, crypto_len),
#if defined(__linux__)
			       (void *)hdr + round_up(hdr_len, bytes_aligned),
#elif defined(__FreeBSD__)
			       (u8 *)hdr + round_up(hdr_len, bytes_aligned),
#endif
			       crypto_len);

		memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
	}

	/* In most cases this will be true for sniffed frames. It makes sense
	 * to deliver them as-is without stripping the crypto param. This is
	 * necessary for software based decryption.
	 *
	 * If there's no error then the frame is decrypted. At least that is
	 * the case for frames that come in via fragmented rx indication.
	 */
	if (!is_decrypted)
		return;

	/* The payload is decrypted so strip crypto params. Start from tail
	 * since hdr is used to compute some stuff.
	 */

	hdr = (void *)msdu->data;

	/* Tail */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_mic_len(ar, enctype));

		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_icv_len(ar, enctype));
	} else {
		/* MIC */
		if (status->flag & RX_FLAG_MIC_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath10k_htt_rx_crypto_mic_len(ar, enctype));

		/* ICV */
		if (status->flag & RX_FLAG_ICV_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath10k_htt_rx_crypto_icv_len(ar, enctype));
	}

	/* MMIC */
	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);

	/* Head */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		/* Slide the 802.11 header forward over the IV, then drop
		 * the now-duplicated front bytes.
		 */
#if defined(__linux__)
		memmove((void *)msdu->data + crypto_len,
#elif defined(__FreeBSD__)
		memmove((u8 *)msdu->data + crypto_len,
#endif
			(void *)msdu->data, hdr_len);
		skb_pull(msdu, crypto_len);
	}
}
1581da8fa4e3SBjoern A. Zeeb
/* Undo "nwifi" rx decap: replace the firmware's simplified 3-addr 802.11
 * header with the original 802.11 header saved in @first_hdr (restoring
 * QoS control and, when not IV-stripped, the crypto param), while keeping
 * this MSDU's own DA/SA addresses.
 */
static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
					  struct sk_buff *msdu,
					  struct ieee80211_rx_status *status,
					  const u8 first_hdr[64],
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
#if defined(__linux__)
	struct ieee80211_hdr *hdr;
#elif defined(__FreeBSD__)
	/* FreeBSD: separate const/non-const pointers to avoid casting away
	 * the const of first_hdr.
	 */
	const struct ieee80211_hdr *hdr;
	struct ieee80211_hdr *hdr2;
#endif
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int l3_pad_bytes;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 *
	 * Note: The nwifi header doesn't have QoS Control and is
	 * (always?) a 3addr frame.
	 *
	 * Note2: There's no A-MSDU subframe header. Even if it's part
	 * of an A-MSDU.
	 */

	/* pull decapped header and copy SA & DA */
#if defined(__linux__)
	rxd = HTT_RX_BUF_TO_RX_DESC(hw, (void *)msdu->data -
#elif defined(__FreeBSD__)
	rxd = HTT_RX_BUF_TO_RX_DESC(hw, (u8 *)msdu->data -
#endif
				    hw->rx_desc_ops->rx_desc_size);

	/* Account for any L3 padding the hw inserted before the payload. */
	l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);

#if defined(__linux__)
	hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);

	hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
#elif defined(__FreeBSD__)
	hdr2 = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);

	hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr2);
	ether_addr_copy(da, ieee80211_get_DA(hdr2));
	ether_addr_copy(sa, ieee80211_get_SA(hdr2));
#endif
	skb_pull(msdu, hdr_len);

	/* push original 802.11 header */
#if defined(__linux__)
	hdr = (struct ieee80211_hdr *)first_hdr;
#elif defined(__FreeBSD__)
	hdr = (const struct ieee80211_hdr *)first_hdr;
#endif
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	/* Restore the crypto param (located after the alignment-padded
	 * header in the saved first_hdr copy) unless the IV was stripped.
	 */
	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
#if defined(__linux__)
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
#elif defined(__FreeBSD__)
		       (const u8 *)hdr + round_up(hdr_len, bytes_aligned),
#endif
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
#if defined(__linux__)
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
#elif defined(__FreeBSD__)
	hdr2 = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr2), da);
	ether_addr_copy(ieee80211_get_SA(hdr2), sa);
#endif
}
1673da8fa4e3SBjoern A. Zeeb
/* Locate the RFC 1042 / LLC header inside the rx descriptor's raw
 * rx_hdr_status copy of the frame.  For the first MSDU it sits after the
 * (alignment-padded) 802.11 header and crypto param; for A-MSDU subframes
 * an amsdu_subframe_hdr precedes it as well.  Returns a pointer into the
 * descriptor, never NULL here (callers still WARN-check it).
 */
static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
					  struct sk_buff *msdu,
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_desc *rxd;
	struct rx_msdu_end_common *rxd_msdu_end_common;
	u8 *rxd_rx_hdr_status;
	size_t hdr_len, crypto_len;
#if defined(__linux__)
	void *rfc1042;
#elif defined(__FreeBSD__)
	/* FreeBSD: u8 * because arithmetic on void * is a GNU extension. */
	u8 *rfc1042;
#endif
	bool is_first, is_last, is_amsdu;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* The rx descriptor sits immediately before the frame data. */
	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
#if defined(__linux__)
				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
#elif defined(__FreeBSD__)
				    (u8 *)msdu->data - hw->rx_desc_ops->rx_desc_size);
#endif

	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
	rxd_rx_hdr_status = ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);
	hdr = (void *)rxd_rx_hdr_status;

	is_first = !!(rxd_msdu_end_common->info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd_msdu_end_common->info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
	is_amsdu = !(is_first && is_last);

#if defined(__linux__)
	rfc1042 = hdr;
#elif defined(__FreeBSD__)
	rfc1042 = (void *)hdr;
#endif

	if (is_first) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		rfc1042 += round_up(hdr_len, bytes_aligned) +
			   round_up(crypto_len, bytes_aligned);
	}

	if (is_amsdu)
		rfc1042 += sizeof(struct amsdu_subframe_hdr);

	return rfc1042;
}
1728da8fa4e3SBjoern A. Zeeb
/* Re-build an 802.11 frame from an ethernet-decapped MSDU.
 *
 * The target delivered the frame as [eth header][payload].  This routine
 * replaces the ethernet header with the original 802.11 header (taken from
 * @first_hdr, the header snapshot of the MPDU's first MSDU), an optional
 * crypto parameter blob and the rfc1042/llc header recovered from the raw
 * rx buffer, then restores the DA/SA from the ethernet header.
 *
 * No-op (with a WARN) if the rfc1042 header cannot be located.
 */
static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					const u8 first_hdr[64],
					enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
#if defined(__linux__)
	struct ieee80211_hdr *hdr;
#elif defined(__FreeBSD__)
	const struct ieee80211_hdr *hdr;
	struct ieee80211_hdr *hdr2;
#endif
	struct ethhdr *eth;
	size_t hdr_len;
	void *rfc1042;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int l3_pad_bytes;
	struct htt_rx_desc *rxd;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
	 * [payload]
	 */

	rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
	if (WARN_ON_ONCE(!rfc1042))
		return;

	/* The hw rx descriptor sits immediately before the frame data. */
	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
#if defined(__linux__)
				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
#elif defined(__FreeBSD__)
				    (u8 *)msdu->data - hw->rx_desc_ops->rx_desc_size);
#endif

	/* Absorb the hw-inserted L3 alignment padding before the header. */
	l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);
	skb_pull(msdu, l3_pad_bytes);

	/* pull decapped header and copy SA & DA */
	eth = (struct ethhdr *)msdu->data;
	ether_addr_copy(da, eth->h_dest);
	ether_addr_copy(sa, eth->h_source);
	skb_pull(msdu, sizeof(struct ethhdr));

	/* push rfc1042/llc/snap */
	memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
	       sizeof(struct rfc1042_hdr));

	/* push original 802.11 header */
#if defined(__linux__)
	hdr = (struct ieee80211_hdr *)first_hdr;
#elif defined(__FreeBSD__)
	hdr = (const struct ieee80211_hdr *)first_hdr;
#endif
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	/* If the IV was not stripped by hw, re-insert the crypto parameters
	 * that followed the (4-byte aligned) header in the snapshot.
	 */
	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
#if defined(__linux__)
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
#elif defined(__FreeBSD__)
		       (const u8 *)hdr + round_up(hdr_len, bytes_aligned),
#endif
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
#if defined(__linux__)
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
#elif defined(__FreeBSD__)
	hdr2 = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr2), da);
	ether_addr_copy(ieee80211_get_SA(hdr2), sa);
#endif
}
1815da8fa4e3SBjoern A. Zeeb
/* Re-build an 802.11 frame from an 802.3/SNAP-decapped MSDU.
 *
 * The target delivered the frame as [amsdu header][rfc1042/llc][payload].
 * This strips the A-MSDU subframe header (keeping the llc/snap header in
 * place) and pushes back the original 802.11 header from @first_hdr plus,
 * when the IV was not stripped by hw, the crypto parameters that followed
 * it in the header snapshot.
 */
static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
					 struct sk_buff *msdu,
					 struct ieee80211_rx_status *status,
					 const u8 first_hdr[64],
					 enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
#if defined(__linux__)
	struct ieee80211_hdr *hdr;
#elif defined(__FreeBSD__)
	const struct ieee80211_hdr *hdr;
#endif
	size_t hdr_len;
	int l3_pad_bytes;
	struct htt_rx_desc *rxd;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [amsdu header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 * [payload]
	 */

	/* The hw rx descriptor sits immediately before the frame data. */
	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
#if defined(__linux__)
				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
#elif defined(__FreeBSD__)
				    (u8 *)msdu->data - hw->rx_desc_ops->rx_desc_size);
#endif

	l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);

	/* Absorb the hw L3 padding and drop the A-MSDU subframe header. */
	skb_put(msdu, l3_pad_bytes);
	skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);

#if defined(__linux__)
	hdr = (struct ieee80211_hdr *)first_hdr;
#elif defined(__FreeBSD__)
	hdr = (const struct ieee80211_hdr *)first_hdr;
#endif
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	/* Re-insert crypto params when hw left the IV in the frame. */
	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
#if defined(__linux__)
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
#elif defined(__FreeBSD__)
		       (const u8 *)hdr + round_up(hdr_len, bytes_aligned),
#endif
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
}
1871da8fa4e3SBjoern A. Zeeb
/* Restore the original 802.11 framing of one MSDU.
 *
 * The decap format reported by the hw rx descriptor selects which helper
 * rebuilds the frame: raw, native-wifi, ethernet or 802.3/SNAP.
 *
 * First msdu's decapped header:
 * [802.11 header] <-- padded to 4 bytes long
 * [crypto param] <-- padded to 4 bytes long
 * [amsdu header] <-- only if A-MSDU
 * [rfc1042/llc]
 *
 * Other (2nd, 3rd, ..) msdu's decapped header:
 * [amsdu header] <-- only if A-MSDU
 * [rfc1042/llc]
 */
static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
				    struct sk_buff *msdu,
				    struct ieee80211_rx_status *status,
				    u8 first_hdr[64],
				    enum htt_rx_mpdu_encrypt_type enctype,
				    bool is_decrypted)
{
	struct ath10k_hw_params *hwp = &ar->hw_params;
	struct rx_msdu_start_common *msdu_start;
	struct htt_rx_desc *rx_desc;
	enum rx_msdu_decap_format fmt;

	/* The rx descriptor is located immediately before the frame data. */
	rx_desc = HTT_RX_BUF_TO_RX_DESC(hwp,
#if defined(__linux__)
					(void *)msdu->data - hwp->rx_desc_ops->rx_desc_size);
#elif defined(__FreeBSD__)
					(u8 *)msdu->data - hwp->rx_desc_ops->rx_desc_size);
#endif

	msdu_start = ath10k_htt_rx_desc_get_msdu_start(hwp, rx_desc);
	fmt = MS(__le32_to_cpu(msdu_start->info1),
		 RX_MSDU_START_INFO1_DECAP_FORMAT);

	switch (fmt) {
	case RX_MSDU_DECAP_RAW:
		ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
					    is_decrypted, first_hdr);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
					      enctype);
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr,
					    enctype);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr,
					     enctype);
		break;
	}
}
1924da8fa4e3SBjoern A. Zeeb
ath10k_htt_rx_get_csum_state(struct ath10k_hw_params * hw,struct sk_buff * skb)1925da8fa4e3SBjoern A. Zeeb static int ath10k_htt_rx_get_csum_state(struct ath10k_hw_params *hw, struct sk_buff *skb)
1926da8fa4e3SBjoern A. Zeeb {
1927da8fa4e3SBjoern A. Zeeb struct htt_rx_desc *rxd;
1928da8fa4e3SBjoern A. Zeeb struct rx_attention *rxd_attention;
1929da8fa4e3SBjoern A. Zeeb struct rx_msdu_start_common *rxd_msdu_start_common;
1930da8fa4e3SBjoern A. Zeeb u32 flags, info;
1931da8fa4e3SBjoern A. Zeeb bool is_ip4, is_ip6;
1932da8fa4e3SBjoern A. Zeeb bool is_tcp, is_udp;
1933da8fa4e3SBjoern A. Zeeb bool ip_csum_ok, tcpudp_csum_ok;
1934da8fa4e3SBjoern A. Zeeb
1935da8fa4e3SBjoern A. Zeeb rxd = HTT_RX_BUF_TO_RX_DESC(hw,
1936da8fa4e3SBjoern A. Zeeb #if defined(__linux__)
1937da8fa4e3SBjoern A. Zeeb (void *)skb->data - hw->rx_desc_ops->rx_desc_size);
1938da8fa4e3SBjoern A. Zeeb #elif defined(__FreeBSD__)
1939da8fa4e3SBjoern A. Zeeb (u8 *)skb->data - hw->rx_desc_ops->rx_desc_size);
1940da8fa4e3SBjoern A. Zeeb #endif
1941da8fa4e3SBjoern A. Zeeb
1942da8fa4e3SBjoern A. Zeeb rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
1943da8fa4e3SBjoern A. Zeeb rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
1944da8fa4e3SBjoern A. Zeeb flags = __le32_to_cpu(rxd_attention->flags);
1945da8fa4e3SBjoern A. Zeeb info = __le32_to_cpu(rxd_msdu_start_common->info1);
1946da8fa4e3SBjoern A. Zeeb
1947da8fa4e3SBjoern A. Zeeb is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
1948da8fa4e3SBjoern A. Zeeb is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
1949da8fa4e3SBjoern A. Zeeb is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
1950da8fa4e3SBjoern A. Zeeb is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
1951da8fa4e3SBjoern A. Zeeb ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
1952da8fa4e3SBjoern A. Zeeb tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);
1953da8fa4e3SBjoern A. Zeeb
1954da8fa4e3SBjoern A. Zeeb if (!is_ip4 && !is_ip6)
1955da8fa4e3SBjoern A. Zeeb return CHECKSUM_NONE;
1956da8fa4e3SBjoern A. Zeeb if (!is_tcp && !is_udp)
1957da8fa4e3SBjoern A. Zeeb return CHECKSUM_NONE;
1958da8fa4e3SBjoern A. Zeeb if (!ip_csum_ok)
1959da8fa4e3SBjoern A. Zeeb return CHECKSUM_NONE;
1960da8fa4e3SBjoern A. Zeeb if (!tcpudp_csum_ok)
1961da8fa4e3SBjoern A. Zeeb return CHECKSUM_NONE;
1962da8fa4e3SBjoern A. Zeeb
1963da8fa4e3SBjoern A. Zeeb return CHECKSUM_UNNECESSARY;
1964da8fa4e3SBjoern A. Zeeb }
1965da8fa4e3SBjoern A. Zeeb
/* Propagate the hardware checksum verdict for @msdu into the skb so the
 * network stack can skip redundant software checksum validation.
 */
static void ath10k_htt_rx_h_csum_offload(struct ath10k_hw_params *hw,
					 struct sk_buff *msdu)
{
	msdu->ip_summed = ath10k_htt_rx_get_csum_state(hw, msdu);
}
1971da8fa4e3SBjoern A. Zeeb
ath10k_htt_rx_h_get_pn(struct ath10k * ar,struct sk_buff * skb,enum htt_rx_mpdu_encrypt_type enctype)1972da8fa4e3SBjoern A. Zeeb static u64 ath10k_htt_rx_h_get_pn(struct ath10k *ar, struct sk_buff *skb,
1973da8fa4e3SBjoern A. Zeeb enum htt_rx_mpdu_encrypt_type enctype)
1974da8fa4e3SBjoern A. Zeeb {
1975da8fa4e3SBjoern A. Zeeb struct ieee80211_hdr *hdr;
1976da8fa4e3SBjoern A. Zeeb u64 pn = 0;
1977da8fa4e3SBjoern A. Zeeb u8 *ehdr;
1978da8fa4e3SBjoern A. Zeeb
1979*07724ba6SBjoern A. Zeeb hdr = (struct ieee80211_hdr *)skb->data;
1980*07724ba6SBjoern A. Zeeb ehdr = skb->data + ieee80211_hdrlen(hdr->frame_control);
1981da8fa4e3SBjoern A. Zeeb
1982da8fa4e3SBjoern A. Zeeb if (enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2) {
1983da8fa4e3SBjoern A. Zeeb pn = ehdr[0];
1984da8fa4e3SBjoern A. Zeeb pn |= (u64)ehdr[1] << 8;
1985da8fa4e3SBjoern A. Zeeb pn |= (u64)ehdr[4] << 16;
1986da8fa4e3SBjoern A. Zeeb pn |= (u64)ehdr[5] << 24;
1987da8fa4e3SBjoern A. Zeeb pn |= (u64)ehdr[6] << 32;
1988da8fa4e3SBjoern A. Zeeb pn |= (u64)ehdr[7] << 40;
1989da8fa4e3SBjoern A. Zeeb }
1990da8fa4e3SBjoern A. Zeeb return pn;
1991da8fa4e3SBjoern A. Zeeb }
1992da8fa4e3SBjoern A. Zeeb
ath10k_htt_rx_h_frag_multicast_check(struct ath10k * ar,struct sk_buff * skb)1993da8fa4e3SBjoern A. Zeeb static bool ath10k_htt_rx_h_frag_multicast_check(struct ath10k *ar,
1994*07724ba6SBjoern A. Zeeb struct sk_buff *skb)
1995da8fa4e3SBjoern A. Zeeb {
1996da8fa4e3SBjoern A. Zeeb struct ieee80211_hdr *hdr;
1997da8fa4e3SBjoern A. Zeeb
1998*07724ba6SBjoern A. Zeeb hdr = (struct ieee80211_hdr *)skb->data;
1999da8fa4e3SBjoern A. Zeeb return !is_multicast_ether_addr(hdr->addr1);
2000da8fa4e3SBjoern A. Zeeb }
2001da8fa4e3SBjoern A. Zeeb
/* Validate the CCMP packet number of a defragmented-frame fragment.
 *
 * The first fragment (frag 0) records its PN and sequence number in the
 * peer's per-TID state; every later fragment must carry the same sequence
 * number and a PN exactly one greater than the previous fragment,
 * otherwise it is rejected.  This prevents mixing fragments from
 * different MPDUs (fragment cache style attacks).
 *
 * Returns true if the fragment may be accepted, false to discard it.
 */
static bool ath10k_htt_rx_h_frag_pn_check(struct ath10k *ar,
					  struct sk_buff *skb,
					  u16 peer_id,
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ath10k_peer *peer;
	union htt_rx_pn_t *last_pn, new_pn = {0};
	struct ieee80211_hdr *hdr;
	u8 tid, frag_number;
	u32 seq;

	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer for frag pn check\n");
		return false;
	}

	/* Non-QoS data shares a single pseudo-TID slot. */
	hdr = (struct ieee80211_hdr *)skb->data;
	if (ieee80211_is_data_qos(hdr->frame_control))
		tid = ieee80211_get_tid(hdr);
	else
		tid = ATH10K_TXRX_NON_QOS_TID;

	last_pn = &peer->frag_tids_last_pn[tid];
	new_pn.pn48 = ath10k_htt_rx_h_get_pn(ar, skb, enctype);
	frag_number = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
	seq = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl));

	if (frag_number == 0) {
		/* Start of a new fragmented MPDU: (re)anchor the state. */
		last_pn->pn48 = new_pn.pn48;
		peer->frag_tids_seq[tid] = seq;
	} else {
		/* Later fragments must match the anchored sequence number
		 * and continue the PN sequence without gaps.
		 */
		if (seq != peer->frag_tids_seq[tid])
			return false;

		if (new_pn.pn48 != last_pn->pn48 + 1)
			return false;

		last_pn->pn48 = new_pn.pn48;
	}

	return true;
}
2045da8fa4e3SBjoern A. Zeeb
/* Post-process all MSDUs of one received MPDU.
 *
 * Decodes the encryption type and error attention bits from the hw rx
 * descriptors (first and last MSDU), updates @status flags accordingly,
 * optionally reports a classified rx error via @err, validates and drops
 * bad fragments when @frag, applies checksum offload results and undecaps
 * every MSDU back to 802.11 framing.  @rx_hdr, when non-NULL, receives a
 * copy of the raw 802.11 header snapshot.  @fill_crypt_header selects
 * whether crypto parameters are kept in the frame or fully stripped.
 */
static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 bool fill_crypt_header,
				 u8 *rx_hdr,
				 enum ath10k_pkt_rx_err *err,
				 u16 peer_id,
				 bool frag)
{
	struct sk_buff *first;
	struct sk_buff *last;
	struct sk_buff *msdu, *temp;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_desc *rxd;
	struct rx_attention *rxd_attention;
	struct rx_mpdu_start *rxd_mpdu_start;

	struct ieee80211_hdr *hdr;
	enum htt_rx_mpdu_encrypt_type enctype;
	u8 first_hdr[64];
	u8 *qos;
	bool has_fcs_err;
	bool has_crypto_err;
	bool has_tkip_err;
	bool has_peer_idx_invalid;
	bool is_decrypted;
	bool is_mgmt;
	u32 attention;
	bool frag_pn_check = true, multicast_check = true;

	if (skb_queue_empty(amsdu))
		return;

	/* Encryption type and mgmt flag live in the first MSDU's descriptor. */
	first = skb_peek(amsdu);
	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
#if defined(__linux__)
				    (void *)first->data - hw->rx_desc_ops->rx_desc_size);
#elif defined(__FreeBSD__)
				    (u8 *)first->data - hw->rx_desc_ops->rx_desc_size);
#endif

	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
	rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);

	is_mgmt = !!(rxd_attention->flags &
		     __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));

	enctype = MS(__le32_to_cpu(rxd_mpdu_start->info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	/* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
	 * decapped header. It'll be used for undecapping of each MSDU.
	 */
	hdr = (void *)ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);
	memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);

	if (rx_hdr)
		memcpy(rx_hdr, hdr, RX_HTT_HDR_STATUS_LEN);

	/* Each A-MSDU subframe will use the original header as the base and be
	 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
	 */
	hdr = (void *)first_hdr;

	if (ieee80211_is_data_qos(hdr->frame_control)) {
		qos = ieee80211_get_qos_ctl(hdr);
		qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
	}

	/* Some attention flags are valid only in the last MSDU. */
	last = skb_peek_tail(amsdu);
	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
#if defined(__linux__)
				    (void *)last->data - hw->rx_desc_ops->rx_desc_size);
#elif defined(__FreeBSD__)
				    (u8 *)last->data - hw->rx_desc_ops->rx_desc_size);
#endif

	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
	attention = __le32_to_cpu(rxd_attention->flags);

	has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
	has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
	has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);

	/* Note: If hardware captures an encrypted frame that it can't decrypt,
	 * e.g. due to fcs error, missing peer or invalid key data it will
	 * report the frame as raw.
	 */
	is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
			!has_fcs_err &&
			!has_crypto_err &&
			!has_peer_idx_invalid);

	/* Clear per-MPDU flags while leaving per-PPDU flags intact. */
	status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
			  RX_FLAG_MMIC_ERROR |
			  RX_FLAG_DECRYPTED |
			  RX_FLAG_IV_STRIPPED |
			  RX_FLAG_ONLY_MONITOR |
			  RX_FLAG_MMIC_STRIPPED);

	if (has_fcs_err)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (has_tkip_err)
		status->flag |= RX_FLAG_MMIC_ERROR;

	/* Report the most significant error class, if the caller asked. */
	if (err) {
		if (has_fcs_err)
			*err = ATH10K_PKT_RX_ERR_FCS;
		else if (has_tkip_err)
			*err = ATH10K_PKT_RX_ERR_TKIP;
		else if (has_crypto_err)
			*err = ATH10K_PKT_RX_ERR_CRYPT;
		else if (has_peer_idx_invalid)
			*err = ATH10K_PKT_RX_ERR_PEER_IDX_INVAL;
	}

	/* Firmware reports all necessary management frames via WMI already.
	 * They are not reported to monitor interfaces at all so pass the ones
	 * coming via HTT to monitor interfaces instead. This simplifies
	 * matters a lot.
	 */
	if (is_mgmt)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	if (is_decrypted) {
		status->flag |= RX_FLAG_DECRYPTED;

		if (likely(!is_mgmt))
			status->flag |= RX_FLAG_MMIC_STRIPPED;

		if (fill_crypt_header)
			status->flag |= RX_FLAG_MIC_STRIPPED |
					RX_FLAG_ICV_STRIPPED;
		else
			status->flag |= RX_FLAG_IV_STRIPPED;
	}

	skb_queue_walk(amsdu, msdu) {
		/* For defragmentation, validate the CCMP PN sequence and
		 * unicast addressing of each fragment before accepting it.
		 */
		if (frag && !fill_crypt_header && is_decrypted &&
		    enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
			frag_pn_check = ath10k_htt_rx_h_frag_pn_check(ar,
								      msdu,
								      peer_id,
								      enctype);

		if (frag)
			multicast_check = ath10k_htt_rx_h_frag_multicast_check(ar,
									       msdu);

		if (!frag_pn_check || !multicast_check) {
			/* Discard the fragment with invalid PN or multicast DA
			 */
			temp = msdu->prev;
			__skb_unlink(msdu, amsdu);
			dev_kfree_skb_any(msdu);
			/* Rewind so skb_queue_walk advances correctly past
			 * the removed element.
			 */
			msdu = temp;
			frag_pn_check = true;
			multicast_check = true;
			continue;
		}

		ath10k_htt_rx_h_csum_offload(&ar->hw_params, msdu);

		if (frag && !fill_crypt_header &&
		    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
			status->flag &= ~RX_FLAG_MMIC_STRIPPED;

		ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
					is_decrypted);

		/* Undecapping involves copying the original 802.11 header back
		 * to sk_buff. If frame is protected and hardware has decrypted
		 * it then remove the protected bit.
		 */
		if (!is_decrypted)
			continue;
		if (is_mgmt)
			continue;

		if (fill_crypt_header)
			continue;

		hdr = (void *)msdu->data;
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);

		/* ~A & ~B forms a mask clearing both flags at once. */
		if (frag && !fill_crypt_header &&
		    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
			status->flag &= ~RX_FLAG_IV_STRIPPED &
					~RX_FLAG_MMIC_STRIPPED;
	}
}
2241da8fa4e3SBjoern A. Zeeb
ath10k_htt_rx_h_enqueue(struct ath10k * ar,struct sk_buff_head * amsdu,struct ieee80211_rx_status * status)2242da8fa4e3SBjoern A. Zeeb static void ath10k_htt_rx_h_enqueue(struct ath10k *ar,
2243da8fa4e3SBjoern A. Zeeb struct sk_buff_head *amsdu,
2244da8fa4e3SBjoern A. Zeeb struct ieee80211_rx_status *status)
2245da8fa4e3SBjoern A. Zeeb {
2246da8fa4e3SBjoern A. Zeeb struct sk_buff *msdu;
2247da8fa4e3SBjoern A. Zeeb struct sk_buff *first_subframe;
2248da8fa4e3SBjoern A. Zeeb
2249da8fa4e3SBjoern A. Zeeb first_subframe = skb_peek(amsdu);
2250da8fa4e3SBjoern A. Zeeb
2251da8fa4e3SBjoern A. Zeeb while ((msdu = __skb_dequeue(amsdu))) {
2252da8fa4e3SBjoern A. Zeeb /* Setup per-MSDU flags */
2253da8fa4e3SBjoern A. Zeeb if (skb_queue_empty(amsdu))
2254da8fa4e3SBjoern A. Zeeb status->flag &= ~RX_FLAG_AMSDU_MORE;
2255da8fa4e3SBjoern A. Zeeb else
2256da8fa4e3SBjoern A. Zeeb status->flag |= RX_FLAG_AMSDU_MORE;
2257da8fa4e3SBjoern A. Zeeb
2258da8fa4e3SBjoern A. Zeeb if (msdu == first_subframe) {
2259da8fa4e3SBjoern A. Zeeb first_subframe = NULL;
2260da8fa4e3SBjoern A. Zeeb status->flag &= ~RX_FLAG_ALLOW_SAME_PN;
2261da8fa4e3SBjoern A. Zeeb } else {
2262da8fa4e3SBjoern A. Zeeb status->flag |= RX_FLAG_ALLOW_SAME_PN;
2263da8fa4e3SBjoern A. Zeeb }
2264da8fa4e3SBjoern A. Zeeb
2265da8fa4e3SBjoern A. Zeeb ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
2266da8fa4e3SBjoern A. Zeeb }
2267da8fa4e3SBjoern A. Zeeb }
2268da8fa4e3SBjoern A. Zeeb
/* Coalesce a chained (multi-buffer) MSDU into its first skb.
 *
 * All buffers after the head are copied into the (expanded) head skb and
 * freed; @unchain_cnt is bumped by the number of merged buffers.  On
 * allocation failure the list is restored intact and -1 is returned so
 * the caller can drop the whole chain; returns 0 on success.
 */
static int ath10k_unchain_msdu(struct sk_buff_head *amsdu,
			       unsigned long *unchain_cnt)
{
	struct sk_buff *skb, *first;
	int space;
	int total_len = 0;
	int amsdu_len = skb_queue_len(amsdu);

	/* TODO: Might could optimize this by using
	 * skb_try_coalesce or similar method to
	 * decrease copying, or maybe get mac80211 to
	 * provide a way to just receive a list of
	 * skb?
	 */

	first = __skb_dequeue(amsdu);

	/* Allocate total length all at once. */
	skb_queue_walk(amsdu, skb)
		total_len += skb->len;

	/* Grow the head skb only if its tailroom can't already hold the
	 * remaining buffers.
	 */
	space = total_len - skb_tailroom(first);
	if ((space > 0) &&
	    (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
		/* TODO: bump some rx-oom error stat */
		/* put it back together so we can free the
		 * whole list at once.
		 */
		__skb_queue_head(amsdu, first);
		return -1;
	}

	/* Walk list again, copying contents into
	 * msdu_head
	 */
	while ((skb = __skb_dequeue(amsdu))) {
		skb_copy_from_linear_data(skb, skb_put(first, skb->len),
					  skb->len);
		dev_kfree_skb_any(skb);
	}

	/* Leave only the merged head skb on the list for the caller. */
	__skb_queue_head(amsdu, first);

	*unchain_cnt += amsdu_len - 1;

	return 0;
}
2316da8fa4e3SBjoern A. Zeeb
/* Decide whether a chained (multi-buffer) MSDU can be re-assembled.
 *
 * Only raw-decap chains whose buffer count matches the descriptor's
 * ring2_more_count are linearized via ath10k_unchain_msdu(); anything
 * else is purged wholesale and accounted in @drop_cnt.
 */
static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    unsigned long *drop_cnt,
				    unsigned long *unchain_cnt)
{
	struct sk_buff *first;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_desc *rxd;
	struct rx_msdu_start_common *rxd_msdu_start_common;
	struct rx_frag_info_common *rxd_frag_info;
	enum rx_msdu_decap_format decap;

	/* The HTT rx descriptor sits immediately before the frame data. */
	first = skb_peek(amsdu);
	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
#if defined(__linux__)
				    (void *)first->data - hw->rx_desc_ops->rx_desc_size);
#elif defined(__FreeBSD__)
				    (u8 *)first->data - hw->rx_desc_ops->rx_desc_size);
#endif

	rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
	rxd_frag_info = ath10k_htt_rx_desc_get_frag_info(hw, rxd);
	decap = MS(__le32_to_cpu(rxd_msdu_start_common->info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	/* FIXME: Current unchaining logic can only handle simple case of raw
	 * msdu chaining. If decapping is other than raw the chaining may be
	 * more complex and this isn't handled by the current code. Don't even
	 * try re-constructing such frames - it'll be pretty much garbage.
	 */
	if (decap != RX_MSDU_DECAP_RAW ||
	    skb_queue_len(amsdu) != 1 + rxd_frag_info->ring2_more_count) {
		*drop_cnt += skb_queue_len(amsdu);
		__skb_queue_purge(amsdu);
		return;
	}

	ath10k_unchain_msdu(amsdu, unchain_cnt);
}
2356da8fa4e3SBjoern A. Zeeb
/* Validate that an rx list claiming to be an A-MSDU really is one.
 *
 * Returns true when the list is acceptable; false when it should be
 * dropped: either the first buffer lacks the first-msdu flag, or the
 * first subframe's DA field is actually an RFC 1042 LLC/SNAP header,
 * the signature of a single MSDU forged into an A-MSDU (see comment
 * below).
 */
static bool ath10k_htt_rx_validate_amsdu(struct ath10k *ar,
					 struct sk_buff_head *amsdu)
{
	u8 *subframe_hdr;
	struct sk_buff *first;
	bool is_first, is_last;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_desc *rxd;
	struct rx_msdu_end_common *rxd_msdu_end_common;
	struct rx_mpdu_start *rxd_mpdu_start;
	struct ieee80211_hdr *hdr;
	size_t hdr_len, crypto_len;
	enum htt_rx_mpdu_encrypt_type enctype;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	first = skb_peek(amsdu);

	/* The HTT rx descriptor precedes the frame payload in the buffer. */
	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
#if defined(__linux__)
				    (void *)first->data - hw->rx_desc_ops->rx_desc_size);
#elif defined(__FreeBSD__)
				    (u8 *)first->data - hw->rx_desc_ops->rx_desc_size);
#endif

	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
	rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
	hdr = (void *)ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);

	is_first = !!(rxd_msdu_end_common->info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd_msdu_end_common->info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));

	/* Return in case of non-aggregated msdu */
	if (is_first && is_last)
		return true;

	/* First msdu flag is not set for the first msdu of the list */
	if (!is_first)
		return false;

	enctype = MS(__le32_to_cpu(rxd_mpdu_start->info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

	/* First A-MSDU subframe starts after the (aligned) 802.11 header
	 * plus the crypto header.
	 */
	subframe_hdr = (u8 *)hdr + round_up(hdr_len, bytes_aligned) +
		       crypto_len;

	/* Validate if the amsdu has a proper first subframe.
	 * There are chances a single msdu can be received as amsdu when
	 * the unauthenticated amsdu flag of a QoS header
	 * gets flipped in non-SPP AMSDU's, in such cases the first
	 * subframe has llc/snap header in place of a valid da.
	 * return false if the da matches rfc1042 pattern
	 */
	if (ether_addr_equal(subframe_hdr, rfc1042_header))
		return false;

	return true;
}
2419da8fa4e3SBjoern A. Zeeb
ath10k_htt_rx_amsdu_allowed(struct ath10k * ar,struct sk_buff_head * amsdu,struct ieee80211_rx_status * rx_status)2420da8fa4e3SBjoern A. Zeeb static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
2421da8fa4e3SBjoern A. Zeeb struct sk_buff_head *amsdu,
2422da8fa4e3SBjoern A. Zeeb struct ieee80211_rx_status *rx_status)
2423da8fa4e3SBjoern A. Zeeb {
2424da8fa4e3SBjoern A. Zeeb if (!rx_status->freq) {
2425da8fa4e3SBjoern A. Zeeb ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n");
2426da8fa4e3SBjoern A. Zeeb return false;
2427da8fa4e3SBjoern A. Zeeb }
2428da8fa4e3SBjoern A. Zeeb
2429da8fa4e3SBjoern A. Zeeb if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
2430da8fa4e3SBjoern A. Zeeb ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
2431da8fa4e3SBjoern A. Zeeb return false;
2432da8fa4e3SBjoern A. Zeeb }
2433da8fa4e3SBjoern A. Zeeb
2434da8fa4e3SBjoern A. Zeeb if (!ath10k_htt_rx_validate_amsdu(ar, amsdu)) {
2435da8fa4e3SBjoern A. Zeeb ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid amsdu received\n");
2436da8fa4e3SBjoern A. Zeeb return false;
2437da8fa4e3SBjoern A. Zeeb }
2438da8fa4e3SBjoern A. Zeeb
2439da8fa4e3SBjoern A. Zeeb return true;
2440da8fa4e3SBjoern A. Zeeb }
2441da8fa4e3SBjoern A. Zeeb
/* Drop an A-MSDU that is not allowed through, counting the discarded
 * frames in @drop_cnt (when provided).  An empty or permitted list is
 * left untouched.
 */
static void ath10k_htt_rx_h_filter(struct ath10k *ar,
				   struct sk_buff_head *amsdu,
				   struct ieee80211_rx_status *rx_status,
				   unsigned long *drop_cnt)
{
	/* Nothing to do for an empty list or one that passes the checks. */
	if (skb_queue_empty(amsdu) ||
	    ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
		return;

	/* Account for the frames before purging them. */
	if (drop_cnt)
		*drop_cnt += skb_queue_len(amsdu);

	__skb_queue_purge(amsdu);
}
2458da8fa4e3SBjoern A. Zeeb
/* Pop one (possibly chained) A-MSDU off the rx ring and run it through
 * the rx pipeline: PPDU status, unchaining, filtering, MPDU processing
 * and finally delivery to mac80211, feeding per-TID statistics at the
 * end.
 *
 * Returns 0 on success, or a negative errno when the rx ring became
 * corrupted, in which case further rx is disabled via htt->rx_confused.
 */
static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct sk_buff_head amsdu;
	int ret;
	unsigned long drop_cnt = 0;
	unsigned long unchain_cnt = 0;
	unsigned long drop_cnt_filter = 0;
	unsigned long msdus_to_queue, num_msdus;
	enum ath10k_pkt_rx_err err = ATH10K_PKT_RX_ERR_MAX;
	u8 first_hdr[RX_HTT_HDR_STATUS_LEN];

	__skb_queue_head_init(&amsdu);

	spin_lock_bh(&htt->rx_ring.lock);
	/* Once the ring is known-bad, stop popping from it entirely. */
	if (htt->rx_confused) {
		spin_unlock_bh(&htt->rx_ring.lock);
		return -EIO;
	}
	ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
	spin_unlock_bh(&htt->rx_ring.lock);

	if (ret < 0) {
		ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
		__skb_queue_purge(&amsdu);
		/* FIXME: It's probably a good idea to reboot the
		 * device instead of leaving it inoperable.
		 */
		htt->rx_confused = true;
		return ret;
	}

	num_msdus = skb_queue_len(&amsdu);

	ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);

	/* only for ret = 1 indicates chained msdus */
	if (ret > 0)
		ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt);

	ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter);
	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err, 0,
			     false);
	msdus_to_queue = skb_queue_len(&amsdu);
	ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status);

	/* Feed per-TID rx statistics for this peer. */
	ath10k_sta_update_rx_tid_stats(ar, first_hdr, num_msdus, err,
				       unchain_cnt, drop_cnt, drop_cnt_filter,
				       msdus_to_queue);

	return 0;
}
2512da8fa4e3SBjoern A. Zeeb
ath10k_htt_rx_mpdu_desc_pn_hl(struct htt_hl_rx_desc * rx_desc,union htt_rx_pn_t * pn,int pn_len_bits)2513da8fa4e3SBjoern A. Zeeb static void ath10k_htt_rx_mpdu_desc_pn_hl(struct htt_hl_rx_desc *rx_desc,
2514da8fa4e3SBjoern A. Zeeb union htt_rx_pn_t *pn,
2515da8fa4e3SBjoern A. Zeeb int pn_len_bits)
2516da8fa4e3SBjoern A. Zeeb {
2517da8fa4e3SBjoern A. Zeeb switch (pn_len_bits) {
2518da8fa4e3SBjoern A. Zeeb case 48:
2519da8fa4e3SBjoern A. Zeeb pn->pn48 = __le32_to_cpu(rx_desc->pn_31_0) +
2520da8fa4e3SBjoern A. Zeeb ((u64)(__le32_to_cpu(rx_desc->u0.pn_63_32) & 0xFFFF) << 32);
2521da8fa4e3SBjoern A. Zeeb break;
2522da8fa4e3SBjoern A. Zeeb case 24:
2523da8fa4e3SBjoern A. Zeeb pn->pn24 = __le32_to_cpu(rx_desc->pn_31_0);
2524da8fa4e3SBjoern A. Zeeb break;
2525da8fa4e3SBjoern A. Zeeb }
2526da8fa4e3SBjoern A. Zeeb }
2527da8fa4e3SBjoern A. Zeeb
ath10k_htt_rx_pn_cmp48(union htt_rx_pn_t * new_pn,union htt_rx_pn_t * old_pn)2528da8fa4e3SBjoern A. Zeeb static bool ath10k_htt_rx_pn_cmp48(union htt_rx_pn_t *new_pn,
2529da8fa4e3SBjoern A. Zeeb union htt_rx_pn_t *old_pn)
2530da8fa4e3SBjoern A. Zeeb {
2531da8fa4e3SBjoern A. Zeeb return ((new_pn->pn48 & 0xffffffffffffULL) <=
2532da8fa4e3SBjoern A. Zeeb (old_pn->pn48 & 0xffffffffffffULL));
2533da8fa4e3SBjoern A. Zeeb }
2534da8fa4e3SBjoern A. Zeeb
/* Check a high-latency rx indication for PN/TSC replay.
 *
 * Caller must hold ar->data_lock.  Returns true when the frame's packet
 * number does not advance past the last one stored for this peer/TID
 * (a replay that should be dropped); otherwise updates the stored
 * last-PN state and returns false.
 */
static bool ath10k_htt_rx_pn_check_replay_hl(struct ath10k *ar,
					     struct ath10k_peer *peer,
					     struct htt_rx_indication_hl *rx)
{
	bool last_pn_valid, pn_invalid = false;
	enum htt_txrx_sec_cast_type sec_index;
	enum htt_security_types sec_type;
	union htt_rx_pn_t new_pn = {0};
	struct htt_hl_rx_desc *rx_desc;
	union htt_rx_pn_t *last_pn;
	u32 rx_desc_info, tid;
	int num_mpdu_ranges;

	lockdep_assert_held(&ar->data_lock);

	if (!peer)
		return false;

	/* Only the first MSDU of an MPDU carries the PN. */
	if (!(rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU))
		return false;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);

	/* The HL rx descriptor follows the mpdu range array. */
	rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges];
	rx_desc_info = __le32_to_cpu(rx_desc->info);

	/* Plaintext frames carry no PN to check. */
	if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED))
		return false;

	tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
	last_pn_valid = peer->tids_last_pn_valid[tid];
	last_pn = &peer->tids_last_pn[tid];

	if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST))
		sec_index = HTT_TXRX_SEC_MCAST;
	else
		sec_index = HTT_TXRX_SEC_UCAST;

	sec_type = peer->rx_pn[sec_index].sec_type;
	ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);

	/* Replay detection only applies to CCMP/TKIP ciphers. */
	if (sec_type != HTT_SECURITY_AES_CCMP &&
	    sec_type != HTT_SECURITY_TKIP &&
	    sec_type != HTT_SECURITY_TKIP_NOMIC)
		return false;

	if (last_pn_valid)
		pn_invalid = ath10k_htt_rx_pn_cmp48(&new_pn, last_pn);
	else
		peer->tids_last_pn_valid[tid] = true;

	/* Accepted frames advance the stored PN. */
	if (!pn_invalid)
		last_pn->pn48 = new_pn.pn48;

	return pn_invalid;
}
2592da8fa4e3SBjoern A. Zeeb
/* Process one high-latency (HL, e.g. SDIO) rx indication and deliver
 * the contained frame to mac80211.
 *
 * Returns true when the caller retains ownership of @skb and must free
 * it (discard/error paths); returns false once the skb has been handed
 * to mac80211.
 */
static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
					 struct htt_rx_indication_hl *rx,
					 struct sk_buff *skb,
					 enum htt_rx_pn_check_type check_pn_type,
					 enum htt_rx_tkip_demic_type tkip_mic_type)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_peer *peer;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	struct fw_rx_desc_hl *fw_desc;
	enum htt_txrx_sec_cast_type sec_index;
	enum htt_security_types sec_type;
	union htt_rx_pn_t new_pn = {0};
	struct htt_hl_rx_desc *rx_desc;
	struct ieee80211_hdr *hdr;
	struct ieee80211_rx_status *rx_status;
	u16 peer_id;
	u8 rx_desc_len;
	int num_mpdu_ranges;
	size_t tot_hdr_len;
	struct ieee80211_channel *ch;
	bool pn_invalid, qos, first_msdu;
	u32 tid, rx_desc_info;

	peer_id = __le16_to_cpu(rx->hdr.peer_id);
	tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	spin_unlock_bh(&ar->data_lock);
	if (!peer && peer_id != HTT_INVALID_PEERID)
		ath10k_warn(ar, "Got RX ind from invalid peer: %u\n", peer_id);

	/* Without a peer there is no key/PN state to validate against. */
	if (!peer)
		return true;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	mpdu_ranges = htt_rx_ind_get_mpdu_ranges_hl(rx);
	fw_desc = &rx->fw_desc;
	rx_desc_len = fw_desc->len;

	if (fw_desc->u.bits.discard) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt discard mpdu\n");
		goto err;
	}

	/* I have not yet seen any case where num_mpdu_ranges > 1.
	 * qcacld does not seem handle that case either, so we introduce the
	 * same limitation here as well.
	 */
	if (num_mpdu_ranges > 1)
		ath10k_warn(ar,
			    "Unsupported number of MPDU ranges: %d, ignoring all but the first\n",
			    num_mpdu_ranges);

	if (mpdu_ranges->mpdu_range_status !=
	    HTT_RX_IND_MPDU_STATUS_OK &&
	    mpdu_ranges->mpdu_range_status !=
	    HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt mpdu_range_status %d\n",
			   mpdu_ranges->mpdu_range_status);
		goto err;
	}

	/* The HL rx descriptor follows the (single) mpdu range array. */
	rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges];
	rx_desc_info = __le32_to_cpu(rx_desc->info);

	if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST))
		sec_index = HTT_TXRX_SEC_MCAST;
	else
		sec_index = HTT_TXRX_SEC_UCAST;

	sec_type = peer->rx_pn[sec_index].sec_type;
	first_msdu = rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU;

	ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);

	/* Driver-side replay check only for non-QoS (ext) TIDs; QoS TIDs
	 * get a reconstructed crypto header below so mac80211 can do the
	 * PN check itself - NOTE(review): confirm against upstream intent.
	 */
	if (check_pn_type == HTT_RX_PN_CHECK && tid >= IEEE80211_NUM_TIDS) {
		spin_lock_bh(&ar->data_lock);
		pn_invalid = ath10k_htt_rx_pn_check_replay_hl(ar, peer, rx);
		spin_unlock_bh(&ar->data_lock);

		if (pn_invalid)
			goto err;
	}

	/* Strip off all headers before the MAC header before delivery to
	 * mac80211
	 */
	tot_hdr_len = sizeof(struct htt_resp_hdr) + sizeof(rx->hdr) +
		      sizeof(rx->ppdu) + sizeof(rx->prefix) +
		      sizeof(rx->fw_desc) +
		      sizeof(*mpdu_ranges) * num_mpdu_ranges + rx_desc_len;

	skb_pull(skb, tot_hdr_len);

	hdr = (struct ieee80211_hdr *)skb->data;
	qos = ieee80211_is_data_qos(hdr->frame_control);

	rx_status = IEEE80211_SKB_RXCB(skb);
	memset(rx_status, 0, sizeof(*rx_status));

	if (rx->ppdu.combined_rssi == 0) {
		/* SDIO firmware does not provide signal */
		rx_status->signal = 0;
		rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
	} else {
		rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
			rx->ppdu.combined_rssi;
		rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
	}

	/* Pick the most plausible channel for the rx status, in order of
	 * preference: scan channel, current rx channel, any known vif
	 * channel, target operating channel.
	 */
	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;
	if (!ch)
		ch = ath10k_htt_rx_h_any_channel(ar);
	if (!ch)
		ch = ar->tgt_oper_chan;
	spin_unlock_bh(&ar->data_lock);

	if (ch) {
		rx_status->band = ch->band;
		rx_status->freq = ch->center_freq;
	}
	if (rx->fw_desc.flags & FW_RX_DESC_FLAGS_LAST_MSDU)
		rx_status->flag &= ~RX_FLAG_AMSDU_MORE;
	else
		rx_status->flag |= RX_FLAG_AMSDU_MORE;

	/* Not entirely sure about this, but all frames from the chipset has
	 * the protected flag set even though they have already been decrypted.
	 * Unmasking this flag is necessary in order for mac80211 not to drop
	 * the frame.
	 * TODO: Verify this is always the case or find out a way to check
	 * if there has been hw decryption.
	 */
	if (ieee80211_has_protected(hdr->frame_control)) {
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
		rx_status->flag |= RX_FLAG_DECRYPTED |
				   RX_FLAG_IV_STRIPPED |
				   RX_FLAG_MMIC_STRIPPED;

		if (tid < IEEE80211_NUM_TIDS &&
		    first_msdu &&
		    check_pn_type == HTT_RX_PN_CHECK &&
		   (sec_type == HTT_SECURITY_AES_CCMP ||
		    sec_type == HTT_SECURITY_TKIP ||
		    sec_type == HTT_SECURITY_TKIP_NOMIC)) {
			u8 offset, *ivp, i;
			s8 keyidx = 0;
			__le64 pn48 = cpu_to_le64(new_pn.pn48);

			hdr = (struct ieee80211_hdr *)skb->data;
			offset = ieee80211_hdrlen(hdr->frame_control);
			hdr->frame_control |= __cpu_to_le16(IEEE80211_FCTL_PROTECTED);
			rx_status->flag &= ~RX_FLAG_IV_STRIPPED;

			/* Re-insert a synthetic IV/CCMP header (zeroed, then
			 * filled in below) so mac80211 can run its own PN
			 * replay check on this QoS TID.
			 */
			memmove(skb->data - IEEE80211_CCMP_HDR_LEN,
				skb->data, offset);
			skb_push(skb, IEEE80211_CCMP_HDR_LEN);
			ivp = skb->data + offset;
			memset(skb->data + offset, 0, IEEE80211_CCMP_HDR_LEN);
			/* Ext IV */
			ivp[IEEE80211_WEP_IV_LEN - 1] |= ATH10K_IEEE80211_EXTIV;

			/* Use the last configured pairwise key's index. */
			for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
				if (peer->keys[i] &&
				    peer->keys[i]->flags & IEEE80211_KEY_FLAG_PAIRWISE)
					keyidx = peer->keys[i]->keyidx;
			}

			/* Key ID */
			ivp[IEEE80211_WEP_IV_LEN - 1] |= keyidx << 6;

			if (sec_type == HTT_SECURITY_AES_CCMP) {
				rx_status->flag |= RX_FLAG_MIC_STRIPPED;
				/* pn 0, pn 1 */
				memcpy(skb->data + offset, &pn48, 2);
				/* pn 2, pn 3, pn 4, pn 5 */
				memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4);
			} else {
				rx_status->flag |= RX_FLAG_ICV_STRIPPED;
				/* TSC 0 */
				memcpy(skb->data + offset + 2, &pn48, 1);
				/* TSC 1 */
				memcpy(skb->data + offset, ((u8 *)&pn48) + 1, 1);
				/* TSC 2 , TSC 3 , TSC 4 , TSC 5*/
				memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4);
			}
		}
	}

	if (tkip_mic_type == HTT_RX_TKIP_MIC)
		rx_status->flag &= ~RX_FLAG_IV_STRIPPED &
				   ~RX_FLAG_MMIC_STRIPPED;

	if (mpdu_ranges->mpdu_range_status == HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR)
		rx_status->flag |= RX_FLAG_MMIC_ERROR;

	/* Promote non-QoS data to QoS data so mac80211 sees the TID the
	 * firmware reported.
	 */
	if (!qos && tid < IEEE80211_NUM_TIDS) {
		u8 offset;
		__le16 qos_ctrl = 0;

		hdr = (struct ieee80211_hdr *)skb->data;
		offset = ieee80211_hdrlen(hdr->frame_control);

		hdr->frame_control |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
		memmove(skb->data - IEEE80211_QOS_CTL_LEN, skb->data, offset);
		skb_push(skb, IEEE80211_QOS_CTL_LEN);
		qos_ctrl = cpu_to_le16(tid);
		memcpy(skb->data + offset, &qos_ctrl, IEEE80211_QOS_CTL_LEN);
	}

	if (ar->napi.dev)
		ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
	else
		ieee80211_rx_ni(ar->hw, skb);

	/* We have delivered the skb to the upper layers (mac80211) so we
	 * must not free it.
	 */
	return false;
err:
	/* Tell the caller that it must free the skb since we have not
	 * consumed it
	 */
	return true;
}
2824da8fa4e3SBjoern A. Zeeb
/* Strip the TKIP IV and the trailing Michael MIC from a fragment whose
 * MIC has already been verified/removed by firmware semantics.
 * Returns 0 on success or -EINVAL when the ExtIV bit is missing.
 */
static int ath10k_htt_rx_frag_tkip_decap_nomic(struct sk_buff *skb,
					       u16 head_len,
					       u16 hdr_len)
{
	u8 *iv;

	/* The IV sits right after the 802.11 + head headers. */
	iv = skb->data + hdr_len + head_len;

	/* the ExtIV bit is always set to 1 for TKIP */
	if (!(iv[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
		return -EINVAL;

	/* Slide the headers forward over the IV, then drop it along with
	 * the trailing MIC.
	 */
	memmove(skb->data + IEEE80211_TKIP_IV_LEN, skb->data,
		head_len + hdr_len);
	skb_pull(skb, IEEE80211_TKIP_IV_LEN);
	skb_trim(skb, skb->len - ATH10K_IEEE80211_TKIP_MICLEN);

	return 0;
}
2843da8fa4e3SBjoern A. Zeeb
/* Strip the TKIP IV and trailing ICV from a fragment, leaving the
 * Michael MIC in place for later verification.
 * Returns 0 on success or -EINVAL when the ExtIV bit is missing.
 */
static int ath10k_htt_rx_frag_tkip_decap_withmic(struct sk_buff *skb,
						 u16 head_len,
						 u16 hdr_len)
{
	u8 *iv;

	/* The IV sits right after the 802.11 + head headers. */
	iv = skb->data + hdr_len + head_len;

	/* the ExtIV bit is always set to 1 for TKIP */
	if (!(iv[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
		return -EINVAL;

	/* Slide the headers forward over the IV, then drop it and the
	 * trailing ICV.
	 */
	memmove(skb->data + IEEE80211_TKIP_IV_LEN, skb->data,
		head_len + hdr_len);
	skb_pull(skb, IEEE80211_TKIP_IV_LEN);
	skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN);

	return 0;
}
2862da8fa4e3SBjoern A. Zeeb
/* Strip the CCMP header and trailing MIC from a fragment.
 * Returns 0 on success or -EINVAL when the ExtIV bit is missing.
 */
static int ath10k_htt_rx_frag_ccmp_decap(struct sk_buff *skb,
					 u16 head_len,
					 u16 hdr_len)
{
	u8 *iv;

	/* The CCMP header sits right after the 802.11 + head headers. */
	iv = skb->data + hdr_len + head_len;

	/* the ExtIV bit is always set to 1 for CCMP */
	if (!(iv[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
		return -EINVAL;

	/* Drop the trailing MIC first, then slide the headers over the
	 * CCMP header and strip it.
	 */
	skb_trim(skb, skb->len - IEEE80211_CCMP_MIC_LEN);
	memmove(skb->data + IEEE80211_CCMP_HDR_LEN, skb->data,
		head_len + hdr_len);
	skb_pull(skb, IEEE80211_CCMP_HDR_LEN);

	return 0;
}
2881da8fa4e3SBjoern A. Zeeb
/* Strip WEP crypto material from an rx fragment: remove the WEP IV
 * from the front and the ICV from the tail.  WEP has no ExtIV flag to
 * validate, so this cannot fail.
 *
 * @head_len: length of the HTT/firmware metadata preceding the 802.11
 *	      header
 * @hdr_len:  length of the 802.11 header itself
 *
 * Always returns 0.
 */
static int ath10k_htt_rx_frag_wep_decap(struct sk_buff *skb,
					u16 head_len,
					u16 hdr_len)
{
	u8 *front = skb->data;

	/* slide the metadata + 802.11 header forward over the IV, then
	 * drop the IV from the head and the ICV from the tail
	 */
	memmove(front + IEEE80211_WEP_IV_LEN, front, head_len + hdr_len);
	skb_pull(skb, IEEE80211_WEP_IV_LEN);
	skb_trim(skb, skb->len - IEEE80211_WEP_ICV_LEN);
	return 0;
}
2896da8fa4e3SBjoern A. Zeeb
/* Process an HTT rx fragment indication (high-latency firmware).
 *
 * Validates one rx fragment, strips the crypto header/trailer matching
 * the peer's security type, and for CCMP/TKIP enforces PN replay
 * protection across the fragment sequence before handing the frame to
 * ath10k_htt_rx_proc_rx_ind_hl().
 *
 * Returns true when the caller must free @skb (it was not consumed).
 * Otherwise returns the result of ath10k_htt_rx_proc_rx_ind_hl() —
 * presumably the same ownership convention; confirm against that
 * function if in doubt.
 */
static bool ath10k_htt_rx_proc_rx_frag_ind_hl(struct ath10k_htt *htt,
					      struct htt_rx_fragment_indication *rx,
					      struct sk_buff *skb)
{
	struct ath10k *ar = htt->ar;
	enum htt_rx_tkip_demic_type tkip_mic = HTT_RX_NON_TKIP_MIC;
	enum htt_txrx_sec_cast_type sec_index;
	struct htt_rx_indication_hl *rx_hl;
	enum htt_security_types sec_type;
	u32 tid, frag, seq, rx_desc_info;
	union htt_rx_pn_t new_pn = {0};
	struct htt_hl_rx_desc *rx_desc;
	u16 peer_id, sc, hdr_space;
	union htt_rx_pn_t *last_pn;
	struct ieee80211_hdr *hdr;
	int ret, num_mpdu_ranges;
	struct ath10k_peer *peer;
	struct htt_resp *resp;
	size_t tot_hdr_len;

	/* strip the fragment-indication prefix from the head and the
	 * FCS from the tail before interpreting the payload
	 */
	resp = (struct htt_resp *)(skb->data + HTT_RX_FRAG_IND_INFO0_HEADER_LEN);
	skb_pull(skb, HTT_RX_FRAG_IND_INFO0_HEADER_LEN);
	skb_trim(skb, skb->len - FCS_LEN);

	peer_id = __le16_to_cpu(rx->peer_id);
	rx_hl = (struct htt_rx_indication_hl *)(&resp->rx_ind_hl);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer: %u\n", peer_id);
		goto err;
	}

	num_mpdu_ranges = MS(__le32_to_cpu(rx_hl->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);

	/* total size of the HTT response headers that precede the
	 * high-latency rx descriptor in the buffer
	 */
	tot_hdr_len = sizeof(struct htt_resp_hdr) +
		      sizeof(rx_hl->hdr) +
		      sizeof(rx_hl->ppdu) +
		      sizeof(rx_hl->prefix) +
		      sizeof(rx_hl->fw_desc) +
		      sizeof(struct htt_rx_indication_mpdu_range) * num_mpdu_ranges;

	tid = MS(rx_hl->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
	rx_desc = (struct htt_hl_rx_desc *)(skb->data + tot_hdr_len);
	rx_desc_info = __le32_to_cpu(rx_desc->info);

	/* the 802.11 header follows the firmware descriptor */
	hdr = (struct ieee80211_hdr *)((u8 *)rx_desc + rx_hl->fw_desc.len);

	if (is_multicast_ether_addr(hdr->addr1)) {
		/* Discard the fragment with multicast DA */
		goto err;
	}

	if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED)) {
		/* unencrypted fragment: no decap or PN bookkeeping needed */
		spin_unlock_bh(&ar->data_lock);
		return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
						    HTT_RX_NON_PN_CHECK,
						    HTT_RX_NON_TKIP_MIC);
	}

	/* drop retransmitted fragments of protected frames */
	if (ieee80211_has_retry(hdr->frame_control))
		goto err;

	hdr_space = ieee80211_hdrlen(hdr->frame_control);
	sc = __le16_to_cpu(hdr->seq_ctrl);
	seq = IEEE80211_SEQ_TO_SN(sc);
	frag = sc & IEEE80211_SCTL_FRAG;

	/* select the multicast or unicast key context for this frame */
	sec_index = MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST) ?
		    HTT_TXRX_SEC_MCAST : HTT_TXRX_SEC_UCAST;
	sec_type = peer->rx_pn[sec_index].sec_type;
	ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);

	/* strip the per-cipher crypto header/trailer in place */
	switch (sec_type) {
	case HTT_SECURITY_TKIP:
		tkip_mic = HTT_RX_TKIP_MIC;
		ret = ath10k_htt_rx_frag_tkip_decap_withmic(skb,
							    tot_hdr_len +
							    rx_hl->fw_desc.len,
							    hdr_space);
		if (ret)
			goto err;
		break;
	case HTT_SECURITY_TKIP_NOMIC:
		ret = ath10k_htt_rx_frag_tkip_decap_nomic(skb,
							  tot_hdr_len +
							  rx_hl->fw_desc.len,
							  hdr_space);
		if (ret)
			goto err;
		break;
	case HTT_SECURITY_AES_CCMP:
		ret = ath10k_htt_rx_frag_ccmp_decap(skb,
						    tot_hdr_len + rx_hl->fw_desc.len,
						    hdr_space);
		if (ret)
			goto err;
		break;
	case HTT_SECURITY_WEP128:
	case HTT_SECURITY_WEP104:
	case HTT_SECURITY_WEP40:
		ret = ath10k_htt_rx_frag_wep_decap(skb,
						   tot_hdr_len + rx_hl->fw_desc.len,
						   hdr_space);
		if (ret)
			goto err;
		break;
	default:
		break;
	}

	/* decap shifted skb->data; re-read the response header */
	resp = (struct htt_resp *)(skb->data);

	/* only CCMP and TKIP carry a PN worth replay-checking */
	if (sec_type != HTT_SECURITY_AES_CCMP &&
	    sec_type != HTT_SECURITY_TKIP &&
	    sec_type != HTT_SECURITY_TKIP_NOMIC) {
		spin_unlock_bh(&ar->data_lock);
		return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
						    HTT_RX_NON_PN_CHECK,
						    HTT_RX_NON_TKIP_MIC);
	}

	last_pn = &peer->frag_tids_last_pn[tid];

	if (frag == 0) {
		/* first fragment: run the normal replay check, then
		 * record the PN and sequence number the remaining
		 * fragments must continue from
		 */
		if (ath10k_htt_rx_pn_check_replay_hl(ar, peer, &resp->rx_ind_hl))
			goto err;

		last_pn->pn48 = new_pn.pn48;
		peer->frag_tids_seq[tid] = seq;
	} else if (sec_type == HTT_SECURITY_AES_CCMP) {
		/* later CCMP fragments must match the first fragment's
		 * sequence number and increment the PN by exactly one
		 */
		if (seq != peer->frag_tids_seq[tid])
			goto err;

		if (new_pn.pn48 != last_pn->pn48 + 1)
			goto err;

		last_pn->pn48 = new_pn.pn48;
		last_pn = &peer->tids_last_pn[tid];
		last_pn->pn48 = new_pn.pn48;
	}

	spin_unlock_bh(&ar->data_lock);

	return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
					    HTT_RX_NON_PN_CHECK, tkip_mic);

err:
	spin_unlock_bh(&ar->data_lock);

	/* Tell the caller that it must free the skb since we have not
	 * consumed it
	 */
	return true;
}
3054da8fa4e3SBjoern A. Zeeb
ath10k_htt_rx_proc_rx_ind_ll(struct ath10k_htt * htt,struct htt_rx_indication * rx)3055da8fa4e3SBjoern A. Zeeb static void ath10k_htt_rx_proc_rx_ind_ll(struct ath10k_htt *htt,
3056da8fa4e3SBjoern A. Zeeb struct htt_rx_indication *rx)
3057da8fa4e3SBjoern A. Zeeb {
3058da8fa4e3SBjoern A. Zeeb struct ath10k *ar = htt->ar;
3059da8fa4e3SBjoern A. Zeeb struct htt_rx_indication_mpdu_range *mpdu_ranges;
3060da8fa4e3SBjoern A. Zeeb int num_mpdu_ranges;
3061da8fa4e3SBjoern A. Zeeb int i, mpdu_count = 0;
3062da8fa4e3SBjoern A. Zeeb u16 peer_id;
3063da8fa4e3SBjoern A. Zeeb u8 tid;
3064da8fa4e3SBjoern A. Zeeb
3065da8fa4e3SBjoern A. Zeeb num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
3066da8fa4e3SBjoern A. Zeeb HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
3067da8fa4e3SBjoern A. Zeeb peer_id = __le16_to_cpu(rx->hdr.peer_id);
3068da8fa4e3SBjoern A. Zeeb tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
3069da8fa4e3SBjoern A. Zeeb
3070da8fa4e3SBjoern A. Zeeb mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
3071da8fa4e3SBjoern A. Zeeb
3072da8fa4e3SBjoern A. Zeeb ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
3073da8fa4e3SBjoern A. Zeeb rx, struct_size(rx, mpdu_ranges, num_mpdu_ranges));
3074da8fa4e3SBjoern A. Zeeb
3075da8fa4e3SBjoern A. Zeeb for (i = 0; i < num_mpdu_ranges; i++)
3076da8fa4e3SBjoern A. Zeeb mpdu_count += mpdu_ranges[i].mpdu_count;
3077da8fa4e3SBjoern A. Zeeb
3078da8fa4e3SBjoern A. Zeeb atomic_add(mpdu_count, &htt->num_mpdus_ready);
3079da8fa4e3SBjoern A. Zeeb
3080da8fa4e3SBjoern A. Zeeb ath10k_sta_update_rx_tid_stats_ampdu(ar, peer_id, tid, mpdu_ranges,
3081da8fa4e3SBjoern A. Zeeb num_mpdu_ranges);
3082da8fa4e3SBjoern A. Zeeb }
3083da8fa4e3SBjoern A. Zeeb
/* Handle an HTT data tx completion indication.
 *
 * Translates the firmware completion status for each reported MSDU into
 * a struct htt_tx_done and either completes it directly (high-latency
 * devices) or queues it on the txdone fifo for later processing.  When
 * the indication also carries PPDU duration records, the per-peer
 * airtime is reported to mac80211.
 */
static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
				       struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_done tx_done = {};
	int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
	__le16 msdu_id, *msdus;
	bool rssi_enabled = false;
	u8 msdu_count = 0, num_airtime_records, tid;
	int i, htt_pad = 0;
	struct htt_data_tx_compl_ppdu_dur *ppdu_info;
	struct ath10k_peer *peer;
	u16 ppdu_info_offset = 0, peer_id;
	u32 tx_duration;

	/* map the firmware status onto the driver's completion states */
	switch (status) {
	case HTT_DATA_TX_STATUS_NO_ACK:
		tx_done.status = HTT_TX_COMPL_STATE_NOACK;
		break;
	case HTT_DATA_TX_STATUS_OK:
		tx_done.status = HTT_TX_COMPL_STATE_ACK;
		break;
	case HTT_DATA_TX_STATUS_DISCARD:
	case HTT_DATA_TX_STATUS_POSTPONE:
	case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
		break;
	default:
		ath10k_warn(ar, "unhandled tx completion status %d\n", status);
		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
		break;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
		   resp->data_tx_completion.num_msdus);

	msdu_count = resp->data_tx_completion.num_msdus;
	msdus = resp->data_tx_completion.msdus;
	rssi_enabled = ath10k_is_rssi_enable(&ar->hw_params, resp);

	if (rssi_enabled)
		htt_pad = ath10k_tx_data_rssi_get_pad_bytes(&ar->hw_params,
							    resp);

	for (i = 0; i < msdu_count; i++) {
		msdu_id = msdus[i];
		tx_done.msdu_id = __le16_to_cpu(msdu_id);

		if (rssi_enabled) {
			/* Total no of MSDUs should be even,
			 * if odd MSDUs are sent firmware fills
			 * last msdu id with 0xffff
			 */
			/* ack RSSI for msdu i lives after the msdu_id
			 * list (plus pad); the extra +1 skips the
			 * 0xffff filler when the count is odd —
			 * presumably matching the firmware layout,
			 * confirm against the HTT spec if changing
			 */
			if (msdu_count & 0x01) {
				msdu_id = msdus[msdu_count + i + 1 + htt_pad];
				tx_done.ack_rssi = __le16_to_cpu(msdu_id);
			} else {
				msdu_id = msdus[msdu_count + i + htt_pad];
				tx_done.ack_rssi = __le16_to_cpu(msdu_id);
			}
		}

		/* kfifo_put: In practice firmware shouldn't fire off per-CE
		 * interrupt and main interrupt (MSI/-X range case) for the same
		 * HTC service so it should be safe to use kfifo_put w/o lock.
		 *
		 * From kfifo_put() documentation:
		 * Note that with only one concurrent reader and one concurrent
		 * writer, you don't need extra locking to use these macro.
		 */
		if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) {
			ath10k_txrx_tx_unref(htt, &tx_done);
		} else if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
			ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
				    tx_done.msdu_id, tx_done.status);
			ath10k_txrx_tx_unref(htt, &tx_done);
		}
	}

	if (!(resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_PPDU_DURATION_PRESENT))
		return;

	/* locate the PPDU duration records: they follow the msdu_id
	 * list (rounded up to an even count), the RSSI list when
	 * present, and the optional PPID/PA words
	 */
	ppdu_info_offset = (msdu_count & 0x01) ? msdu_count + 1 : msdu_count;

	if (rssi_enabled)
		ppdu_info_offset += ppdu_info_offset;

	if (resp->data_tx_completion.flags2 &
	    (HTT_TX_CMPL_FLAG_PPID_PRESENT | HTT_TX_CMPL_FLAG_PA_PRESENT))
		ppdu_info_offset += 2;

	ppdu_info = (struct htt_data_tx_compl_ppdu_dur *)&msdus[ppdu_info_offset];
	num_airtime_records = FIELD_GET(HTT_TX_COMPL_PPDU_DUR_INFO0_NUM_ENTRIES_MASK,
					__le32_to_cpu(ppdu_info->info0));

	/* report per-peer/per-tid tx airtime to mac80211 */
	for (i = 0; i < num_airtime_records; i++) {
		struct htt_data_tx_ppdu_dur *ppdu_dur;
		u32 info0;

		ppdu_dur = &ppdu_info->ppdu_dur[i];
		info0 = __le32_to_cpu(ppdu_dur->info0);

		peer_id = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_PEER_ID_MASK,
				    info0);
		rcu_read_lock();
		spin_lock_bh(&ar->data_lock);

		peer = ath10k_peer_find_by_id(ar, peer_id);
		if (!peer || !peer->sta) {
			spin_unlock_bh(&ar->data_lock);
			rcu_read_unlock();
			continue;
		}

		tid = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_TID_MASK, info0) &
						IEEE80211_QOS_CTL_TID_MASK;
		tx_duration = __le32_to_cpu(ppdu_dur->tx_duration);

		ieee80211_sta_register_airtime(peer->sta, tid, tx_duration, 0);

		spin_unlock_bh(&ar->data_lock);
		rcu_read_unlock();
	}
}
3209da8fa4e3SBjoern A. Zeeb
ath10k_htt_rx_addba(struct ath10k * ar,struct htt_resp * resp)3210da8fa4e3SBjoern A. Zeeb static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
3211da8fa4e3SBjoern A. Zeeb {
3212da8fa4e3SBjoern A. Zeeb struct htt_rx_addba *ev = &resp->rx_addba;
3213da8fa4e3SBjoern A. Zeeb struct ath10k_peer *peer;
3214da8fa4e3SBjoern A. Zeeb struct ath10k_vif *arvif;
3215da8fa4e3SBjoern A. Zeeb u16 info0, tid, peer_id;
3216da8fa4e3SBjoern A. Zeeb
3217da8fa4e3SBjoern A. Zeeb info0 = __le16_to_cpu(ev->info0);
3218da8fa4e3SBjoern A. Zeeb tid = MS(info0, HTT_RX_BA_INFO0_TID);
3219da8fa4e3SBjoern A. Zeeb peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
3220da8fa4e3SBjoern A. Zeeb
3221da8fa4e3SBjoern A. Zeeb ath10k_dbg(ar, ATH10K_DBG_HTT,
3222da8fa4e3SBjoern A. Zeeb "htt rx addba tid %u peer_id %u size %u\n",
3223da8fa4e3SBjoern A. Zeeb tid, peer_id, ev->window_size);
3224da8fa4e3SBjoern A. Zeeb
3225da8fa4e3SBjoern A. Zeeb spin_lock_bh(&ar->data_lock);
3226da8fa4e3SBjoern A. Zeeb peer = ath10k_peer_find_by_id(ar, peer_id);
3227da8fa4e3SBjoern A. Zeeb if (!peer) {
3228da8fa4e3SBjoern A. Zeeb ath10k_warn(ar, "received addba event for invalid peer_id: %u\n",
3229da8fa4e3SBjoern A. Zeeb peer_id);
3230da8fa4e3SBjoern A. Zeeb spin_unlock_bh(&ar->data_lock);
3231da8fa4e3SBjoern A. Zeeb return;
3232da8fa4e3SBjoern A. Zeeb }
3233da8fa4e3SBjoern A. Zeeb
3234da8fa4e3SBjoern A. Zeeb arvif = ath10k_get_arvif(ar, peer->vdev_id);
3235da8fa4e3SBjoern A. Zeeb if (!arvif) {
3236da8fa4e3SBjoern A. Zeeb ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
3237da8fa4e3SBjoern A. Zeeb peer->vdev_id);
3238da8fa4e3SBjoern A. Zeeb spin_unlock_bh(&ar->data_lock);
3239da8fa4e3SBjoern A. Zeeb return;
3240da8fa4e3SBjoern A. Zeeb }
3241da8fa4e3SBjoern A. Zeeb
3242da8fa4e3SBjoern A. Zeeb ath10k_dbg(ar, ATH10K_DBG_HTT,
3243da8fa4e3SBjoern A. Zeeb "htt rx start rx ba session sta %pM tid %u size %u\n",
3244da8fa4e3SBjoern A. Zeeb peer->addr, tid, ev->window_size);
3245da8fa4e3SBjoern A. Zeeb
3246da8fa4e3SBjoern A. Zeeb ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
3247da8fa4e3SBjoern A. Zeeb spin_unlock_bh(&ar->data_lock);
3248da8fa4e3SBjoern A. Zeeb }
3249da8fa4e3SBjoern A. Zeeb
ath10k_htt_rx_delba(struct ath10k * ar,struct htt_resp * resp)3250da8fa4e3SBjoern A. Zeeb static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
3251da8fa4e3SBjoern A. Zeeb {
3252da8fa4e3SBjoern A. Zeeb struct htt_rx_delba *ev = &resp->rx_delba;
3253da8fa4e3SBjoern A. Zeeb struct ath10k_peer *peer;
3254da8fa4e3SBjoern A. Zeeb struct ath10k_vif *arvif;
3255da8fa4e3SBjoern A. Zeeb u16 info0, tid, peer_id;
3256da8fa4e3SBjoern A. Zeeb
3257da8fa4e3SBjoern A. Zeeb info0 = __le16_to_cpu(ev->info0);
3258da8fa4e3SBjoern A. Zeeb tid = MS(info0, HTT_RX_BA_INFO0_TID);
3259da8fa4e3SBjoern A. Zeeb peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
3260da8fa4e3SBjoern A. Zeeb
3261da8fa4e3SBjoern A. Zeeb ath10k_dbg(ar, ATH10K_DBG_HTT,
3262da8fa4e3SBjoern A. Zeeb "htt rx delba tid %u peer_id %u\n",
3263da8fa4e3SBjoern A. Zeeb tid, peer_id);
3264da8fa4e3SBjoern A. Zeeb
3265da8fa4e3SBjoern A. Zeeb spin_lock_bh(&ar->data_lock);
3266da8fa4e3SBjoern A. Zeeb peer = ath10k_peer_find_by_id(ar, peer_id);
3267da8fa4e3SBjoern A. Zeeb if (!peer) {
3268da8fa4e3SBjoern A. Zeeb ath10k_warn(ar, "received addba event for invalid peer_id: %u\n",
3269da8fa4e3SBjoern A. Zeeb peer_id);
3270da8fa4e3SBjoern A. Zeeb spin_unlock_bh(&ar->data_lock);
3271da8fa4e3SBjoern A. Zeeb return;
3272da8fa4e3SBjoern A. Zeeb }
3273da8fa4e3SBjoern A. Zeeb
3274da8fa4e3SBjoern A. Zeeb arvif = ath10k_get_arvif(ar, peer->vdev_id);
3275da8fa4e3SBjoern A. Zeeb if (!arvif) {
3276da8fa4e3SBjoern A. Zeeb ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
3277da8fa4e3SBjoern A. Zeeb peer->vdev_id);
3278da8fa4e3SBjoern A. Zeeb spin_unlock_bh(&ar->data_lock);
3279da8fa4e3SBjoern A. Zeeb return;
3280da8fa4e3SBjoern A. Zeeb }
3281da8fa4e3SBjoern A. Zeeb
3282da8fa4e3SBjoern A. Zeeb ath10k_dbg(ar, ATH10K_DBG_HTT,
3283da8fa4e3SBjoern A. Zeeb "htt rx stop rx ba session sta %pM tid %u\n",
3284da8fa4e3SBjoern A. Zeeb peer->addr, tid);
3285da8fa4e3SBjoern A. Zeeb
3286da8fa4e3SBjoern A. Zeeb ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
3287da8fa4e3SBjoern A. Zeeb spin_unlock_bh(&ar->data_lock);
3288da8fa4e3SBjoern A. Zeeb }
3289da8fa4e3SBjoern A. Zeeb
/* Move one complete A-MSDU from @list to @amsdu.
 *
 * MSDUs are dequeued until one carries the LAST_MSDU flag in its rx
 * descriptor, which terminates the A-MSDU.
 *
 * Returns 0 on success, -ENOBUFS if @list is empty, -EINVAL if @amsdu
 * is not empty on entry, or -EAGAIN if @list ran out before a LAST_MSDU
 * marker was seen (the partial A-MSDU is spliced back onto @list so the
 * caller can retry once more MSDUs arrive).
 */
static int ath10k_htt_rx_extract_amsdu(struct ath10k_hw_params *hw,
				       struct sk_buff_head *list,
				       struct sk_buff_head *amsdu)
{
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;
	struct rx_msdu_end_common *rxd_msdu_end_common;

	if (skb_queue_empty(list))
		return -ENOBUFS;

	if (WARN_ON(!skb_queue_empty(amsdu)))
		return -EINVAL;

	while ((msdu = __skb_dequeue(list))) {
		__skb_queue_tail(amsdu, msdu);

		/* the rx descriptor sits immediately before skb->data;
		 * the FreeBSD branch avoids GCC's void-pointer
		 * arithmetic extension
		 */
		rxd = HTT_RX_BUF_TO_RX_DESC(hw,
#if defined(__linux__)
					    (void *)msdu->data -
#elif defined(__FreeBSD__)
					    (u8 *)msdu->data -
#endif
					    hw->rx_desc_ops->rx_desc_size);

		rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
		if (rxd_msdu_end_common->info0 &
		    __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
			break;
	}

	/* re-check the tail: if the loop drained @list without finding
	 * LAST_MSDU the A-MSDU is incomplete
	 */
	msdu = skb_peek_tail(amsdu);
	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
#if defined(__linux__)
				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
#elif defined(__FreeBSD__)
				    (u8 *)msdu->data - hw->rx_desc_ops->rx_desc_size);
#endif

	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
	if (!(rxd_msdu_end_common->info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
		skb_queue_splice_init(amsdu, list);
		return -EAGAIN;
	}

	return 0;
}
3338da8fa4e3SBjoern A. Zeeb
ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status * status,struct sk_buff * skb)3339da8fa4e3SBjoern A. Zeeb static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
3340da8fa4e3SBjoern A. Zeeb struct sk_buff *skb)
3341da8fa4e3SBjoern A. Zeeb {
3342da8fa4e3SBjoern A. Zeeb struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
3343da8fa4e3SBjoern A. Zeeb
3344da8fa4e3SBjoern A. Zeeb if (!ieee80211_has_protected(hdr->frame_control))
3345da8fa4e3SBjoern A. Zeeb return;
3346da8fa4e3SBjoern A. Zeeb
3347da8fa4e3SBjoern A. Zeeb /* Offloaded frames are already decrypted but firmware insists they are
3348da8fa4e3SBjoern A. Zeeb * protected in the 802.11 header. Strip the flag. Otherwise mac80211
3349da8fa4e3SBjoern A. Zeeb * will drop the frame.
3350da8fa4e3SBjoern A. Zeeb */
3351da8fa4e3SBjoern A. Zeeb
3352da8fa4e3SBjoern A. Zeeb hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
3353da8fa4e3SBjoern A. Zeeb status->flag |= RX_FLAG_DECRYPTED |
3354da8fa4e3SBjoern A. Zeeb RX_FLAG_IV_STRIPPED |
3355da8fa4e3SBjoern A. Zeeb RX_FLAG_MMIC_STRIPPED;
3356da8fa4e3SBjoern A. Zeeb }
3357da8fa4e3SBjoern A. Zeeb
/* Deliver a list of offloaded rx MSDUs to mac80211.
 *
 * Offloaded frames carry a short struct htt_rx_offload_msdu header in
 * place of the normal rx descriptor.  Each frame is stripped of that
 * header, length-checked, 4-byte aligned, given a minimal rx status,
 * and queued toward mac80211.  Consumes every skb on @list.
 */
static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
				       struct sk_buff_head *list)
{
	struct ath10k_htt *htt = &ar->htt;
	struct ieee80211_rx_status *status = &htt->rx_status;
	struct htt_rx_offload_msdu *rx;
	struct sk_buff *msdu;
	size_t offset;

	while ((msdu = __skb_dequeue(list))) {
		/* Offloaded frames don't have Rx descriptor. Instead they have
		 * a short meta information header.
		 */

		rx = (void *)msdu->data;

		/* extend the data area over the meta header, then skip
		 * past it so skb->data points at the 802.11 payload
		 */
		skb_put(msdu, sizeof(*rx));
		skb_pull(msdu, sizeof(*rx));

		if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
			ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
			dev_kfree_skb_any(msdu);
			continue;
		}

		skb_put(msdu, __le16_to_cpu(rx->msdu_len));

		/* Offloaded rx header length isn't multiple of 2 nor 4 so the
		 * actual payload is unaligned. Align the frame. Otherwise
		 * mac80211 complains. This shouldn't reduce performance much
		 * because these offloaded frames are rare.
		 */
		/* NOTE(review): when msdu->data is already 4-byte
		 * aligned this computes offset == 4 and still shifts
		 * the frame by a full word — harmless, but confirm it
		 * is intentional before changing
		 */
		offset = 4 - ((unsigned long)msdu->data & 3);
		skb_put(msdu, offset);
		memmove(msdu->data + offset, msdu->data, msdu->len);
		skb_pull(msdu, offset);

		/* FIXME: The frame is NWifi. Re-construct QoS Control
		 * if possible later.
		 */

		/* no RSSI information is available for offloaded frames */
		memset(status, 0, sizeof(*status));
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		ath10k_htt_rx_h_rx_offload_prot(status, msdu);
		ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
		ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
	}
}
3407da8fa4e3SBjoern A. Zeeb
/* Handle an HTT "rx in-order indication" from the target.
 *
 * The event carries a list of physical addresses of previously posted rx
 * buffers.  Those buffers are popped from the rx ring, optionally routed
 * through the offload path, and then split into individual A-MSDUs which
 * are decoded and enqueued towards mac80211.
 *
 * Must be called with htt->rx_ring.lock held (asserted below).
 *
 * Returns 0 on success or a negative errno.  On unrecoverable parse
 * failures htt->rx_confused is set so all subsequent indications are
 * dropped until recovery.
 */
static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (void *)skb->data;
	struct ieee80211_rx_status *status = &htt->rx_status;
	struct sk_buff_head list;
	struct sk_buff_head amsdu;
	u16 peer_id;
	u16 msdu_count;
	u8 vdev_id;
	u8 tid;
	bool offload;
	bool frag;
	int ret;

	lockdep_assert_held(&htt->rx_ring.lock);

	/* A previous fatal parse error has poisoned the rx path; drop. */
	if (htt->rx_confused)
		return -EIO;

	/* Advance past the HTT header and the fixed part of the event.
	 * Note: resp still points at the original skb->data, so the
	 * event fields below remain readable.
	 */
	skb_pull(skb, sizeof(resp->hdr));
	skb_pull(skb, sizeof(resp->rx_in_ord_ind));

	peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
	msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
	vdev_id = resp->rx_in_ord_ind.vdev_id;
	tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
	offload = !!(resp->rx_in_ord_ind.info &
			HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
	frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
		   vdev_id, peer_id, tid, offload, frag, msdu_count);

	/* Validate that the remaining payload can actually hold msdu_count
	 * descriptors before walking them.
	 */
	if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) {
		ath10k_warn(ar, "dropping invalid in order rx indication\n");
		return -EINVAL;
	}

	/* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
	 * extracted and processed.
	 */
	__skb_queue_head_init(&list);
	if (ar->hw_params.target_64bit)
		ret = ath10k_htt_rx_pop_paddr64_list(htt, &resp->rx_in_ord_ind,
						     &list);
	else
		ret = ath10k_htt_rx_pop_paddr32_list(htt, &resp->rx_in_ord_ind,
						     &list);

	if (ret < 0) {
		ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
		/* Ring state is now unknown; poison the rx path. */
		htt->rx_confused = true;
		return -EIO;
	}

	/* Offloaded frames are very different and need to be handled
	 * separately.
	 */
	if (offload)
		ath10k_htt_rx_h_rx_offload(ar, &list);

	while (!skb_queue_empty(&list)) {
		__skb_queue_head_init(&amsdu);
		ret = ath10k_htt_rx_extract_amsdu(&ar->hw_params, &list, &amsdu);
		switch (ret) {
		case 0:
			/* Note: The in-order indication may report interleaved
			 * frames from different PPDUs meaning reported rx rate
			 * to mac80211 isn't accurate/reliable. It's still
			 * better to report something than nothing though. This
			 * should still give an idea about rx rate to the user.
			 */
			ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
			ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL);
			ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL,
					     NULL, peer_id, frag);
			ath10k_htt_rx_h_enqueue(ar, &amsdu, status);
			break;
		case -EAGAIN:
			fallthrough;
		default:
			/* Should not happen. */
			ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
			htt->rx_confused = true;
			__skb_queue_purge(&list);
			return -EIO;
		}
	}
	return ret;
}
3500da8fa4e3SBjoern A. Zeeb
ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k * ar,const __le32 * resp_ids,int num_resp_ids)3501da8fa4e3SBjoern A. Zeeb static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
3502da8fa4e3SBjoern A. Zeeb const __le32 *resp_ids,
3503da8fa4e3SBjoern A. Zeeb int num_resp_ids)
3504da8fa4e3SBjoern A. Zeeb {
3505da8fa4e3SBjoern A. Zeeb int i;
3506da8fa4e3SBjoern A. Zeeb u32 resp_id;
3507da8fa4e3SBjoern A. Zeeb
3508da8fa4e3SBjoern A. Zeeb ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
3509da8fa4e3SBjoern A. Zeeb num_resp_ids);
3510da8fa4e3SBjoern A. Zeeb
3511da8fa4e3SBjoern A. Zeeb for (i = 0; i < num_resp_ids; i++) {
3512da8fa4e3SBjoern A. Zeeb resp_id = le32_to_cpu(resp_ids[i]);
3513da8fa4e3SBjoern A. Zeeb
3514da8fa4e3SBjoern A. Zeeb ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
3515da8fa4e3SBjoern A. Zeeb resp_id);
3516da8fa4e3SBjoern A. Zeeb
3517da8fa4e3SBjoern A. Zeeb /* TODO: free resp_id */
3518da8fa4e3SBjoern A. Zeeb }
3519da8fa4e3SBjoern A. Zeeb }
3520da8fa4e3SBjoern A. Zeeb
/* Handle an HTT tx-fetch indication (pull-mode tx scheduling).
 *
 * The target asks the host to push up to record->num_msdus /
 * record->num_bytes worth of frames for each (peer_id, tid) record.  For
 * every valid record the corresponding mac80211 txq is looked up and
 * drained within those limits; the record is then rewritten in place with
 * the actually pushed counts and echoed back to the firmware via
 * ath10k_htt_tx_fetch_resp().
 *
 * All event fields are length-validated before use since the buffer
 * comes from the target.
 */
static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_hw *hw = ar->hw;
	struct ieee80211_txq *txq;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_fetch_record *record;
	size_t len;
	size_t max_num_bytes;
	size_t max_num_msdus;
	size_t num_bytes;
	size_t num_msdus;
	const __le32 *resp_ids;
	u16 num_records;
	u16 num_resp_ids;
	u16 peer_id;
	u8 tid;
	int ret;
	int i;
	bool may_tx;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");

	/* First check: fixed-size part of the event must be present. */
	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
		return;
	}

	num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
	num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);

	/* Second check: variable-length record/resp_id arrays must fit. */
	len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
	len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %u num resps %u seq %u\n",
		   num_records, num_resp_ids,
		   le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));

	/* Fetch indications only make sense in (push-)pull mode. */
	if (!ar->htt.tx_q_state.enabled) {
		ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
		return;
	}

	if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
		ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
		return;
	}

	rcu_read_lock();

	for (i = 0; i < num_records; i++) {
		record = &resp->tx_fetch_ind.records[i];
		peer_id = MS(le16_to_cpu(record->info),
			     HTT_TX_FETCH_RECORD_INFO_PEER_ID);
		tid = MS(le16_to_cpu(record->info),
			 HTT_TX_FETCH_RECORD_INFO_TID);
		max_num_msdus = le16_to_cpu(record->num_msdus);
		max_num_bytes = le32_to_cpu(record->num_bytes);

		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %u tid %u msdus %zu bytes %zu\n",
			   i, peer_id, tid, max_num_msdus, max_num_bytes);

		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
			ath10k_warn(ar, "received out of range peer_id %u tid %u\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->data_lock);
		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
		spin_unlock_bh(&ar->data_lock);

		/* It is okay to release the lock and use txq because RCU read
		 * lock is held.
		 */

		if (unlikely(!txq)) {
			ath10k_warn(ar, "failed to lookup txq for peer_id %u tid %u\n",
				    peer_id, tid);
			continue;
		}

		num_msdus = 0;
		num_bytes = 0;

		/* Push frames within the per-record budget; stop on the
		 * first push error or when airtime scheduling forbids tx.
		 */
		ieee80211_txq_schedule_start(hw, txq->ac);
		may_tx = ieee80211_txq_may_transmit(hw, txq);
		while (num_msdus < max_num_msdus &&
		       num_bytes < max_num_bytes) {
			if (!may_tx)
				break;

			ret = ath10k_mac_tx_push_txq(hw, txq);
			if (ret < 0)
				break;

			num_msdus++;
			num_bytes += ret;
		}
		ieee80211_return_txq(hw, txq, false);
		ieee80211_txq_schedule_end(hw, txq->ac);

		/* Rewrite the record in place; the array is echoed back to
		 * the firmware in the fetch response below.
		 */
		record->num_msdus = cpu_to_le16(num_msdus);
		record->num_bytes = cpu_to_le32(num_bytes);

		ath10k_htt_tx_txq_recalc(hw, txq);
	}

	rcu_read_unlock();

	resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);

	ret = ath10k_htt_tx_fetch_resp(ar,
				       resp->tx_fetch_ind.token,
				       resp->tx_fetch_ind.fetch_seq_num,
				       resp->tx_fetch_ind.records,
				       num_records);
	if (unlikely(ret)) {
		ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
			    le32_to_cpu(resp->tx_fetch_ind.token), ret);
		/* FIXME: request fw restart */
	}

	ath10k_htt_tx_txq_sync(ar);
}
3653da8fa4e3SBjoern A. Zeeb
ath10k_htt_rx_tx_fetch_confirm(struct ath10k * ar,struct sk_buff * skb)3654da8fa4e3SBjoern A. Zeeb static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
3655da8fa4e3SBjoern A. Zeeb struct sk_buff *skb)
3656da8fa4e3SBjoern A. Zeeb {
3657da8fa4e3SBjoern A. Zeeb const struct htt_resp *resp = (void *)skb->data;
3658da8fa4e3SBjoern A. Zeeb size_t len;
3659da8fa4e3SBjoern A. Zeeb int num_resp_ids;
3660da8fa4e3SBjoern A. Zeeb
3661da8fa4e3SBjoern A. Zeeb ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");
3662da8fa4e3SBjoern A. Zeeb
3663da8fa4e3SBjoern A. Zeeb len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
3664da8fa4e3SBjoern A. Zeeb if (unlikely(skb->len < len)) {
3665da8fa4e3SBjoern A. Zeeb ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
3666da8fa4e3SBjoern A. Zeeb return;
3667da8fa4e3SBjoern A. Zeeb }
3668da8fa4e3SBjoern A. Zeeb
3669da8fa4e3SBjoern A. Zeeb num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
3670da8fa4e3SBjoern A. Zeeb len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;
3671da8fa4e3SBjoern A. Zeeb
3672da8fa4e3SBjoern A. Zeeb if (unlikely(skb->len < len)) {
3673da8fa4e3SBjoern A. Zeeb ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
3674da8fa4e3SBjoern A. Zeeb return;
3675da8fa4e3SBjoern A. Zeeb }
3676da8fa4e3SBjoern A. Zeeb
3677da8fa4e3SBjoern A. Zeeb ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
3678da8fa4e3SBjoern A. Zeeb resp->tx_fetch_confirm.resp_ids,
3679da8fa4e3SBjoern A. Zeeb num_resp_ids);
3680da8fa4e3SBjoern A. Zeeb }
3681da8fa4e3SBjoern A. Zeeb
ath10k_htt_rx_tx_mode_switch_ind(struct ath10k * ar,struct sk_buff * skb)3682da8fa4e3SBjoern A. Zeeb static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
3683da8fa4e3SBjoern A. Zeeb struct sk_buff *skb)
3684da8fa4e3SBjoern A. Zeeb {
3685da8fa4e3SBjoern A. Zeeb const struct htt_resp *resp = (void *)skb->data;
3686da8fa4e3SBjoern A. Zeeb const struct htt_tx_mode_switch_record *record;
3687da8fa4e3SBjoern A. Zeeb struct ieee80211_txq *txq;
3688da8fa4e3SBjoern A. Zeeb struct ath10k_txq *artxq;
3689da8fa4e3SBjoern A. Zeeb size_t len;
3690da8fa4e3SBjoern A. Zeeb size_t num_records;
3691da8fa4e3SBjoern A. Zeeb enum htt_tx_mode_switch_mode mode;
3692da8fa4e3SBjoern A. Zeeb bool enable;
3693da8fa4e3SBjoern A. Zeeb u16 info0;
3694da8fa4e3SBjoern A. Zeeb u16 info1;
3695da8fa4e3SBjoern A. Zeeb u16 threshold;
3696da8fa4e3SBjoern A. Zeeb u16 peer_id;
3697da8fa4e3SBjoern A. Zeeb u8 tid;
3698da8fa4e3SBjoern A. Zeeb int i;
3699da8fa4e3SBjoern A. Zeeb
3700da8fa4e3SBjoern A. Zeeb ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");
3701da8fa4e3SBjoern A. Zeeb
3702da8fa4e3SBjoern A. Zeeb len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
3703da8fa4e3SBjoern A. Zeeb if (unlikely(skb->len < len)) {
3704da8fa4e3SBjoern A. Zeeb ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
3705da8fa4e3SBjoern A. Zeeb return;
3706da8fa4e3SBjoern A. Zeeb }
3707da8fa4e3SBjoern A. Zeeb
3708da8fa4e3SBjoern A. Zeeb info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
3709da8fa4e3SBjoern A. Zeeb info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);
3710da8fa4e3SBjoern A. Zeeb
3711da8fa4e3SBjoern A. Zeeb enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
3712da8fa4e3SBjoern A. Zeeb num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);
3713da8fa4e3SBjoern A. Zeeb mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
3714da8fa4e3SBjoern A. Zeeb threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);
3715da8fa4e3SBjoern A. Zeeb
3716da8fa4e3SBjoern A. Zeeb ath10k_dbg(ar, ATH10K_DBG_HTT,
3717*07724ba6SBjoern A. Zeeb "htt rx tx mode switch ind info0 0x%04x info1 0x%04x enable %d num records %zd mode %d threshold %u\n",
3718da8fa4e3SBjoern A. Zeeb info0, info1, enable, num_records, mode, threshold);
3719da8fa4e3SBjoern A. Zeeb
3720da8fa4e3SBjoern A. Zeeb len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;
3721da8fa4e3SBjoern A. Zeeb
3722da8fa4e3SBjoern A. Zeeb if (unlikely(skb->len < len)) {
3723da8fa4e3SBjoern A. Zeeb ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n");
3724da8fa4e3SBjoern A. Zeeb return;
3725da8fa4e3SBjoern A. Zeeb }
3726da8fa4e3SBjoern A. Zeeb
3727da8fa4e3SBjoern A. Zeeb switch (mode) {
3728da8fa4e3SBjoern A. Zeeb case HTT_TX_MODE_SWITCH_PUSH:
3729da8fa4e3SBjoern A. Zeeb case HTT_TX_MODE_SWITCH_PUSH_PULL:
3730da8fa4e3SBjoern A. Zeeb break;
3731da8fa4e3SBjoern A. Zeeb default:
3732da8fa4e3SBjoern A. Zeeb ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n",
3733da8fa4e3SBjoern A. Zeeb mode);
3734da8fa4e3SBjoern A. Zeeb return;
3735da8fa4e3SBjoern A. Zeeb }
3736da8fa4e3SBjoern A. Zeeb
3737da8fa4e3SBjoern A. Zeeb if (!enable)
3738da8fa4e3SBjoern A. Zeeb return;
3739da8fa4e3SBjoern A. Zeeb
3740da8fa4e3SBjoern A. Zeeb ar->htt.tx_q_state.enabled = enable;
3741da8fa4e3SBjoern A. Zeeb ar->htt.tx_q_state.mode = mode;
3742da8fa4e3SBjoern A. Zeeb ar->htt.tx_q_state.num_push_allowed = threshold;
3743da8fa4e3SBjoern A. Zeeb
3744da8fa4e3SBjoern A. Zeeb rcu_read_lock();
3745da8fa4e3SBjoern A. Zeeb
3746da8fa4e3SBjoern A. Zeeb for (i = 0; i < num_records; i++) {
3747da8fa4e3SBjoern A. Zeeb record = &resp->tx_mode_switch_ind.records[i];
3748da8fa4e3SBjoern A. Zeeb info0 = le16_to_cpu(record->info0);
3749da8fa4e3SBjoern A. Zeeb peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
3750da8fa4e3SBjoern A. Zeeb tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);
3751da8fa4e3SBjoern A. Zeeb
3752da8fa4e3SBjoern A. Zeeb if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
3753da8fa4e3SBjoern A. Zeeb unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
3754da8fa4e3SBjoern A. Zeeb ath10k_warn(ar, "received out of range peer_id %u tid %u\n",
3755da8fa4e3SBjoern A. Zeeb peer_id, tid);
3756da8fa4e3SBjoern A. Zeeb continue;
3757da8fa4e3SBjoern A. Zeeb }
3758da8fa4e3SBjoern A. Zeeb
3759da8fa4e3SBjoern A. Zeeb spin_lock_bh(&ar->data_lock);
3760da8fa4e3SBjoern A. Zeeb txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
3761da8fa4e3SBjoern A. Zeeb spin_unlock_bh(&ar->data_lock);
3762da8fa4e3SBjoern A. Zeeb
3763da8fa4e3SBjoern A. Zeeb /* It is okay to release the lock and use txq because RCU read
3764da8fa4e3SBjoern A. Zeeb * lock is held.
3765da8fa4e3SBjoern A. Zeeb */
3766da8fa4e3SBjoern A. Zeeb
3767da8fa4e3SBjoern A. Zeeb if (unlikely(!txq)) {
3768da8fa4e3SBjoern A. Zeeb ath10k_warn(ar, "failed to lookup txq for peer_id %u tid %u\n",
3769da8fa4e3SBjoern A. Zeeb peer_id, tid);
3770da8fa4e3SBjoern A. Zeeb continue;
3771da8fa4e3SBjoern A. Zeeb }
3772da8fa4e3SBjoern A. Zeeb
3773da8fa4e3SBjoern A. Zeeb spin_lock_bh(&ar->htt.tx_lock);
3774da8fa4e3SBjoern A. Zeeb artxq = (void *)txq->drv_priv;
3775da8fa4e3SBjoern A. Zeeb artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
3776da8fa4e3SBjoern A. Zeeb spin_unlock_bh(&ar->htt.tx_lock);
3777da8fa4e3SBjoern A. Zeeb }
3778da8fa4e3SBjoern A. Zeeb
3779da8fa4e3SBjoern A. Zeeb rcu_read_unlock();
3780da8fa4e3SBjoern A. Zeeb
3781da8fa4e3SBjoern A. Zeeb ath10k_mac_tx_push_pending(ar);
3782da8fa4e3SBjoern A. Zeeb }
3783da8fa4e3SBjoern A. Zeeb
/* HTC entry point for target-to-host HTT messages: dispatch the event and
 * free the indication buffer unless the handler took ownership of it.
 */
void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	if (ath10k_htt_t2h_msg_handler(ar, skb))
		dev_kfree_skb_any(skb);
}
3794da8fa4e3SBjoern A. Zeeb
ath10k_get_legacy_rate_idx(struct ath10k * ar,u8 rate)3795da8fa4e3SBjoern A. Zeeb static inline s8 ath10k_get_legacy_rate_idx(struct ath10k *ar, u8 rate)
3796da8fa4e3SBjoern A. Zeeb {
3797da8fa4e3SBjoern A. Zeeb static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12,
3798da8fa4e3SBjoern A. Zeeb 18, 24, 36, 48, 54};
3799da8fa4e3SBjoern A. Zeeb int i;
3800da8fa4e3SBjoern A. Zeeb
3801da8fa4e3SBjoern A. Zeeb for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) {
3802da8fa4e3SBjoern A. Zeeb if (rate == legacy_rates[i])
3803da8fa4e3SBjoern A. Zeeb return i;
3804da8fa4e3SBjoern A. Zeeb }
3805da8fa4e3SBjoern A. Zeeb
3806da8fa4e3SBjoern A. Zeeb ath10k_warn(ar, "Invalid legacy rate %d peer stats", rate);
3807da8fa4e3SBjoern A. Zeeb return -EINVAL;
3808da8fa4e3SBjoern A. Zeeb }
3809da8fa4e3SBjoern A. Zeeb
/* Fold one per-peer tx status report (pstats) into the station's
 * cumulative debugfs tx statistics (arsta->tx_stats).
 *
 * Stats are bucketed per rate class (legacy/HT/VHT), bandwidth, NSS,
 * guard interval and a flattened rate-table index; row [0] of each table
 * accumulates bytes and row [1] packets.
 *
 * legacy_rate_idx is the index returned by ath10k_get_legacy_rate_idx();
 * NOTE(review): it is used unchecked as an array index below for
 * non-HT/VHT rates — assumes the caller has already rejected negative
 * values, TODO confirm at the call site.
 */
static void
ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
				    struct ath10k_sta *arsta,
				    struct ath10k_per_peer_tx_stats *pstats,
				    s8 legacy_rate_idx)
{
	struct rate_info *txrate = &arsta->txrate;
	struct ath10k_htt_tx_stats *tx_stats;
	int idx, ht_idx, gi, mcs, bw, nss;
	unsigned long flags;

	/* Stats collection may not be allocated for this station. */
	if (!arsta->tx_stats)
		return;

	tx_stats = arsta->tx_stats;
	flags = txrate->flags;
	gi = test_bit(ATH10K_RATE_INFO_FLAGS_SGI_BIT, &flags);
	mcs = ATH10K_HW_MCS_RATE(pstats->ratecode);
	bw = txrate->bw;
	nss = txrate->nss;
	/* HT tables are indexed mcs + 8 per extra spatial stream. */
	ht_idx = mcs + (nss - 1) * 8;
	/* Flattened rate-table index: 8 (bw,gi) slots per mcs, 10 mcs
	 * rows per nss.
	 */
	idx = mcs * 8 + 8 * 10 * (nss - 1);
	idx += bw * 2 + gi;

#define STATS_OP_FMT(name) tx_stats->stats[ATH10K_STATS_TYPE_##name]

	if (txrate->flags & RATE_INFO_FLAGS_VHT_MCS) {
		STATS_OP_FMT(SUCC).vht[0][mcs] += pstats->succ_bytes;
		STATS_OP_FMT(SUCC).vht[1][mcs] += pstats->succ_pkts;
		STATS_OP_FMT(FAIL).vht[0][mcs] += pstats->failed_bytes;
		STATS_OP_FMT(FAIL).vht[1][mcs] += pstats->failed_pkts;
		STATS_OP_FMT(RETRY).vht[0][mcs] += pstats->retry_bytes;
		STATS_OP_FMT(RETRY).vht[1][mcs] += pstats->retry_pkts;
	} else if (txrate->flags & RATE_INFO_FLAGS_MCS) {
		STATS_OP_FMT(SUCC).ht[0][ht_idx] += pstats->succ_bytes;
		STATS_OP_FMT(SUCC).ht[1][ht_idx] += pstats->succ_pkts;
		STATS_OP_FMT(FAIL).ht[0][ht_idx] += pstats->failed_bytes;
		STATS_OP_FMT(FAIL).ht[1][ht_idx] += pstats->failed_pkts;
		STATS_OP_FMT(RETRY).ht[0][ht_idx] += pstats->retry_bytes;
		STATS_OP_FMT(RETRY).ht[1][ht_idx] += pstats->retry_pkts;
	} else {
		/* Legacy rate: reuse mcs as the legacy table index. */
		mcs = legacy_rate_idx;

		STATS_OP_FMT(SUCC).legacy[0][mcs] += pstats->succ_bytes;
		STATS_OP_FMT(SUCC).legacy[1][mcs] += pstats->succ_pkts;
		STATS_OP_FMT(FAIL).legacy[0][mcs] += pstats->failed_bytes;
		STATS_OP_FMT(FAIL).legacy[1][mcs] += pstats->failed_pkts;
		STATS_OP_FMT(RETRY).legacy[0][mcs] += pstats->retry_bytes;
		STATS_OP_FMT(RETRY).legacy[1][mcs] += pstats->retry_pkts;
	}

	if (ATH10K_HW_AMPDU(pstats->flags)) {
		tx_stats->ba_fails += ATH10K_HW_BA_FAIL(pstats->flags);

		if (txrate->flags & RATE_INFO_FLAGS_MCS) {
			STATS_OP_FMT(AMPDU).ht[0][ht_idx] +=
				pstats->succ_bytes + pstats->retry_bytes;
			STATS_OP_FMT(AMPDU).ht[1][ht_idx] +=
				pstats->succ_pkts + pstats->retry_pkts;
		} else {
			/* NOTE(review): non-HT A-MPDU stats land in the vht
			 * table even for legacy rates (mcs was reassigned
			 * above) — matches upstream behavior, verify intent.
			 */
			STATS_OP_FMT(AMPDU).vht[0][mcs] +=
				pstats->succ_bytes + pstats->retry_bytes;
			STATS_OP_FMT(AMPDU).vht[1][mcs] +=
				pstats->succ_pkts + pstats->retry_pkts;
		}
		STATS_OP_FMT(AMPDU).bw[0][bw] +=
			pstats->succ_bytes + pstats->retry_bytes;
		STATS_OP_FMT(AMPDU).nss[0][nss - 1] +=
			pstats->succ_bytes + pstats->retry_bytes;
		STATS_OP_FMT(AMPDU).gi[0][gi] +=
			pstats->succ_bytes + pstats->retry_bytes;
		STATS_OP_FMT(AMPDU).rate_table[0][idx] +=
			pstats->succ_bytes + pstats->retry_bytes;
		STATS_OP_FMT(AMPDU).bw[1][bw] +=
			pstats->succ_pkts + pstats->retry_pkts;
		STATS_OP_FMT(AMPDU).nss[1][nss - 1] +=
			pstats->succ_pkts + pstats->retry_pkts;
		STATS_OP_FMT(AMPDU).gi[1][gi] +=
			pstats->succ_pkts + pstats->retry_pkts;
		STATS_OP_FMT(AMPDU).rate_table[1][idx] +=
			pstats->succ_pkts + pstats->retry_pkts;
	} else {
		tx_stats->ack_fails +=
				ATH10K_HW_BA_FAIL(pstats->flags);
	}

	STATS_OP_FMT(SUCC).bw[0][bw] += pstats->succ_bytes;
	STATS_OP_FMT(SUCC).nss[0][nss - 1] += pstats->succ_bytes;
	STATS_OP_FMT(SUCC).gi[0][gi] += pstats->succ_bytes;

	STATS_OP_FMT(SUCC).bw[1][bw] += pstats->succ_pkts;
	STATS_OP_FMT(SUCC).nss[1][nss - 1] += pstats->succ_pkts;
	STATS_OP_FMT(SUCC).gi[1][gi] += pstats->succ_pkts;

	STATS_OP_FMT(FAIL).bw[0][bw] += pstats->failed_bytes;
	STATS_OP_FMT(FAIL).nss[0][nss - 1] += pstats->failed_bytes;
	STATS_OP_FMT(FAIL).gi[0][gi] += pstats->failed_bytes;

	STATS_OP_FMT(FAIL).bw[1][bw] += pstats->failed_pkts;
	STATS_OP_FMT(FAIL).nss[1][nss - 1] += pstats->failed_pkts;
	STATS_OP_FMT(FAIL).gi[1][gi] += pstats->failed_pkts;

	STATS_OP_FMT(RETRY).bw[0][bw] += pstats->retry_bytes;
	STATS_OP_FMT(RETRY).nss[0][nss - 1] += pstats->retry_bytes;
	STATS_OP_FMT(RETRY).gi[0][gi] += pstats->retry_bytes;

	STATS_OP_FMT(RETRY).bw[1][bw] += pstats->retry_pkts;
	STATS_OP_FMT(RETRY).nss[1][nss - 1] += pstats->retry_pkts;
	STATS_OP_FMT(RETRY).gi[1][gi] += pstats->retry_pkts;

	/* rate_table entries only exist for MCS-based (HT/VHT) rates. */
	if (txrate->flags >= RATE_INFO_FLAGS_MCS) {
		STATS_OP_FMT(SUCC).rate_table[0][idx] += pstats->succ_bytes;
		STATS_OP_FMT(SUCC).rate_table[1][idx] += pstats->succ_pkts;
		STATS_OP_FMT(FAIL).rate_table[0][idx] += pstats->failed_bytes;
		STATS_OP_FMT(FAIL).rate_table[1][idx] += pstats->failed_pkts;
		STATS_OP_FMT(RETRY).rate_table[0][idx] += pstats->retry_bytes;
		STATS_OP_FMT(RETRY).rate_table[1][idx] += pstats->retry_pkts;
	}

	tx_stats->tx_duration += pstats->duration;
}
3931da8fa4e3SBjoern A. Zeeb
3932da8fa4e3SBjoern A. Zeeb static void
ath10k_update_per_peer_tx_stats(struct ath10k * ar,struct ieee80211_sta * sta,struct ath10k_per_peer_tx_stats * peer_stats)3933da8fa4e3SBjoern A. Zeeb ath10k_update_per_peer_tx_stats(struct ath10k *ar,
3934da8fa4e3SBjoern A. Zeeb struct ieee80211_sta *sta,
3935da8fa4e3SBjoern A. Zeeb struct ath10k_per_peer_tx_stats *peer_stats)
3936da8fa4e3SBjoern A. Zeeb {
3937da8fa4e3SBjoern A. Zeeb struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
3938da8fa4e3SBjoern A. Zeeb struct ieee80211_chanctx_conf *conf = NULL;
3939da8fa4e3SBjoern A. Zeeb u8 rate = 0, sgi;
3940da8fa4e3SBjoern A. Zeeb s8 rate_idx = 0;
3941da8fa4e3SBjoern A. Zeeb bool skip_auto_rate;
3942da8fa4e3SBjoern A. Zeeb struct rate_info txrate;
3943da8fa4e3SBjoern A. Zeeb
3944da8fa4e3SBjoern A. Zeeb lockdep_assert_held(&ar->data_lock);
3945da8fa4e3SBjoern A. Zeeb
3946da8fa4e3SBjoern A. Zeeb txrate.flags = ATH10K_HW_PREAMBLE(peer_stats->ratecode);
3947da8fa4e3SBjoern A. Zeeb txrate.bw = ATH10K_HW_BW(peer_stats->flags);
3948da8fa4e3SBjoern A. Zeeb txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode);
3949da8fa4e3SBjoern A. Zeeb txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode);
3950da8fa4e3SBjoern A. Zeeb sgi = ATH10K_HW_GI(peer_stats->flags);
3951da8fa4e3SBjoern A. Zeeb skip_auto_rate = ATH10K_FW_SKIPPED_RATE_CTRL(peer_stats->flags);
3952da8fa4e3SBjoern A. Zeeb
3953da8fa4e3SBjoern A. Zeeb /* Firmware's rate control skips broadcast/management frames,
3954da8fa4e3SBjoern A. Zeeb * if host has configure fixed rates and in some other special cases.
3955da8fa4e3SBjoern A. Zeeb */
3956da8fa4e3SBjoern A. Zeeb if (skip_auto_rate)
3957da8fa4e3SBjoern A. Zeeb return;
3958da8fa4e3SBjoern A. Zeeb
3959da8fa4e3SBjoern A. Zeeb if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) {
3960da8fa4e3SBjoern A. Zeeb ath10k_warn(ar, "Invalid VHT mcs %d peer stats", txrate.mcs);
3961da8fa4e3SBjoern A. Zeeb return;
3962da8fa4e3SBjoern A. Zeeb }
3963da8fa4e3SBjoern A. Zeeb
3964da8fa4e3SBjoern A. Zeeb if (txrate.flags == WMI_RATE_PREAMBLE_HT &&
3965da8fa4e3SBjoern A. Zeeb (txrate.mcs > 7 || txrate.nss < 1)) {
3966da8fa4e3SBjoern A. Zeeb ath10k_warn(ar, "Invalid HT mcs %d nss %d peer stats",
3967da8fa4e3SBjoern A. Zeeb txrate.mcs, txrate.nss);
3968da8fa4e3SBjoern A. Zeeb return;
3969da8fa4e3SBjoern A. Zeeb }
3970da8fa4e3SBjoern A. Zeeb
3971da8fa4e3SBjoern A. Zeeb memset(&arsta->txrate, 0, sizeof(arsta->txrate));
3972da8fa4e3SBjoern A. Zeeb memset(&arsta->tx_info.status, 0, sizeof(arsta->tx_info.status));
3973da8fa4e3SBjoern A. Zeeb if (txrate.flags == WMI_RATE_PREAMBLE_CCK ||
3974da8fa4e3SBjoern A. Zeeb txrate.flags == WMI_RATE_PREAMBLE_OFDM) {
3975da8fa4e3SBjoern A. Zeeb rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode);
3976da8fa4e3SBjoern A. Zeeb /* This is hacky, FW sends CCK rate 5.5Mbps as 6 */
3977da8fa4e3SBjoern A. Zeeb if (rate == 6 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
3978da8fa4e3SBjoern A. Zeeb rate = 5;
3979da8fa4e3SBjoern A. Zeeb rate_idx = ath10k_get_legacy_rate_idx(ar, rate);
3980da8fa4e3SBjoern A. Zeeb if (rate_idx < 0)
3981da8fa4e3SBjoern A. Zeeb return;
3982da8fa4e3SBjoern A. Zeeb arsta->txrate.legacy = rate;
3983da8fa4e3SBjoern A. Zeeb } else if (txrate.flags == WMI_RATE_PREAMBLE_HT) {
3984da8fa4e3SBjoern A. Zeeb arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
3985da8fa4e3SBjoern A. Zeeb arsta->txrate.mcs = txrate.mcs + 8 * (txrate.nss - 1);
3986da8fa4e3SBjoern A. Zeeb } else {
3987da8fa4e3SBjoern A. Zeeb arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
3988da8fa4e3SBjoern A. Zeeb arsta->txrate.mcs = txrate.mcs;
3989da8fa4e3SBjoern A. Zeeb }
3990da8fa4e3SBjoern A. Zeeb
3991da8fa4e3SBjoern A. Zeeb switch (txrate.flags) {
3992da8fa4e3SBjoern A. Zeeb case WMI_RATE_PREAMBLE_OFDM:
3993da8fa4e3SBjoern A. Zeeb if (arsta->arvif && arsta->arvif->vif)
3994*07724ba6SBjoern A. Zeeb conf = rcu_dereference(arsta->arvif->vif->bss_conf.chanctx_conf);
3995da8fa4e3SBjoern A. Zeeb if (conf && conf->def.chan->band == NL80211_BAND_5GHZ)
3996da8fa4e3SBjoern A. Zeeb arsta->tx_info.status.rates[0].idx = rate_idx - 4;
3997da8fa4e3SBjoern A. Zeeb break;
3998da8fa4e3SBjoern A. Zeeb case WMI_RATE_PREAMBLE_CCK:
3999da8fa4e3SBjoern A. Zeeb arsta->tx_info.status.rates[0].idx = rate_idx;
4000da8fa4e3SBjoern A. Zeeb if (sgi)
4001da8fa4e3SBjoern A. Zeeb arsta->tx_info.status.rates[0].flags |=
4002da8fa4e3SBjoern A. Zeeb (IEEE80211_TX_RC_USE_SHORT_PREAMBLE |
4003da8fa4e3SBjoern A. Zeeb IEEE80211_TX_RC_SHORT_GI);
4004da8fa4e3SBjoern A. Zeeb break;
4005da8fa4e3SBjoern A. Zeeb case WMI_RATE_PREAMBLE_HT:
4006da8fa4e3SBjoern A. Zeeb arsta->tx_info.status.rates[0].idx =
4007da8fa4e3SBjoern A. Zeeb txrate.mcs + ((txrate.nss - 1) * 8);
4008da8fa4e3SBjoern A. Zeeb if (sgi)
4009da8fa4e3SBjoern A. Zeeb arsta->tx_info.status.rates[0].flags |=
4010da8fa4e3SBjoern A. Zeeb IEEE80211_TX_RC_SHORT_GI;
4011da8fa4e3SBjoern A. Zeeb arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_MCS;
4012da8fa4e3SBjoern A. Zeeb break;
4013da8fa4e3SBjoern A. Zeeb case WMI_RATE_PREAMBLE_VHT:
4014da8fa4e3SBjoern A. Zeeb ieee80211_rate_set_vht(&arsta->tx_info.status.rates[0],
4015da8fa4e3SBjoern A. Zeeb txrate.mcs, txrate.nss);
4016da8fa4e3SBjoern A. Zeeb if (sgi)
4017da8fa4e3SBjoern A. Zeeb arsta->tx_info.status.rates[0].flags |=
4018da8fa4e3SBjoern A. Zeeb IEEE80211_TX_RC_SHORT_GI;
4019da8fa4e3SBjoern A. Zeeb arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_VHT_MCS;
4020da8fa4e3SBjoern A. Zeeb break;
4021da8fa4e3SBjoern A. Zeeb }
4022da8fa4e3SBjoern A. Zeeb
4023da8fa4e3SBjoern A. Zeeb arsta->txrate.nss = txrate.nss;
4024da8fa4e3SBjoern A. Zeeb arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw);
4025da8fa4e3SBjoern A. Zeeb arsta->last_tx_bitrate = cfg80211_calculate_bitrate(&arsta->txrate);
4026da8fa4e3SBjoern A. Zeeb if (sgi)
4027da8fa4e3SBjoern A. Zeeb arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
4028da8fa4e3SBjoern A. Zeeb
4029da8fa4e3SBjoern A. Zeeb switch (arsta->txrate.bw) {
4030da8fa4e3SBjoern A. Zeeb case RATE_INFO_BW_40:
4031da8fa4e3SBjoern A. Zeeb arsta->tx_info.status.rates[0].flags |=
4032da8fa4e3SBjoern A. Zeeb IEEE80211_TX_RC_40_MHZ_WIDTH;
4033da8fa4e3SBjoern A. Zeeb break;
4034da8fa4e3SBjoern A. Zeeb case RATE_INFO_BW_80:
4035da8fa4e3SBjoern A. Zeeb arsta->tx_info.status.rates[0].flags |=
4036da8fa4e3SBjoern A. Zeeb IEEE80211_TX_RC_80_MHZ_WIDTH;
4037da8fa4e3SBjoern A. Zeeb break;
4038*07724ba6SBjoern A. Zeeb case RATE_INFO_BW_160:
4039*07724ba6SBjoern A. Zeeb arsta->tx_info.status.rates[0].flags |=
4040*07724ba6SBjoern A. Zeeb IEEE80211_TX_RC_160_MHZ_WIDTH;
4041*07724ba6SBjoern A. Zeeb break;
4042da8fa4e3SBjoern A. Zeeb }
4043da8fa4e3SBjoern A. Zeeb
4044da8fa4e3SBjoern A. Zeeb if (peer_stats->succ_pkts) {
4045da8fa4e3SBjoern A. Zeeb arsta->tx_info.flags = IEEE80211_TX_STAT_ACK;
4046da8fa4e3SBjoern A. Zeeb arsta->tx_info.status.rates[0].count = 1;
4047da8fa4e3SBjoern A. Zeeb ieee80211_tx_rate_update(ar->hw, sta, &arsta->tx_info);
4048da8fa4e3SBjoern A. Zeeb }
4049da8fa4e3SBjoern A. Zeeb
4050da8fa4e3SBjoern A. Zeeb if (ar->htt.disable_tx_comp) {
4051da8fa4e3SBjoern A. Zeeb arsta->tx_failed += peer_stats->failed_pkts;
4052da8fa4e3SBjoern A. Zeeb ath10k_dbg(ar, ATH10K_DBG_HTT, "tx failed %d\n",
4053da8fa4e3SBjoern A. Zeeb arsta->tx_failed);
4054da8fa4e3SBjoern A. Zeeb }
4055da8fa4e3SBjoern A. Zeeb
4056da8fa4e3SBjoern A. Zeeb arsta->tx_retries += peer_stats->retry_pkts;
4057da8fa4e3SBjoern A. Zeeb ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx retries %d", arsta->tx_retries);
4058da8fa4e3SBjoern A. Zeeb
4059da8fa4e3SBjoern A. Zeeb if (ath10k_debug_is_extd_tx_stats_enabled(ar))
4060da8fa4e3SBjoern A. Zeeb ath10k_accumulate_per_peer_tx_stats(ar, arsta, peer_stats,
4061da8fa4e3SBjoern A. Zeeb rate_idx);
4062da8fa4e3SBjoern A. Zeeb }
4063da8fa4e3SBjoern A. Zeeb
/* Handle an HTT_T2H_MSG_TYPE_PEER_STATS indication.
 *
 * The message carries @num_ppdu per-PPDU tx statistics records for one
 * peer.  Each record is unpacked (little-endian) into the scratch buffer
 * ar->peer_tx_stats and applied to the peer's station entry via
 * ath10k_update_per_peer_tx_stats().
 *
 * Ownership of @skb stays with the caller.
 */
static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
					struct sk_buff *skb)
{
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
	struct htt_per_peer_tx_stats_ind *tx_stats;
	struct ieee80211_sta *sta;
	struct ath10k_peer *peer;
	int peer_id, i;
	u8 ppdu_len, num_ppdu;

	num_ppdu = resp->peer_tx_stats.num_ppdu;
	/* ppdu_len is reported by firmware in 32-bit words; convert to bytes */
	ppdu_len = resp->peer_tx_stats.ppdu_len * sizeof(__le32);

	/* Reject buffers too short to hold the advertised record array */
	if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) {
		ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len);
		return;
	}

	/* peer_id is taken from the first record; all records in one
	 * indication belong to the same peer.
	 */
	tx_stats = (struct htt_per_peer_tx_stats_ind *)
			(resp->peer_tx_stats.payload);
	peer_id = __le16_to_cpu(tx_stats->peer_id);

	/* rcu protects the sta pointer handed to mac80211; data_lock
	 * protects the peer table lookup.
	 */
	rcu_read_lock();
	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer || !peer->sta) {
		ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n",
			    peer_id);
		goto out;
	}

	sta = peer->sta;
	for (i = 0; i < num_ppdu; i++) {
		tx_stats = (struct htt_per_peer_tx_stats_ind *)
			   (resp->peer_tx_stats.payload + i * ppdu_len);

		p_tx_stats->succ_bytes = __le32_to_cpu(tx_stats->succ_bytes);
		p_tx_stats->retry_bytes = __le32_to_cpu(tx_stats->retry_bytes);
		p_tx_stats->failed_bytes =
				__le32_to_cpu(tx_stats->failed_bytes);
		p_tx_stats->ratecode = tx_stats->ratecode;
		p_tx_stats->flags = tx_stats->flags;
		p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts);
		p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts);
		p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts);
		p_tx_stats->duration = __le16_to_cpu(tx_stats->tx_duration);

		ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
	}

out:
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();
}
4119da8fa4e3SBjoern A. Zeeb
/* Parse a 10.2-firmware pktlog record and update per-peer tx statistics.
 *
 * @data points at an ath10k_pktlog_hdr; anything other than a TX_STAT
 * record is ignored.  Each reported PPDU's counters are copied into the
 * scratch buffer ar->peer_tx_stats and applied to the peer's station via
 * ath10k_update_per_peer_tx_stats().
 *
 * Uses the single-exit goto pattern so the unlock sequence exists only
 * once, matching ath10k_htt_fetch_peer_stats() (the original duplicated
 * the spin_unlock_bh()/rcu_read_unlock() pair on the success path).
 */
static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data)
{
	struct ath10k_pktlog_hdr *hdr = (struct ath10k_pktlog_hdr *)data;
	struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
	struct ath10k_10_2_peer_tx_stats *tx_stats;
	struct ieee80211_sta *sta;
	struct ath10k_peer *peer;
	u16 log_type = __le16_to_cpu(hdr->log_type);
	u32 peer_id = 0, i;

	if (log_type != ATH_PKTLOG_TYPE_TX_STAT)
		return;

	tx_stats = (struct ath10k_10_2_peer_tx_stats *)((hdr->payload) +
		    ATH10K_10_2_TX_STATS_OFFSET);

	/* Nothing to report for this record */
	if (!tx_stats->tx_ppdu_cnt)
		return;

	peer_id = tx_stats->peer_id;

	/* rcu protects the sta pointer handed to mac80211; data_lock
	 * protects the peer table lookup.
	 */
	rcu_read_lock();
	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer || !peer->sta) {
		ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
			    peer_id);
		goto out;
	}

	sta = peer->sta;
	for (i = 0; i < tx_stats->tx_ppdu_cnt; i++) {
		p_tx_stats->succ_bytes =
			__le16_to_cpu(tx_stats->success_bytes[i]);
		p_tx_stats->retry_bytes =
			__le16_to_cpu(tx_stats->retry_bytes[i]);
		p_tx_stats->failed_bytes =
			__le16_to_cpu(tx_stats->failed_bytes[i]);
		p_tx_stats->ratecode = tx_stats->ratecode[i];
		p_tx_stats->flags = tx_stats->flags[i];
		p_tx_stats->succ_pkts = tx_stats->success_pkts[i];
		p_tx_stats->retry_pkts = tx_stats->retry_pkts[i];
		p_tx_stats->failed_pkts = tx_stats->failed_pkts[i];

		ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
	}

out:
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();
}
4175da8fa4e3SBjoern A. Zeeb
ath10k_htt_rx_pn_len(enum htt_security_types sec_type)4176da8fa4e3SBjoern A. Zeeb static int ath10k_htt_rx_pn_len(enum htt_security_types sec_type)
4177da8fa4e3SBjoern A. Zeeb {
4178da8fa4e3SBjoern A. Zeeb switch (sec_type) {
4179da8fa4e3SBjoern A. Zeeb case HTT_SECURITY_TKIP:
4180da8fa4e3SBjoern A. Zeeb case HTT_SECURITY_TKIP_NOMIC:
4181da8fa4e3SBjoern A. Zeeb case HTT_SECURITY_AES_CCMP:
4182da8fa4e3SBjoern A. Zeeb return 48;
4183da8fa4e3SBjoern A. Zeeb default:
4184da8fa4e3SBjoern A. Zeeb return 0;
4185da8fa4e3SBjoern A. Zeeb }
4186da8fa4e3SBjoern A. Zeeb }
4187da8fa4e3SBjoern A. Zeeb
ath10k_htt_rx_sec_ind_handler(struct ath10k * ar,struct htt_security_indication * ev)4188da8fa4e3SBjoern A. Zeeb static void ath10k_htt_rx_sec_ind_handler(struct ath10k *ar,
4189da8fa4e3SBjoern A. Zeeb struct htt_security_indication *ev)
4190da8fa4e3SBjoern A. Zeeb {
4191da8fa4e3SBjoern A. Zeeb enum htt_txrx_sec_cast_type sec_index;
4192da8fa4e3SBjoern A. Zeeb enum htt_security_types sec_type;
4193da8fa4e3SBjoern A. Zeeb struct ath10k_peer *peer;
4194da8fa4e3SBjoern A. Zeeb
4195da8fa4e3SBjoern A. Zeeb spin_lock_bh(&ar->data_lock);
4196da8fa4e3SBjoern A. Zeeb
4197da8fa4e3SBjoern A. Zeeb peer = ath10k_peer_find_by_id(ar, __le16_to_cpu(ev->peer_id));
4198da8fa4e3SBjoern A. Zeeb if (!peer) {
4199da8fa4e3SBjoern A. Zeeb ath10k_warn(ar, "failed to find peer id %d for security indication",
4200da8fa4e3SBjoern A. Zeeb __le16_to_cpu(ev->peer_id));
4201da8fa4e3SBjoern A. Zeeb goto out;
4202da8fa4e3SBjoern A. Zeeb }
4203da8fa4e3SBjoern A. Zeeb
4204da8fa4e3SBjoern A. Zeeb sec_type = MS(ev->flags, HTT_SECURITY_TYPE);
4205da8fa4e3SBjoern A. Zeeb
4206da8fa4e3SBjoern A. Zeeb if (ev->flags & HTT_SECURITY_IS_UNICAST)
4207da8fa4e3SBjoern A. Zeeb sec_index = HTT_TXRX_SEC_UCAST;
4208da8fa4e3SBjoern A. Zeeb else
4209da8fa4e3SBjoern A. Zeeb sec_index = HTT_TXRX_SEC_MCAST;
4210da8fa4e3SBjoern A. Zeeb
4211da8fa4e3SBjoern A. Zeeb peer->rx_pn[sec_index].sec_type = sec_type;
4212da8fa4e3SBjoern A. Zeeb peer->rx_pn[sec_index].pn_len = ath10k_htt_rx_pn_len(sec_type);
4213da8fa4e3SBjoern A. Zeeb
4214da8fa4e3SBjoern A. Zeeb memset(peer->tids_last_pn_valid, 0, sizeof(peer->tids_last_pn_valid));
4215da8fa4e3SBjoern A. Zeeb memset(peer->tids_last_pn, 0, sizeof(peer->tids_last_pn));
4216da8fa4e3SBjoern A. Zeeb
4217da8fa4e3SBjoern A. Zeeb out:
4218da8fa4e3SBjoern A. Zeeb spin_unlock_bh(&ar->data_lock);
4219da8fa4e3SBjoern A. Zeeb }
4220da8fa4e3SBjoern A. Zeeb
/* Top-level dispatcher for HTT target-to-host (T2H) messages.
 *
 * Return value signals skb ownership: true means the caller still owns
 * @skb and must free it; false means the skb was queued for deferred
 * processing (rx indication on HL devices, in-order completions, frag
 * indications) and must NOT be freed by the caller.
 */
bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	enum htt_t2h_msg_type type;

	/* confirm alignment */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn(ar, "unaligned htt message, expect trouble\n");

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
		   resp->hdr.msg_type);

	/* Raw msg_type values are firmware-ABI specific; translate through
	 * the per-firmware lookup table before dispatching.
	 */
	if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X",
			   resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
		return true;
	}
	type = ar->htt.t2h_msg_types[resp->hdr.msg_type];

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IND:
		if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) {
			ath10k_htt_rx_proc_rx_ind_ll(htt, &resp->rx_ind);
		} else {
			/* High-latency devices process rx indications in
			 * NAPI context; skb ownership moves to the queue.
			 */
			skb_queue_tail(&htt->rx_indication_head, skb);
			return false;
		}
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		struct ath10k_htt *htt = &ar->htt;
		struct ath10k_htc *htc = &ar->htc;
		struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid];
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
		int info = __le32_to_cpu(resp->mgmt_tx_completion.info);

		tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			tx_done.status = HTT_TX_COMPL_STATE_ACK;
			/* ack_rssi is only valid when the firmware both
			 * advertises the service and sets the flag.
			 */
			if (test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
				     ar->wmi.svc_map) &&
			    (resp->mgmt_tx_completion.flags &
			     HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI)) {
				tx_done.ack_rssi =
				FIELD_GET(HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK,
					  info);
			}
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.status = HTT_TX_COMPL_STATE_NOACK;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
			break;
		}

		/* Without per-frame tx completions, credits are returned
		 * here instead.
		 */
		if (htt->disable_tx_comp) {
			spin_lock_bh(&htc->tx_lock);
			ep->tx_credits++;
			spin_unlock_bh(&htc->tx_lock);
		}

		status = ath10k_txrx_tx_unref(htt, &tx_done);
		if (!status) {
			spin_lock_bh(&htt->tx_lock);
			ath10k_htt_tx_mgmt_dec_pending(htt);
			spin_unlock_bh(&htt->tx_lock);
		}
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
		ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct ath10k *ar = htt->ar;
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_htt_rx_sec_ind_handler(ar, ev);
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			   __le16_to_cpu(ev->peer_id),
			   !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			   MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		atomic_inc(&htt->num_mpdus_ready);

		/* Return value decides whether the caller frees the skb */
		return ath10k_htt_rx_proc_rx_frag_ind(htt,
						      &resp->rx_frag_ind,
						      skb);
	}
	case HTT_T2H_MSG_TYPE_TEST:
		break;
	case HTT_T2H_MSG_TYPE_STATS_CONF:
		trace_ath10k_htt_stats(ar, skb->data, skb->len);
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
		/* Firmware can return tx frames if it's unable to fully
		 * process them and suspects host may be able to fix it. ath10k
		 * sends all tx frames as already inspected so this shouldn't
		 * happen unless fw has a bug.
		 */
		ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
		break;
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
		ath10k_htt_rx_addba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_RX_DELBA:
		ath10k_htt_rx_delba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG: {
		trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
					skb->len -
					offsetof(struct htt_resp,
						 pktlog_msg.payload));

		/* 10.2 firmware delivers tx stats inside pktlog records */
		if (ath10k_peer_stats_enabled(ar))
			ath10k_fetch_10_2_tx_stats(ar,
						   resp->pktlog_msg.payload);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
		/* Ignore this event because mac80211 takes care of Rx
		 * aggregation reordering.
		 */
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
		/* Deferred to NAPI context; skb ownership moves to the queue */
		skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
		return false;
	}
	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND: {
		struct ath10k_htt *htt = &ar->htt;
		struct ath10k_htc *htc = &ar->htc;
		struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid];
		u32 msg_word = __le32_to_cpu(*(__le32 *)resp);
		int htt_credit_delta;

		/* Delta is sent as magnitude plus a separate sign bit */
		htt_credit_delta = HTT_TX_CREDIT_DELTA_ABS_GET(msg_word);
		if (HTT_TX_CREDIT_SIGN_BIT_GET(msg_word))
			htt_credit_delta = -htt_credit_delta;

		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt credit update delta %d\n",
			   htt_credit_delta);

		if (htt->disable_tx_comp) {
			spin_lock_bh(&htc->tx_lock);
			ep->tx_credits += htt_credit_delta;
			spin_unlock_bh(&htc->tx_lock);
			ath10k_dbg(ar, ATH10K_DBG_HTT,
				   "htt credit total %d\n",
				   ep->tx_credits);
			ep->ep_ops.ep_tx_credits(htc->ar);
		}
		break;
	}
	case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
		u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
		u32 freq = __le32_to_cpu(resp->chan_change.freq);

		ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq);
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt chan change freq %u phymode %s\n",
			   freq, ath10k_wmi_phymode_str(phymode));
		break;
	}
	case HTT_T2H_MSG_TYPE_AGGR_CONF:
		break;
	case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
		/* Copied because the original skb is freed by the caller
		 * while the fetch ind is processed later from a queue.
		 */
		struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);

		if (!tx_fetch_ind) {
			ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
			break;
		}
		skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
		ath10k_htt_rx_tx_fetch_confirm(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
		ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_PEER_STATS:
		ath10k_htt_fetch_peer_stats(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_EN_STATS:
	default:
		ath10k_warn(ar, "htt event (%d) not handled\n",
			    resp->hdr.msg_type);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		break;
	}
	return true;
}
EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
4449da8fa4e3SBjoern A. Zeeb
/* Completion handler for pktlog buffers delivered over a dedicated HTC
 * endpoint: emit the payload as a tracepoint, then release the skb
 * (this path consumes the skb, unlike ath10k_htt_t2h_msg_handler()).
 */
void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
					     struct sk_buff *skb)
{
	trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
4457da8fa4e3SBjoern A. Zeeb
ath10k_htt_rx_deliver_msdu(struct ath10k * ar,int quota,int budget)4458da8fa4e3SBjoern A. Zeeb static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
4459da8fa4e3SBjoern A. Zeeb {
4460da8fa4e3SBjoern A. Zeeb struct sk_buff *skb;
4461da8fa4e3SBjoern A. Zeeb
4462da8fa4e3SBjoern A. Zeeb while (quota < budget) {
4463da8fa4e3SBjoern A. Zeeb if (skb_queue_empty(&ar->htt.rx_msdus_q))
4464da8fa4e3SBjoern A. Zeeb break;
4465da8fa4e3SBjoern A. Zeeb
4466da8fa4e3SBjoern A. Zeeb skb = skb_dequeue(&ar->htt.rx_msdus_q);
4467da8fa4e3SBjoern A. Zeeb if (!skb)
4468da8fa4e3SBjoern A. Zeeb break;
4469da8fa4e3SBjoern A. Zeeb ath10k_process_rx(ar, skb);
4470da8fa4e3SBjoern A. Zeeb quota++;
4471da8fa4e3SBjoern A. Zeeb }
4472da8fa4e3SBjoern A. Zeeb
4473da8fa4e3SBjoern A. Zeeb return quota;
4474da8fa4e3SBjoern A. Zeeb }
4475da8fa4e3SBjoern A. Zeeb
ath10k_htt_rx_hl_indication(struct ath10k * ar,int budget)4476da8fa4e3SBjoern A. Zeeb int ath10k_htt_rx_hl_indication(struct ath10k *ar, int budget)
4477da8fa4e3SBjoern A. Zeeb {
4478da8fa4e3SBjoern A. Zeeb struct htt_resp *resp;
4479da8fa4e3SBjoern A. Zeeb struct ath10k_htt *htt = &ar->htt;
4480da8fa4e3SBjoern A. Zeeb struct sk_buff *skb;
4481da8fa4e3SBjoern A. Zeeb bool release;
4482da8fa4e3SBjoern A. Zeeb int quota;
4483da8fa4e3SBjoern A. Zeeb
4484da8fa4e3SBjoern A. Zeeb for (quota = 0; quota < budget; quota++) {
4485da8fa4e3SBjoern A. Zeeb skb = skb_dequeue(&htt->rx_indication_head);
4486da8fa4e3SBjoern A. Zeeb if (!skb)
4487da8fa4e3SBjoern A. Zeeb break;
4488da8fa4e3SBjoern A. Zeeb
4489da8fa4e3SBjoern A. Zeeb resp = (struct htt_resp *)skb->data;
4490da8fa4e3SBjoern A. Zeeb
4491da8fa4e3SBjoern A. Zeeb release = ath10k_htt_rx_proc_rx_ind_hl(htt,
4492da8fa4e3SBjoern A. Zeeb &resp->rx_ind_hl,
4493da8fa4e3SBjoern A. Zeeb skb,
4494da8fa4e3SBjoern A. Zeeb HTT_RX_PN_CHECK,
4495da8fa4e3SBjoern A. Zeeb HTT_RX_NON_TKIP_MIC);
4496da8fa4e3SBjoern A. Zeeb
4497da8fa4e3SBjoern A. Zeeb if (release)
4498da8fa4e3SBjoern A. Zeeb dev_kfree_skb_any(skb);
4499da8fa4e3SBjoern A. Zeeb
4500da8fa4e3SBjoern A. Zeeb ath10k_dbg(ar, ATH10K_DBG_HTT, "rx indication poll pending count:%d\n",
4501da8fa4e3SBjoern A. Zeeb skb_queue_len(&htt->rx_indication_head));
4502da8fa4e3SBjoern A. Zeeb }
4503da8fa4e3SBjoern A. Zeeb return quota;
4504da8fa4e3SBjoern A. Zeeb }
4505da8fa4e3SBjoern A. Zeeb EXPORT_SYMBOL(ath10k_htt_rx_hl_indication);
4506da8fa4e3SBjoern A. Zeeb
/* NAPI poll worker for HTT tx/rx completions.
 *
 * Processing order: deliver already-decoded MSDUs, drain in-order rx
 * completions, handle ready MPDUs, deliver the newly decoded MSDUs, then
 * reap tx completions and fetch indications.  Returns the amount of
 * budget consumed; returning the full @budget asks NAPI to repoll.
 */
int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {};
	struct sk_buff_head tx_ind_q;
	struct sk_buff *skb;
	unsigned long flags;
	int quota = 0, done, ret;
	bool resched_napi = false;

	__skb_queue_head_init(&tx_ind_q);

	/* Process pending frames before dequeuing more data
	 * from hardware.
	 */
	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
	if (quota == budget) {
		resched_napi = true;
		goto exit;
	}

	while ((skb = skb_dequeue(&htt->rx_in_ord_compl_q))) {
		spin_lock_bh(&htt->rx_ring.lock);
		ret = ath10k_htt_rx_in_ord_ind(ar, skb);
		spin_unlock_bh(&htt->rx_ring.lock);

		dev_kfree_skb_any(skb);
		/* -EIO signals rx ring trouble; repoll rather than stall */
		if (ret == -EIO) {
			resched_napi = true;
			goto exit;
		}
	}

	while (atomic_read(&htt->num_mpdus_ready)) {
		ret = ath10k_htt_rx_handle_amsdu(htt);
		if (ret == -EIO) {
			resched_napi = true;
			goto exit;
		}
		atomic_dec(&htt->num_mpdus_ready);
	}

	/* Deliver received data after processing data from hardware */
	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);

	/* From NAPI documentation:
	 * The napi poll() function may also process TX completions, in which
	 * case if it processes the entire TX ring then it should count that
	 * work as the rest of the budget.
	 */
	if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
		quota = budget;

	/* kfifo_get: called only within txrx_tasklet so it's neatly serialized.
	 * From kfifo_get() documentation:
	 * Note that with only one concurrent reader and one concurrent writer,
	 * you don't need extra locking to use these macro.
	 */
	while (kfifo_get(&htt->txdone_fifo, &tx_done))
		ath10k_txrx_tx_unref(htt, &tx_done);

	ath10k_mac_tx_push_pending(ar);

	/* Splice the fetch-ind queue under its lock, then process the
	 * private list without holding it.
	 */
	spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
	skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
	spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);

	while ((skb = __skb_dequeue(&tx_ind_q))) {
		ath10k_htt_rx_tx_fetch_ind(ar, skb);
		dev_kfree_skb_any(skb);
	}

exit:
	ath10k_htt_rx_msdu_buff_replenish(htt);
	/* In case of rx failure or more data to read, report budget
	 * to reschedule NAPI poll
	 */
	done = resched_napi ? budget : quota;

	return done;
}
EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);
4589da8fa4e3SBjoern A. Zeeb
4590da8fa4e3SBjoern A. Zeeb static const struct ath10k_htt_rx_ops htt_rx_ops_32 = {
4591da8fa4e3SBjoern A. Zeeb .htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32,
4592da8fa4e3SBjoern A. Zeeb .htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32,
4593da8fa4e3SBjoern A. Zeeb .htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32,
4594da8fa4e3SBjoern A. Zeeb .htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32,
4595da8fa4e3SBjoern A. Zeeb .htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32,
4596da8fa4e3SBjoern A. Zeeb };
4597da8fa4e3SBjoern A. Zeeb
4598da8fa4e3SBjoern A. Zeeb static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
4599da8fa4e3SBjoern A. Zeeb .htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64,
4600da8fa4e3SBjoern A. Zeeb .htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64,
4601da8fa4e3SBjoern A. Zeeb .htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64,
4602da8fa4e3SBjoern A. Zeeb .htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64,
4603da8fa4e3SBjoern A. Zeeb .htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
4604da8fa4e3SBjoern A. Zeeb };
4605da8fa4e3SBjoern A. Zeeb
/* Rx ops for high-latency (HL) targets: only the fragment-indication
 * handler is provided; the paddr ring callbacks stay NULL since HL
 * devices do not use a host-managed rx descriptor ring here.
 */
static const struct ath10k_htt_rx_ops htt_rx_ops_hl = {
	.htt_rx_proc_rx_frag_ind = ath10k_htt_rx_proc_rx_frag_ind_hl,
};
4609da8fa4e3SBjoern A. Zeeb
ath10k_htt_set_rx_ops(struct ath10k_htt * htt)4610da8fa4e3SBjoern A. Zeeb void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
4611da8fa4e3SBjoern A. Zeeb {
4612da8fa4e3SBjoern A. Zeeb struct ath10k *ar = htt->ar;
4613da8fa4e3SBjoern A. Zeeb
4614da8fa4e3SBjoern A. Zeeb if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
4615da8fa4e3SBjoern A. Zeeb htt->rx_ops = &htt_rx_ops_hl;
4616da8fa4e3SBjoern A. Zeeb else if (ar->hw_params.target_64bit)
4617da8fa4e3SBjoern A. Zeeb htt->rx_ops = &htt_rx_ops_64;
4618da8fa4e3SBjoern A. Zeeb else
4619da8fa4e3SBjoern A. Zeeb htt->rx_ops = &htt_rx_ops_32;
4620da8fa4e3SBjoern A. Zeeb }
4621