1dd4f32aeSBjoern A. Zeeb // SPDX-License-Identifier: BSD-3-Clause-Clear
2dd4f32aeSBjoern A. Zeeb /*
3dd4f32aeSBjoern A. Zeeb * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
4dd4f32aeSBjoern A. Zeeb */
5dd4f32aeSBjoern A. Zeeb
6dd4f32aeSBjoern A. Zeeb #include <linux/ieee80211.h>
7dd4f32aeSBjoern A. Zeeb #include <linux/kernel.h>
8dd4f32aeSBjoern A. Zeeb #include <linux/skbuff.h>
9dd4f32aeSBjoern A. Zeeb #include <crypto/hash.h>
10dd4f32aeSBjoern A. Zeeb #include "core.h"
11dd4f32aeSBjoern A. Zeeb #include "debug.h"
12dd4f32aeSBjoern A. Zeeb #include "debugfs_htt_stats.h"
13dd4f32aeSBjoern A. Zeeb #include "debugfs_sta.h"
14dd4f32aeSBjoern A. Zeeb #include "hal_desc.h"
15dd4f32aeSBjoern A. Zeeb #include "hw.h"
16dd4f32aeSBjoern A. Zeeb #include "dp_rx.h"
17dd4f32aeSBjoern A. Zeeb #include "hal_rx.h"
18dd4f32aeSBjoern A. Zeeb #include "dp_tx.h"
19dd4f32aeSBjoern A. Zeeb #include "peer.h"
20dd4f32aeSBjoern A. Zeeb
21dd4f32aeSBjoern A. Zeeb #define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)
22dd4f32aeSBjoern A. Zeeb
23dd4f32aeSBjoern A. Zeeb static inline
ath11k_dp_rx_h_80211_hdr(struct ath11k_base * ab,struct hal_rx_desc * desc)24dd4f32aeSBjoern A. Zeeb u8 *ath11k_dp_rx_h_80211_hdr(struct ath11k_base *ab, struct hal_rx_desc *desc)
25dd4f32aeSBjoern A. Zeeb {
26dd4f32aeSBjoern A. Zeeb return ab->hw_params.hw_ops->rx_desc_get_hdr_status(desc);
27dd4f32aeSBjoern A. Zeeb }
28dd4f32aeSBjoern A. Zeeb
29dd4f32aeSBjoern A. Zeeb static inline
ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_base * ab,struct hal_rx_desc * desc)30dd4f32aeSBjoern A. Zeeb enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_base *ab,
31dd4f32aeSBjoern A. Zeeb struct hal_rx_desc *desc)
32dd4f32aeSBjoern A. Zeeb {
33dd4f32aeSBjoern A. Zeeb if (!ab->hw_params.hw_ops->rx_desc_encrypt_valid(desc))
34dd4f32aeSBjoern A. Zeeb return HAL_ENCRYPT_TYPE_OPEN;
35dd4f32aeSBjoern A. Zeeb
36dd4f32aeSBjoern A. Zeeb return ab->hw_params.hw_ops->rx_desc_get_encrypt_type(desc);
37dd4f32aeSBjoern A. Zeeb }
38dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_h_msdu_start_decap_type(struct ath11k_base * ab,struct hal_rx_desc * desc)39dd4f32aeSBjoern A. Zeeb static inline u8 ath11k_dp_rx_h_msdu_start_decap_type(struct ath11k_base *ab,
40dd4f32aeSBjoern A. Zeeb struct hal_rx_desc *desc)
41dd4f32aeSBjoern A. Zeeb {
42dd4f32aeSBjoern A. Zeeb return ab->hw_params.hw_ops->rx_desc_get_decap_type(desc);
43dd4f32aeSBjoern A. Zeeb }
44dd4f32aeSBjoern A. Zeeb
45dd4f32aeSBjoern A. Zeeb static inline
ath11k_dp_rx_h_msdu_start_ldpc_support(struct ath11k_base * ab,struct hal_rx_desc * desc)46dd4f32aeSBjoern A. Zeeb bool ath11k_dp_rx_h_msdu_start_ldpc_support(struct ath11k_base *ab,
47dd4f32aeSBjoern A. Zeeb struct hal_rx_desc *desc)
48dd4f32aeSBjoern A. Zeeb {
49dd4f32aeSBjoern A. Zeeb return ab->hw_params.hw_ops->rx_desc_get_ldpc_support(desc);
50dd4f32aeSBjoern A. Zeeb }
51dd4f32aeSBjoern A. Zeeb
52dd4f32aeSBjoern A. Zeeb static inline
ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base * ab,struct hal_rx_desc * desc)53dd4f32aeSBjoern A. Zeeb u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base *ab,
54dd4f32aeSBjoern A. Zeeb struct hal_rx_desc *desc)
55dd4f32aeSBjoern A. Zeeb {
56dd4f32aeSBjoern A. Zeeb return ab->hw_params.hw_ops->rx_desc_get_mesh_ctl(desc);
57dd4f32aeSBjoern A. Zeeb }
58dd4f32aeSBjoern A. Zeeb
59dd4f32aeSBjoern A. Zeeb static inline
ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct ath11k_base * ab,struct hal_rx_desc * desc)60dd4f32aeSBjoern A. Zeeb bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct ath11k_base *ab,
61dd4f32aeSBjoern A. Zeeb struct hal_rx_desc *desc)
62dd4f32aeSBjoern A. Zeeb {
63dd4f32aeSBjoern A. Zeeb return ab->hw_params.hw_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
64dd4f32aeSBjoern A. Zeeb }
65dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_h_mpdu_start_fc_valid(struct ath11k_base * ab,struct hal_rx_desc * desc)66dd4f32aeSBjoern A. Zeeb static inline bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct ath11k_base *ab,
67dd4f32aeSBjoern A. Zeeb struct hal_rx_desc *desc)
68dd4f32aeSBjoern A. Zeeb {
69dd4f32aeSBjoern A. Zeeb return ab->hw_params.hw_ops->rx_desc_get_mpdu_fc_valid(desc);
70dd4f32aeSBjoern A. Zeeb }
71dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base * ab,struct sk_buff * skb)72dd4f32aeSBjoern A. Zeeb static inline bool ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base *ab,
73dd4f32aeSBjoern A. Zeeb struct sk_buff *skb)
74dd4f32aeSBjoern A. Zeeb {
75dd4f32aeSBjoern A. Zeeb struct ieee80211_hdr *hdr;
76dd4f32aeSBjoern A. Zeeb
77dd4f32aeSBjoern A. Zeeb hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
78dd4f32aeSBjoern A. Zeeb return ieee80211_has_morefrags(hdr->frame_control);
79dd4f32aeSBjoern A. Zeeb }
80dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base * ab,struct sk_buff * skb)81dd4f32aeSBjoern A. Zeeb static inline u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base *ab,
82dd4f32aeSBjoern A. Zeeb struct sk_buff *skb)
83dd4f32aeSBjoern A. Zeeb {
84dd4f32aeSBjoern A. Zeeb struct ieee80211_hdr *hdr;
85dd4f32aeSBjoern A. Zeeb
86dd4f32aeSBjoern A. Zeeb hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
87dd4f32aeSBjoern A. Zeeb return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
88dd4f32aeSBjoern A. Zeeb }
89dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_h_mpdu_start_seq_no(struct ath11k_base * ab,struct hal_rx_desc * desc)90dd4f32aeSBjoern A. Zeeb static inline u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct ath11k_base *ab,
91dd4f32aeSBjoern A. Zeeb struct hal_rx_desc *desc)
92dd4f32aeSBjoern A. Zeeb {
93dd4f32aeSBjoern A. Zeeb return ab->hw_params.hw_ops->rx_desc_get_mpdu_start_seq_no(desc);
94dd4f32aeSBjoern A. Zeeb }
95dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_get_attention(struct ath11k_base * ab,struct hal_rx_desc * desc)96dd4f32aeSBjoern A. Zeeb static inline void *ath11k_dp_rx_get_attention(struct ath11k_base *ab,
97dd4f32aeSBjoern A. Zeeb struct hal_rx_desc *desc)
98dd4f32aeSBjoern A. Zeeb {
99dd4f32aeSBjoern A. Zeeb return ab->hw_params.hw_ops->rx_desc_get_attention(desc);
100dd4f32aeSBjoern A. Zeeb }
101dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_h_attn_msdu_done(struct rx_attention * attn)102dd4f32aeSBjoern A. Zeeb static inline bool ath11k_dp_rx_h_attn_msdu_done(struct rx_attention *attn)
103dd4f32aeSBjoern A. Zeeb {
104dd4f32aeSBjoern A. Zeeb return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE,
105dd4f32aeSBjoern A. Zeeb __le32_to_cpu(attn->info2));
106dd4f32aeSBjoern A. Zeeb }
107dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_h_attn_l4_cksum_fail(struct rx_attention * attn)108dd4f32aeSBjoern A. Zeeb static inline bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct rx_attention *attn)
109dd4f32aeSBjoern A. Zeeb {
110dd4f32aeSBjoern A. Zeeb return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL,
111dd4f32aeSBjoern A. Zeeb __le32_to_cpu(attn->info1));
112dd4f32aeSBjoern A. Zeeb }
113dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_h_attn_ip_cksum_fail(struct rx_attention * attn)114dd4f32aeSBjoern A. Zeeb static inline bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct rx_attention *attn)
115dd4f32aeSBjoern A. Zeeb {
116dd4f32aeSBjoern A. Zeeb return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL,
117dd4f32aeSBjoern A. Zeeb __le32_to_cpu(attn->info1));
118dd4f32aeSBjoern A. Zeeb }
119dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_h_attn_is_decrypted(struct rx_attention * attn)120dd4f32aeSBjoern A. Zeeb static inline bool ath11k_dp_rx_h_attn_is_decrypted(struct rx_attention *attn)
121dd4f32aeSBjoern A. Zeeb {
122dd4f32aeSBjoern A. Zeeb return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
123dd4f32aeSBjoern A. Zeeb __le32_to_cpu(attn->info2)) ==
124dd4f32aeSBjoern A. Zeeb RX_DESC_DECRYPT_STATUS_CODE_OK);
125dd4f32aeSBjoern A. Zeeb }
126dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_h_attn_mpdu_err(struct rx_attention * attn)127dd4f32aeSBjoern A. Zeeb static u32 ath11k_dp_rx_h_attn_mpdu_err(struct rx_attention *attn)
128dd4f32aeSBjoern A. Zeeb {
129dd4f32aeSBjoern A. Zeeb u32 info = __le32_to_cpu(attn->info1);
130dd4f32aeSBjoern A. Zeeb u32 errmap = 0;
131dd4f32aeSBjoern A. Zeeb
132dd4f32aeSBjoern A. Zeeb if (info & RX_ATTENTION_INFO1_FCS_ERR)
133dd4f32aeSBjoern A. Zeeb errmap |= DP_RX_MPDU_ERR_FCS;
134dd4f32aeSBjoern A. Zeeb
135dd4f32aeSBjoern A. Zeeb if (info & RX_ATTENTION_INFO1_DECRYPT_ERR)
136dd4f32aeSBjoern A. Zeeb errmap |= DP_RX_MPDU_ERR_DECRYPT;
137dd4f32aeSBjoern A. Zeeb
138dd4f32aeSBjoern A. Zeeb if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR)
139dd4f32aeSBjoern A. Zeeb errmap |= DP_RX_MPDU_ERR_TKIP_MIC;
140dd4f32aeSBjoern A. Zeeb
141dd4f32aeSBjoern A. Zeeb if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR)
142dd4f32aeSBjoern A. Zeeb errmap |= DP_RX_MPDU_ERR_AMSDU_ERR;
143dd4f32aeSBjoern A. Zeeb
144dd4f32aeSBjoern A. Zeeb if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR)
145dd4f32aeSBjoern A. Zeeb errmap |= DP_RX_MPDU_ERR_OVERFLOW;
146dd4f32aeSBjoern A. Zeeb
147dd4f32aeSBjoern A. Zeeb if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR)
148dd4f32aeSBjoern A. Zeeb errmap |= DP_RX_MPDU_ERR_MSDU_LEN;
149dd4f32aeSBjoern A. Zeeb
150dd4f32aeSBjoern A. Zeeb if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR)
151dd4f32aeSBjoern A. Zeeb errmap |= DP_RX_MPDU_ERR_MPDU_LEN;
152dd4f32aeSBjoern A. Zeeb
153dd4f32aeSBjoern A. Zeeb return errmap;
154dd4f32aeSBjoern A. Zeeb }
155dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_h_attn_msdu_len_err(struct ath11k_base * ab,struct hal_rx_desc * desc)156dd4f32aeSBjoern A. Zeeb static bool ath11k_dp_rx_h_attn_msdu_len_err(struct ath11k_base *ab,
157dd4f32aeSBjoern A. Zeeb struct hal_rx_desc *desc)
158dd4f32aeSBjoern A. Zeeb {
159dd4f32aeSBjoern A. Zeeb struct rx_attention *rx_attention;
160dd4f32aeSBjoern A. Zeeb u32 errmap;
161dd4f32aeSBjoern A. Zeeb
162dd4f32aeSBjoern A. Zeeb rx_attention = ath11k_dp_rx_get_attention(ab, desc);
163dd4f32aeSBjoern A. Zeeb errmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
164dd4f32aeSBjoern A. Zeeb
165dd4f32aeSBjoern A. Zeeb return errmap & DP_RX_MPDU_ERR_MSDU_LEN;
166dd4f32aeSBjoern A. Zeeb }
167dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_h_msdu_start_msdu_len(struct ath11k_base * ab,struct hal_rx_desc * desc)168dd4f32aeSBjoern A. Zeeb static inline u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct ath11k_base *ab,
169dd4f32aeSBjoern A. Zeeb struct hal_rx_desc *desc)
170dd4f32aeSBjoern A. Zeeb {
171dd4f32aeSBjoern A. Zeeb return ab->hw_params.hw_ops->rx_desc_get_msdu_len(desc);
172dd4f32aeSBjoern A. Zeeb }
173dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_h_msdu_start_sgi(struct ath11k_base * ab,struct hal_rx_desc * desc)174dd4f32aeSBjoern A. Zeeb static inline u8 ath11k_dp_rx_h_msdu_start_sgi(struct ath11k_base *ab,
175dd4f32aeSBjoern A. Zeeb struct hal_rx_desc *desc)
176dd4f32aeSBjoern A. Zeeb {
177dd4f32aeSBjoern A. Zeeb return ab->hw_params.hw_ops->rx_desc_get_msdu_sgi(desc);
178dd4f32aeSBjoern A. Zeeb }
179dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_h_msdu_start_rate_mcs(struct ath11k_base * ab,struct hal_rx_desc * desc)180dd4f32aeSBjoern A. Zeeb static inline u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct ath11k_base *ab,
181dd4f32aeSBjoern A. Zeeb struct hal_rx_desc *desc)
182dd4f32aeSBjoern A. Zeeb {
183dd4f32aeSBjoern A. Zeeb return ab->hw_params.hw_ops->rx_desc_get_msdu_rate_mcs(desc);
184dd4f32aeSBjoern A. Zeeb }
185dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_h_msdu_start_rx_bw(struct ath11k_base * ab,struct hal_rx_desc * desc)186dd4f32aeSBjoern A. Zeeb static inline u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct ath11k_base *ab,
187dd4f32aeSBjoern A. Zeeb struct hal_rx_desc *desc)
188dd4f32aeSBjoern A. Zeeb {
189dd4f32aeSBjoern A. Zeeb return ab->hw_params.hw_ops->rx_desc_get_msdu_rx_bw(desc);
190dd4f32aeSBjoern A. Zeeb }
191dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_h_msdu_start_freq(struct ath11k_base * ab,struct hal_rx_desc * desc)192dd4f32aeSBjoern A. Zeeb static inline u32 ath11k_dp_rx_h_msdu_start_freq(struct ath11k_base *ab,
193dd4f32aeSBjoern A. Zeeb struct hal_rx_desc *desc)
194dd4f32aeSBjoern A. Zeeb {
195dd4f32aeSBjoern A. Zeeb return ab->hw_params.hw_ops->rx_desc_get_msdu_freq(desc);
196dd4f32aeSBjoern A. Zeeb }
197dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_h_msdu_start_pkt_type(struct ath11k_base * ab,struct hal_rx_desc * desc)198dd4f32aeSBjoern A. Zeeb static inline u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct ath11k_base *ab,
199dd4f32aeSBjoern A. Zeeb struct hal_rx_desc *desc)
200dd4f32aeSBjoern A. Zeeb {
201dd4f32aeSBjoern A. Zeeb return ab->hw_params.hw_ops->rx_desc_get_msdu_pkt_type(desc);
202dd4f32aeSBjoern A. Zeeb }
203dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_h_msdu_start_nss(struct ath11k_base * ab,struct hal_rx_desc * desc)204dd4f32aeSBjoern A. Zeeb static inline u8 ath11k_dp_rx_h_msdu_start_nss(struct ath11k_base *ab,
205dd4f32aeSBjoern A. Zeeb struct hal_rx_desc *desc)
206dd4f32aeSBjoern A. Zeeb {
207dd4f32aeSBjoern A. Zeeb return hweight8(ab->hw_params.hw_ops->rx_desc_get_msdu_nss(desc));
208dd4f32aeSBjoern A. Zeeb }
209dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_h_mpdu_start_tid(struct ath11k_base * ab,struct hal_rx_desc * desc)210dd4f32aeSBjoern A. Zeeb static inline u8 ath11k_dp_rx_h_mpdu_start_tid(struct ath11k_base *ab,
211dd4f32aeSBjoern A. Zeeb struct hal_rx_desc *desc)
212dd4f32aeSBjoern A. Zeeb {
213dd4f32aeSBjoern A. Zeeb return ab->hw_params.hw_ops->rx_desc_get_mpdu_tid(desc);
214dd4f32aeSBjoern A. Zeeb }
215dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_h_mpdu_start_peer_id(struct ath11k_base * ab,struct hal_rx_desc * desc)216dd4f32aeSBjoern A. Zeeb static inline u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct ath11k_base *ab,
217dd4f32aeSBjoern A. Zeeb struct hal_rx_desc *desc)
218dd4f32aeSBjoern A. Zeeb {
219dd4f32aeSBjoern A. Zeeb return ab->hw_params.hw_ops->rx_desc_get_mpdu_peer_id(desc);
220dd4f32aeSBjoern A. Zeeb }
221dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_h_msdu_end_l3pad(struct ath11k_base * ab,struct hal_rx_desc * desc)222dd4f32aeSBjoern A. Zeeb static inline u8 ath11k_dp_rx_h_msdu_end_l3pad(struct ath11k_base *ab,
223dd4f32aeSBjoern A. Zeeb struct hal_rx_desc *desc)
224dd4f32aeSBjoern A. Zeeb {
225dd4f32aeSBjoern A. Zeeb return ab->hw_params.hw_ops->rx_desc_get_l3_pad_bytes(desc);
226dd4f32aeSBjoern A. Zeeb }
227dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_h_msdu_end_first_msdu(struct ath11k_base * ab,struct hal_rx_desc * desc)228dd4f32aeSBjoern A. Zeeb static inline bool ath11k_dp_rx_h_msdu_end_first_msdu(struct ath11k_base *ab,
229dd4f32aeSBjoern A. Zeeb struct hal_rx_desc *desc)
230dd4f32aeSBjoern A. Zeeb {
231dd4f32aeSBjoern A. Zeeb return ab->hw_params.hw_ops->rx_desc_get_first_msdu(desc);
232dd4f32aeSBjoern A. Zeeb }
233dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_h_msdu_end_last_msdu(struct ath11k_base * ab,struct hal_rx_desc * desc)234dd4f32aeSBjoern A. Zeeb static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct ath11k_base *ab,
235dd4f32aeSBjoern A. Zeeb struct hal_rx_desc *desc)
236dd4f32aeSBjoern A. Zeeb {
237dd4f32aeSBjoern A. Zeeb return ab->hw_params.hw_ops->rx_desc_get_last_msdu(desc);
238dd4f32aeSBjoern A. Zeeb }
239dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_desc_end_tlv_copy(struct ath11k_base * ab,struct hal_rx_desc * fdesc,struct hal_rx_desc * ldesc)240dd4f32aeSBjoern A. Zeeb static void ath11k_dp_rx_desc_end_tlv_copy(struct ath11k_base *ab,
241dd4f32aeSBjoern A. Zeeb struct hal_rx_desc *fdesc,
242dd4f32aeSBjoern A. Zeeb struct hal_rx_desc *ldesc)
243dd4f32aeSBjoern A. Zeeb {
244dd4f32aeSBjoern A. Zeeb ab->hw_params.hw_ops->rx_desc_copy_attn_end_tlv(fdesc, ldesc);
245dd4f32aeSBjoern A. Zeeb }
246dd4f32aeSBjoern A. Zeeb
ath11k_dp_rxdesc_get_mpdulen_err(struct rx_attention * attn)247dd4f32aeSBjoern A. Zeeb static inline u32 ath11k_dp_rxdesc_get_mpdulen_err(struct rx_attention *attn)
248dd4f32aeSBjoern A. Zeeb {
249dd4f32aeSBjoern A. Zeeb return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR,
250dd4f32aeSBjoern A. Zeeb __le32_to_cpu(attn->info1));
251dd4f32aeSBjoern A. Zeeb }
252dd4f32aeSBjoern A. Zeeb
ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base * ab,struct hal_rx_desc * rx_desc)253dd4f32aeSBjoern A. Zeeb static inline u8 *ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base *ab,
254dd4f32aeSBjoern A. Zeeb struct hal_rx_desc *rx_desc)
255dd4f32aeSBjoern A. Zeeb {
256dd4f32aeSBjoern A. Zeeb u8 *rx_pkt_hdr;
257dd4f32aeSBjoern A. Zeeb
258dd4f32aeSBjoern A. Zeeb rx_pkt_hdr = ab->hw_params.hw_ops->rx_desc_get_msdu_payload(rx_desc);
259dd4f32aeSBjoern A. Zeeb
260dd4f32aeSBjoern A. Zeeb return rx_pkt_hdr;
261dd4f32aeSBjoern A. Zeeb }
262dd4f32aeSBjoern A. Zeeb
ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base * ab,struct hal_rx_desc * rx_desc)263dd4f32aeSBjoern A. Zeeb static inline bool ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base *ab,
264dd4f32aeSBjoern A. Zeeb struct hal_rx_desc *rx_desc)
265dd4f32aeSBjoern A. Zeeb {
266dd4f32aeSBjoern A. Zeeb u32 tlv_tag;
267dd4f32aeSBjoern A. Zeeb
268dd4f32aeSBjoern A. Zeeb tlv_tag = ab->hw_params.hw_ops->rx_desc_get_mpdu_start_tag(rx_desc);
269dd4f32aeSBjoern A. Zeeb
270dd4f32aeSBjoern A. Zeeb return tlv_tag == HAL_RX_MPDU_START;
271dd4f32aeSBjoern A. Zeeb }
272dd4f32aeSBjoern A. Zeeb
ath11k_dp_rxdesc_get_ppduid(struct ath11k_base * ab,struct hal_rx_desc * rx_desc)273dd4f32aeSBjoern A. Zeeb static inline u32 ath11k_dp_rxdesc_get_ppduid(struct ath11k_base *ab,
274dd4f32aeSBjoern A. Zeeb struct hal_rx_desc *rx_desc)
275dd4f32aeSBjoern A. Zeeb {
276dd4f32aeSBjoern A. Zeeb return ab->hw_params.hw_ops->rx_desc_get_mpdu_ppdu_id(rx_desc);
277dd4f32aeSBjoern A. Zeeb }
278dd4f32aeSBjoern A. Zeeb
ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base * ab,struct hal_rx_desc * desc,u16 len)279dd4f32aeSBjoern A. Zeeb static inline void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab,
280dd4f32aeSBjoern A. Zeeb struct hal_rx_desc *desc,
281dd4f32aeSBjoern A. Zeeb u16 len)
282dd4f32aeSBjoern A. Zeeb {
283dd4f32aeSBjoern A. Zeeb ab->hw_params.hw_ops->rx_desc_set_msdu_len(desc, len);
284dd4f32aeSBjoern A. Zeeb }
285dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_h_attn_is_mcbc(struct ath11k_base * ab,struct hal_rx_desc * desc)286dd4f32aeSBjoern A. Zeeb static bool ath11k_dp_rx_h_attn_is_mcbc(struct ath11k_base *ab,
287dd4f32aeSBjoern A. Zeeb struct hal_rx_desc *desc)
288dd4f32aeSBjoern A. Zeeb {
289dd4f32aeSBjoern A. Zeeb struct rx_attention *attn = ath11k_dp_rx_get_attention(ab, desc);
290dd4f32aeSBjoern A. Zeeb
291dd4f32aeSBjoern A. Zeeb return ath11k_dp_rx_h_msdu_end_first_msdu(ab, desc) &&
292dd4f32aeSBjoern A. Zeeb (!!FIELD_GET(RX_ATTENTION_INFO1_MCAST_BCAST,
293dd4f32aeSBjoern A. Zeeb __le32_to_cpu(attn->info1)));
294dd4f32aeSBjoern A. Zeeb }
295dd4f32aeSBjoern A. Zeeb
ath11k_dp_rxdesc_mac_addr2_valid(struct ath11k_base * ab,struct hal_rx_desc * desc)296dd4f32aeSBjoern A. Zeeb static bool ath11k_dp_rxdesc_mac_addr2_valid(struct ath11k_base *ab,
297dd4f32aeSBjoern A. Zeeb struct hal_rx_desc *desc)
298dd4f32aeSBjoern A. Zeeb {
299dd4f32aeSBjoern A. Zeeb return ab->hw_params.hw_ops->rx_desc_mac_addr2_valid(desc);
300dd4f32aeSBjoern A. Zeeb }
301dd4f32aeSBjoern A. Zeeb
ath11k_dp_rxdesc_mpdu_start_addr2(struct ath11k_base * ab,struct hal_rx_desc * desc)302dd4f32aeSBjoern A. Zeeb static u8 *ath11k_dp_rxdesc_mpdu_start_addr2(struct ath11k_base *ab,
303dd4f32aeSBjoern A. Zeeb struct hal_rx_desc *desc)
304dd4f32aeSBjoern A. Zeeb {
305dd4f32aeSBjoern A. Zeeb return ab->hw_params.hw_ops->rx_desc_mpdu_start_addr2(desc);
306dd4f32aeSBjoern A. Zeeb }
307dd4f32aeSBjoern A. Zeeb
ath11k_dp_service_mon_ring(struct timer_list * t)308dd4f32aeSBjoern A. Zeeb static void ath11k_dp_service_mon_ring(struct timer_list *t)
309dd4f32aeSBjoern A. Zeeb {
310dd4f32aeSBjoern A. Zeeb struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer);
311dd4f32aeSBjoern A. Zeeb int i;
312dd4f32aeSBjoern A. Zeeb
313dd4f32aeSBjoern A. Zeeb for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++)
314dd4f32aeSBjoern A. Zeeb ath11k_dp_rx_process_mon_rings(ab, i, NULL, DP_MON_SERVICE_BUDGET);
315dd4f32aeSBjoern A. Zeeb
316dd4f32aeSBjoern A. Zeeb mod_timer(&ab->mon_reap_timer, jiffies +
317dd4f32aeSBjoern A. Zeeb msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
318dd4f32aeSBjoern A. Zeeb }
319dd4f32aeSBjoern A. Zeeb
ath11k_dp_purge_mon_ring(struct ath11k_base * ab)320dd4f32aeSBjoern A. Zeeb static int ath11k_dp_purge_mon_ring(struct ath11k_base *ab)
321dd4f32aeSBjoern A. Zeeb {
322dd4f32aeSBjoern A. Zeeb int i, reaped = 0;
323dd4f32aeSBjoern A. Zeeb unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS);
324dd4f32aeSBjoern A. Zeeb
325dd4f32aeSBjoern A. Zeeb do {
326dd4f32aeSBjoern A. Zeeb for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++)
327dd4f32aeSBjoern A. Zeeb reaped += ath11k_dp_rx_process_mon_rings(ab, i,
328dd4f32aeSBjoern A. Zeeb NULL,
329dd4f32aeSBjoern A. Zeeb DP_MON_SERVICE_BUDGET);
330dd4f32aeSBjoern A. Zeeb
331dd4f32aeSBjoern A. Zeeb /* nothing more to reap */
332dd4f32aeSBjoern A. Zeeb if (reaped < DP_MON_SERVICE_BUDGET)
333dd4f32aeSBjoern A. Zeeb return 0;
334dd4f32aeSBjoern A. Zeeb
335dd4f32aeSBjoern A. Zeeb } while (time_before(jiffies, timeout));
336dd4f32aeSBjoern A. Zeeb
337dd4f32aeSBjoern A. Zeeb ath11k_warn(ab, "dp mon ring purge timeout");
338dd4f32aeSBjoern A. Zeeb
339dd4f32aeSBjoern A. Zeeb return -ETIMEDOUT;
340dd4f32aeSBjoern A. Zeeb }
341dd4f32aeSBjoern A. Zeeb
342dd4f32aeSBjoern A. Zeeb /* Returns number of Rx buffers replenished */
ath11k_dp_rxbufs_replenish(struct ath11k_base * ab,int mac_id,struct dp_rxdma_ring * rx_ring,int req_entries,enum hal_rx_buf_return_buf_manager mgr)343dd4f32aeSBjoern A. Zeeb int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
344dd4f32aeSBjoern A. Zeeb struct dp_rxdma_ring *rx_ring,
345dd4f32aeSBjoern A. Zeeb int req_entries,
346dd4f32aeSBjoern A. Zeeb enum hal_rx_buf_return_buf_manager mgr)
347dd4f32aeSBjoern A. Zeeb {
348dd4f32aeSBjoern A. Zeeb struct hal_srng *srng;
349dd4f32aeSBjoern A. Zeeb u32 *desc;
350dd4f32aeSBjoern A. Zeeb struct sk_buff *skb;
351dd4f32aeSBjoern A. Zeeb int num_free;
352dd4f32aeSBjoern A. Zeeb int num_remain;
353dd4f32aeSBjoern A. Zeeb int buf_id;
354dd4f32aeSBjoern A. Zeeb u32 cookie;
355dd4f32aeSBjoern A. Zeeb dma_addr_t paddr;
356dd4f32aeSBjoern A. Zeeb
357dd4f32aeSBjoern A. Zeeb req_entries = min(req_entries, rx_ring->bufs_max);
358dd4f32aeSBjoern A. Zeeb
359dd4f32aeSBjoern A. Zeeb srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
360dd4f32aeSBjoern A. Zeeb
361dd4f32aeSBjoern A. Zeeb spin_lock_bh(&srng->lock);
362dd4f32aeSBjoern A. Zeeb
363dd4f32aeSBjoern A. Zeeb ath11k_hal_srng_access_begin(ab, srng);
364dd4f32aeSBjoern A. Zeeb
365dd4f32aeSBjoern A. Zeeb num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
366dd4f32aeSBjoern A. Zeeb if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
367dd4f32aeSBjoern A. Zeeb req_entries = num_free;
368dd4f32aeSBjoern A. Zeeb
369dd4f32aeSBjoern A. Zeeb req_entries = min(num_free, req_entries);
370dd4f32aeSBjoern A. Zeeb num_remain = req_entries;
371dd4f32aeSBjoern A. Zeeb
372dd4f32aeSBjoern A. Zeeb while (num_remain > 0) {
373dd4f32aeSBjoern A. Zeeb skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
374dd4f32aeSBjoern A. Zeeb DP_RX_BUFFER_ALIGN_SIZE);
375dd4f32aeSBjoern A. Zeeb if (!skb)
376dd4f32aeSBjoern A. Zeeb break;
377dd4f32aeSBjoern A. Zeeb
378dd4f32aeSBjoern A. Zeeb if (!IS_ALIGNED((unsigned long)skb->data,
379dd4f32aeSBjoern A. Zeeb DP_RX_BUFFER_ALIGN_SIZE)) {
380dd4f32aeSBjoern A. Zeeb skb_pull(skb,
381dd4f32aeSBjoern A. Zeeb PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
382dd4f32aeSBjoern A. Zeeb skb->data);
383dd4f32aeSBjoern A. Zeeb }
384dd4f32aeSBjoern A. Zeeb
385dd4f32aeSBjoern A. Zeeb paddr = dma_map_single(ab->dev, skb->data,
386dd4f32aeSBjoern A. Zeeb skb->len + skb_tailroom(skb),
387dd4f32aeSBjoern A. Zeeb DMA_FROM_DEVICE);
388dd4f32aeSBjoern A. Zeeb if (dma_mapping_error(ab->dev, paddr))
389dd4f32aeSBjoern A. Zeeb goto fail_free_skb;
390dd4f32aeSBjoern A. Zeeb
391dd4f32aeSBjoern A. Zeeb spin_lock_bh(&rx_ring->idr_lock);
392*28348caeSBjoern A. Zeeb buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 1,
393*28348caeSBjoern A. Zeeb (rx_ring->bufs_max * 3) + 1, GFP_ATOMIC);
394dd4f32aeSBjoern A. Zeeb spin_unlock_bh(&rx_ring->idr_lock);
395*28348caeSBjoern A. Zeeb if (buf_id <= 0)
396dd4f32aeSBjoern A. Zeeb goto fail_dma_unmap;
397dd4f32aeSBjoern A. Zeeb
398dd4f32aeSBjoern A. Zeeb desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
399dd4f32aeSBjoern A. Zeeb if (!desc)
400dd4f32aeSBjoern A. Zeeb goto fail_idr_remove;
401dd4f32aeSBjoern A. Zeeb
402dd4f32aeSBjoern A. Zeeb ATH11K_SKB_RXCB(skb)->paddr = paddr;
403dd4f32aeSBjoern A. Zeeb
404dd4f32aeSBjoern A. Zeeb cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
405dd4f32aeSBjoern A. Zeeb FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
406dd4f32aeSBjoern A. Zeeb
407dd4f32aeSBjoern A. Zeeb num_remain--;
408dd4f32aeSBjoern A. Zeeb
409dd4f32aeSBjoern A. Zeeb ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
410dd4f32aeSBjoern A. Zeeb }
411dd4f32aeSBjoern A. Zeeb
412dd4f32aeSBjoern A. Zeeb ath11k_hal_srng_access_end(ab, srng);
413dd4f32aeSBjoern A. Zeeb
414dd4f32aeSBjoern A. Zeeb spin_unlock_bh(&srng->lock);
415dd4f32aeSBjoern A. Zeeb
416dd4f32aeSBjoern A. Zeeb return req_entries - num_remain;
417dd4f32aeSBjoern A. Zeeb
418dd4f32aeSBjoern A. Zeeb fail_idr_remove:
419dd4f32aeSBjoern A. Zeeb spin_lock_bh(&rx_ring->idr_lock);
420dd4f32aeSBjoern A. Zeeb idr_remove(&rx_ring->bufs_idr, buf_id);
421dd4f32aeSBjoern A. Zeeb spin_unlock_bh(&rx_ring->idr_lock);
422dd4f32aeSBjoern A. Zeeb fail_dma_unmap:
423dd4f32aeSBjoern A. Zeeb dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
424dd4f32aeSBjoern A. Zeeb DMA_FROM_DEVICE);
425dd4f32aeSBjoern A. Zeeb fail_free_skb:
426dd4f32aeSBjoern A. Zeeb dev_kfree_skb_any(skb);
427dd4f32aeSBjoern A. Zeeb
428dd4f32aeSBjoern A. Zeeb ath11k_hal_srng_access_end(ab, srng);
429dd4f32aeSBjoern A. Zeeb
430dd4f32aeSBjoern A. Zeeb spin_unlock_bh(&srng->lock);
431dd4f32aeSBjoern A. Zeeb
432dd4f32aeSBjoern A. Zeeb return req_entries - num_remain;
433dd4f32aeSBjoern A. Zeeb }
434dd4f32aeSBjoern A. Zeeb
ath11k_dp_rxdma_buf_ring_free(struct ath11k * ar,struct dp_rxdma_ring * rx_ring)435dd4f32aeSBjoern A. Zeeb static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar,
436dd4f32aeSBjoern A. Zeeb struct dp_rxdma_ring *rx_ring)
437dd4f32aeSBjoern A. Zeeb {
438dd4f32aeSBjoern A. Zeeb struct sk_buff *skb;
439dd4f32aeSBjoern A. Zeeb int buf_id;
440dd4f32aeSBjoern A. Zeeb
441dd4f32aeSBjoern A. Zeeb spin_lock_bh(&rx_ring->idr_lock);
442dd4f32aeSBjoern A. Zeeb idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
443dd4f32aeSBjoern A. Zeeb idr_remove(&rx_ring->bufs_idr, buf_id);
444dd4f32aeSBjoern A. Zeeb /* TODO: Understand where internal driver does this dma_unmap
445dd4f32aeSBjoern A. Zeeb * of rxdma_buffer.
446dd4f32aeSBjoern A. Zeeb */
447dd4f32aeSBjoern A. Zeeb dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
448dd4f32aeSBjoern A. Zeeb skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
449dd4f32aeSBjoern A. Zeeb dev_kfree_skb_any(skb);
450dd4f32aeSBjoern A. Zeeb }
451dd4f32aeSBjoern A. Zeeb
452dd4f32aeSBjoern A. Zeeb idr_destroy(&rx_ring->bufs_idr);
453dd4f32aeSBjoern A. Zeeb spin_unlock_bh(&rx_ring->idr_lock);
454dd4f32aeSBjoern A. Zeeb
455dd4f32aeSBjoern A. Zeeb return 0;
456dd4f32aeSBjoern A. Zeeb }
457dd4f32aeSBjoern A. Zeeb
ath11k_dp_rxdma_pdev_buf_free(struct ath11k * ar)458dd4f32aeSBjoern A. Zeeb static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar)
459dd4f32aeSBjoern A. Zeeb {
460dd4f32aeSBjoern A. Zeeb struct ath11k_pdev_dp *dp = &ar->dp;
461dd4f32aeSBjoern A. Zeeb struct ath11k_base *ab = ar->ab;
462dd4f32aeSBjoern A. Zeeb struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
463dd4f32aeSBjoern A. Zeeb int i;
464dd4f32aeSBjoern A. Zeeb
465dd4f32aeSBjoern A. Zeeb ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
466dd4f32aeSBjoern A. Zeeb
467dd4f32aeSBjoern A. Zeeb rx_ring = &dp->rxdma_mon_buf_ring;
468dd4f32aeSBjoern A. Zeeb ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
469dd4f32aeSBjoern A. Zeeb
470dd4f32aeSBjoern A. Zeeb for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
471dd4f32aeSBjoern A. Zeeb rx_ring = &dp->rx_mon_status_refill_ring[i];
472dd4f32aeSBjoern A. Zeeb ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
473dd4f32aeSBjoern A. Zeeb }
474dd4f32aeSBjoern A. Zeeb
475dd4f32aeSBjoern A. Zeeb return 0;
476dd4f32aeSBjoern A. Zeeb }
477dd4f32aeSBjoern A. Zeeb
ath11k_dp_rxdma_ring_buf_setup(struct ath11k * ar,struct dp_rxdma_ring * rx_ring,u32 ringtype)478dd4f32aeSBjoern A. Zeeb static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar,
479dd4f32aeSBjoern A. Zeeb struct dp_rxdma_ring *rx_ring,
480dd4f32aeSBjoern A. Zeeb u32 ringtype)
481dd4f32aeSBjoern A. Zeeb {
482dd4f32aeSBjoern A. Zeeb struct ath11k_pdev_dp *dp = &ar->dp;
483dd4f32aeSBjoern A. Zeeb int num_entries;
484dd4f32aeSBjoern A. Zeeb
485dd4f32aeSBjoern A. Zeeb num_entries = rx_ring->refill_buf_ring.size /
486dd4f32aeSBjoern A. Zeeb ath11k_hal_srng_get_entrysize(ar->ab, ringtype);
487dd4f32aeSBjoern A. Zeeb
488dd4f32aeSBjoern A. Zeeb rx_ring->bufs_max = num_entries;
489dd4f32aeSBjoern A. Zeeb ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries,
490dd4f32aeSBjoern A. Zeeb ar->ab->hw_params.hal_params->rx_buf_rbm);
491dd4f32aeSBjoern A. Zeeb return 0;
492dd4f32aeSBjoern A. Zeeb }
493dd4f32aeSBjoern A. Zeeb
ath11k_dp_rxdma_pdev_buf_setup(struct ath11k * ar)494dd4f32aeSBjoern A. Zeeb static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar)
495dd4f32aeSBjoern A. Zeeb {
496dd4f32aeSBjoern A. Zeeb struct ath11k_pdev_dp *dp = &ar->dp;
497dd4f32aeSBjoern A. Zeeb struct ath11k_base *ab = ar->ab;
498dd4f32aeSBjoern A. Zeeb struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
499dd4f32aeSBjoern A. Zeeb int i;
500dd4f32aeSBjoern A. Zeeb
501dd4f32aeSBjoern A. Zeeb ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF);
502dd4f32aeSBjoern A. Zeeb
503dd4f32aeSBjoern A. Zeeb if (ar->ab->hw_params.rxdma1_enable) {
504dd4f32aeSBjoern A. Zeeb rx_ring = &dp->rxdma_mon_buf_ring;
505dd4f32aeSBjoern A. Zeeb ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF);
506dd4f32aeSBjoern A. Zeeb }
507dd4f32aeSBjoern A. Zeeb
508dd4f32aeSBjoern A. Zeeb for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
509dd4f32aeSBjoern A. Zeeb rx_ring = &dp->rx_mon_status_refill_ring[i];
510dd4f32aeSBjoern A. Zeeb ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS);
511dd4f32aeSBjoern A. Zeeb }
512dd4f32aeSBjoern A. Zeeb
513dd4f32aeSBjoern A. Zeeb return 0;
514dd4f32aeSBjoern A. Zeeb }
515dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_pdev_srng_free(struct ath11k * ar)516dd4f32aeSBjoern A. Zeeb static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar)
517dd4f32aeSBjoern A. Zeeb {
518dd4f32aeSBjoern A. Zeeb struct ath11k_pdev_dp *dp = &ar->dp;
519dd4f32aeSBjoern A. Zeeb struct ath11k_base *ab = ar->ab;
520dd4f32aeSBjoern A. Zeeb int i;
521dd4f32aeSBjoern A. Zeeb
522dd4f32aeSBjoern A. Zeeb ath11k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);
523dd4f32aeSBjoern A. Zeeb
524dd4f32aeSBjoern A. Zeeb for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
525dd4f32aeSBjoern A. Zeeb if (ab->hw_params.rx_mac_buf_ring)
526dd4f32aeSBjoern A. Zeeb ath11k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
527dd4f32aeSBjoern A. Zeeb
528dd4f32aeSBjoern A. Zeeb ath11k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);
529dd4f32aeSBjoern A. Zeeb ath11k_dp_srng_cleanup(ab,
530dd4f32aeSBjoern A. Zeeb &dp->rx_mon_status_refill_ring[i].refill_buf_ring);
531dd4f32aeSBjoern A. Zeeb }
532dd4f32aeSBjoern A. Zeeb
533dd4f32aeSBjoern A. Zeeb ath11k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
534dd4f32aeSBjoern A. Zeeb }
535dd4f32aeSBjoern A. Zeeb
ath11k_dp_pdev_reo_cleanup(struct ath11k_base * ab)536dd4f32aeSBjoern A. Zeeb void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab)
537dd4f32aeSBjoern A. Zeeb {
538dd4f32aeSBjoern A. Zeeb struct ath11k_dp *dp = &ab->dp;
539dd4f32aeSBjoern A. Zeeb int i;
540dd4f32aeSBjoern A. Zeeb
541dd4f32aeSBjoern A. Zeeb for (i = 0; i < DP_REO_DST_RING_MAX; i++)
542dd4f32aeSBjoern A. Zeeb ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
543dd4f32aeSBjoern A. Zeeb }
544dd4f32aeSBjoern A. Zeeb
ath11k_dp_pdev_reo_setup(struct ath11k_base * ab)545dd4f32aeSBjoern A. Zeeb int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab)
546dd4f32aeSBjoern A. Zeeb {
547dd4f32aeSBjoern A. Zeeb struct ath11k_dp *dp = &ab->dp;
548dd4f32aeSBjoern A. Zeeb int ret;
549dd4f32aeSBjoern A. Zeeb int i;
550dd4f32aeSBjoern A. Zeeb
551dd4f32aeSBjoern A. Zeeb for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
552dd4f32aeSBjoern A. Zeeb ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
553dd4f32aeSBjoern A. Zeeb HAL_REO_DST, i, 0,
554dd4f32aeSBjoern A. Zeeb DP_REO_DST_RING_SIZE);
555dd4f32aeSBjoern A. Zeeb if (ret) {
556dd4f32aeSBjoern A. Zeeb ath11k_warn(ab, "failed to setup reo_dst_ring\n");
557dd4f32aeSBjoern A. Zeeb goto err_reo_cleanup;
558dd4f32aeSBjoern A. Zeeb }
559dd4f32aeSBjoern A. Zeeb }
560dd4f32aeSBjoern A. Zeeb
561dd4f32aeSBjoern A. Zeeb return 0;
562dd4f32aeSBjoern A. Zeeb
563dd4f32aeSBjoern A. Zeeb err_reo_cleanup:
564dd4f32aeSBjoern A. Zeeb ath11k_dp_pdev_reo_cleanup(ab);
565dd4f32aeSBjoern A. Zeeb
566dd4f32aeSBjoern A. Zeeb return ret;
567dd4f32aeSBjoern A. Zeeb }
568dd4f32aeSBjoern A. Zeeb
/* Allocate all per-pdev rx SRNGs: the host refill buffer ring, the
 * optional per-MAC buffer rings, the rxdma error destination rings,
 * the monitor status refill rings and - only when rxdma1 is present -
 * the monitor buf/dst/desc rings.
 *
 * Returns 0 on success or the negative error from
 * ath11k_dp_srng_setup(); partially created rings are left for the
 * caller to release via ath11k_dp_rx_pdev_srng_free().
 */
static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	struct dp_srng *srng = NULL;
	int i;
	int ret;

	ret = ath11k_dp_srng_setup(ar->ab,
				   &dp->rx_refill_buf_ring.refill_buf_ring,
				   HAL_RXDMA_BUF, 0,
				   dp->mac_id, DP_RXDMA_BUF_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n");
		return ret;
	}

	/* Hardware variants with a dedicated rx buffer ring per MAC. */
	if (ar->ab->hw_params.rx_mac_buf_ring) {
		for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
			ret = ath11k_dp_srng_setup(ar->ab,
						   &dp->rx_mac_buf_ring[i],
						   HAL_RXDMA_BUF, 1,
						   dp->mac_id + i, 1024);
			if (ret) {
				ath11k_warn(ar->ab, "failed to setup rx_mac_buf_ring %d\n",
					    i);
				return ret;
			}
		}
	}

	/* One error destination ring per rxdma. */
	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring[i],
					   HAL_RXDMA_DST, 0, dp->mac_id + i,
					   DP_RXDMA_ERR_DST_RING_SIZE);
		if (ret) {
			ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring %d\n", i);
			return ret;
		}
	}

	/* Monitor status refill ring per rxdma. */
	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
		ret = ath11k_dp_srng_setup(ar->ab,
					   srng,
					   HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id + i,
					   DP_RXDMA_MON_STATUS_RING_SIZE);
		if (ret) {
			ath11k_warn(ar->ab,
				    "failed to setup rx_mon_status_refill_ring %d\n", i);
			return ret;
		}
	}

	/* if rxdma1_enable is false, then it doesn't need
	 * to setup rxdam_mon_buf_ring, rxdma_mon_dst_ring
	 * and rxdma_mon_desc_ring.
	 * init reap timer for QCA6390.
	 */
	if (!ar->ab->hw_params.rxdma1_enable) {
		/* Init the mon status buffer reap timer instead. */
		timer_setup(&ar->ab->mon_reap_timer,
			    ath11k_dp_service_mon_ring, 0);
		return 0;
	}

	ret = ath11k_dp_srng_setup(ar->ab,
				   &dp->rxdma_mon_buf_ring.refill_buf_ring,
				   HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_BUF_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_BUF\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring,
				   HAL_RXDMA_MONITOR_DST, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_DST_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_DST\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring,
				   HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_DESC_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_DESC\n");
		return ret;
	}

	return 0;
}
665dd4f32aeSBjoern A. Zeeb
ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base * ab)666dd4f32aeSBjoern A. Zeeb void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
667dd4f32aeSBjoern A. Zeeb {
668dd4f32aeSBjoern A. Zeeb struct ath11k_dp *dp = &ab->dp;
669dd4f32aeSBjoern A. Zeeb struct dp_reo_cmd *cmd, *tmp;
670dd4f32aeSBjoern A. Zeeb struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache;
671*28348caeSBjoern A. Zeeb struct dp_rx_tid *rx_tid;
672dd4f32aeSBjoern A. Zeeb
673dd4f32aeSBjoern A. Zeeb spin_lock_bh(&dp->reo_cmd_lock);
674dd4f32aeSBjoern A. Zeeb list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
675dd4f32aeSBjoern A. Zeeb list_del(&cmd->list);
676*28348caeSBjoern A. Zeeb rx_tid = &cmd->data;
677*28348caeSBjoern A. Zeeb if (rx_tid->vaddr) {
678*28348caeSBjoern A. Zeeb dma_unmap_single(ab->dev, rx_tid->paddr,
679*28348caeSBjoern A. Zeeb rx_tid->size, DMA_BIDIRECTIONAL);
680*28348caeSBjoern A. Zeeb kfree(rx_tid->vaddr);
681*28348caeSBjoern A. Zeeb rx_tid->vaddr = NULL;
682*28348caeSBjoern A. Zeeb }
683dd4f32aeSBjoern A. Zeeb kfree(cmd);
684dd4f32aeSBjoern A. Zeeb }
685dd4f32aeSBjoern A. Zeeb
686dd4f32aeSBjoern A. Zeeb list_for_each_entry_safe(cmd_cache, tmp_cache,
687dd4f32aeSBjoern A. Zeeb &dp->reo_cmd_cache_flush_list, list) {
688dd4f32aeSBjoern A. Zeeb list_del(&cmd_cache->list);
689dd4f32aeSBjoern A. Zeeb dp->reo_cmd_cache_flush_count--;
690*28348caeSBjoern A. Zeeb rx_tid = &cmd_cache->data;
691*28348caeSBjoern A. Zeeb if (rx_tid->vaddr) {
692*28348caeSBjoern A. Zeeb dma_unmap_single(ab->dev, rx_tid->paddr,
693*28348caeSBjoern A. Zeeb rx_tid->size, DMA_BIDIRECTIONAL);
694*28348caeSBjoern A. Zeeb kfree(rx_tid->vaddr);
695*28348caeSBjoern A. Zeeb rx_tid->vaddr = NULL;
696*28348caeSBjoern A. Zeeb }
697dd4f32aeSBjoern A. Zeeb kfree(cmd_cache);
698dd4f32aeSBjoern A. Zeeb }
699dd4f32aeSBjoern A. Zeeb spin_unlock_bh(&dp->reo_cmd_lock);
700dd4f32aeSBjoern A. Zeeb }
701dd4f32aeSBjoern A. Zeeb
/* REO command completion callback: after the final FLUSH_CACHE for a
 * tid's hardware queue descriptor completes (or fails), unmap and free
 * the descriptor memory carried in @ctx (a struct dp_rx_tid copy).
 */
static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx,
				   enum hal_reo_cmd_status status)
{
	struct dp_rx_tid *rx_tid = ctx;

	/* A failed flush is only logged; the memory is freed either way. */
	if (status != HAL_REO_CMD_SUCCESS)
		ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
			    rx_tid->tid, status);
	if (rx_tid->vaddr) {
		dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->vaddr);
		rx_tid->vaddr = NULL;
	}
}
717dd4f32aeSBjoern A. Zeeb
/* Flush a tid's REO queue descriptor out of the hardware REO cache and
 * arrange for its memory to be freed.
 *
 * The descriptor can span several desc_sz chunks; all chunks beyond
 * the first are flushed with status-less FLUSH_CACHE commands, then
 * the base address is flushed with NEED_STATUS so that
 * ath11k_dp_reo_cmd_free() releases the memory on completion.  If that
 * final command cannot be queued, the memory is unmapped and freed
 * synchronously here.
 */
static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
				      struct dp_rx_tid *rx_tid)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	unsigned long tot_desc_sz, desc_sz;
	int ret;

	tot_desc_sz = rx_tid->size;
	desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);

	/* Flush trailing chunks, highest offset first, stopping before
	 * the first desc_sz bytes.
	 */
	while (tot_desc_sz > desc_sz) {
		tot_desc_sz -= desc_sz;
		cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
		ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
						HAL_REO_CMD_FLUSH_CACHE, &cmd,
						NULL);
		if (ret)
			ath11k_warn(ab,
				    "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
				    rx_tid->tid, ret);
	}

	/* Final flush of the base chunk; memory is freed from the
	 * completion callback.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
	ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
					HAL_REO_CMD_FLUSH_CACHE,
					&cmd, ath11k_dp_reo_cmd_free);
	if (ret) {
		ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
			   rx_tid->tid, ret);
		dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->vaddr);
		rx_tid->vaddr = NULL;
	}
}
757dd4f32aeSBjoern A. Zeeb
/* Completion handler for the UPDATE_RX_QUEUE command issued by
 * ath11k_peer_rx_tid_delete().
 *
 * On success the descriptor cannot be freed yet - the REO cache may
 * still reference it - so a copy of the dp_rx_tid is queued on
 * reo_cmd_cache_flush_list.  Entries are then flushed and freed once
 * they are older than DP_REO_DESC_FREE_TIMEOUT_MS or whenever the list
 * exceeds DP_REO_DESC_FREE_THRESHOLD.  On HAL_REO_CMD_DRAIN, or when
 * the list element cannot be allocated, the memory is freed
 * immediately; on any other failure it is deliberately left alone.
 */
static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
				      enum hal_reo_cmd_status status)
{
	struct ath11k_base *ab = dp->ab;
	struct dp_rx_tid *rx_tid = ctx;
	struct dp_reo_cache_flush_elem *elem, *tmp;

	if (status == HAL_REO_CMD_DRAIN) {
		goto free_desc;
	} else if (status != HAL_REO_CMD_SUCCESS) {
		/* Shouldn't happen! Cleanup in case of other failure? */
		ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
			    rx_tid->tid, status);
		return;
	}

	/* GFP_ATOMIC: runs from REO status processing context. */
	elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
	if (!elem)
		goto free_desc;

	elem->ts = jiffies;
	memcpy(&elem->data, rx_tid, sizeof(*rx_tid));

	spin_lock_bh(&dp->reo_cmd_lock);
	list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
	dp->reo_cmd_cache_flush_count++;

	/* Flush and invalidate aged REO desc from HW cache */
	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
				 list) {
		if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD ||
		    time_after(jiffies, elem->ts +
			       msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) {
			list_del(&elem->list);
			dp->reo_cmd_cache_flush_count--;
			/* NOTE(review): lock is dropped mid-walk around the
			 * flush; the _safe iterator only guards against
			 * removal of the current entry, so concurrent list
			 * mutation is presumably excluded elsewhere - confirm.
			 */
			spin_unlock_bh(&dp->reo_cmd_lock);

			ath11k_dp_reo_cache_flush(ab, &elem->data);
			kfree(elem);
			spin_lock_bh(&dp->reo_cmd_lock);
		}
	}
	spin_unlock_bh(&dp->reo_cmd_lock);

	return;
free_desc:
	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);
	rx_tid->vaddr = NULL;
}
809dd4f32aeSBjoern A. Zeeb
/* Delete the rx reorder queue of @tid for @peer.
 *
 * Marks the tid inactive, then issues a REO UPDATE_RX_QUEUE command
 * clearing the queue's VLD bit; ath11k_dp_rx_tid_del_func() flushes
 * and frees the hardware descriptor on completion.  If the command
 * cannot be queued the memory is unmapped and freed here directly
 * (-ESHUTDOWN is not logged; presumably returned while the device is
 * going away).
 */
void ath11k_peer_rx_tid_delete(struct ath11k *ar,
			       struct ath11k_peer *peer, u8 tid)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	int ret;

	if (!rx_tid->active)
		return;

	rx_tid->active = false;

	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
					ath11k_dp_rx_tid_del_func);
	if (ret) {
		if (ret != -ESHUTDOWN)
			ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
				   tid, ret);
		dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->vaddr);
		rx_tid->vaddr = NULL;
	}

	rx_tid->paddr = 0;
	rx_tid->size = 0;
}
842dd4f32aeSBjoern A. Zeeb
/* Return an rx MSDU link descriptor through the WBM descriptor release
 * ring using buffer-manager @action (e.g. put back on the idle list).
 *
 * Returns 0 on success or -ENOBUFS when no source-ring entry is
 * available.
 */
static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
					 u32 *link_desc,
					 enum hal_wbm_rel_bm_act action)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng;
	u32 *desc;
	int ret = 0;

	srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOBUFS;
		goto exit;
	}

	ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc,
					 action);

exit:
	/* Always close the access session, even when no entry was free. */
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return ret;
}
874dd4f32aeSBjoern A. Zeeb
/* Reset all defragmentation state of @rx_tid: drop the cached
 * destination ring descriptor (optionally returning its link
 * descriptor to the WBM idle list first), clear the fragment tracking
 * fields and purge any queued fragments.
 *
 * Caller must hold ab->base_lock.
 */
static void ath11k_dp_rx_frags_cleanup(struct dp_rx_tid *rx_tid, bool rel_link_desc)
{
	struct ath11k_base *ab = rx_tid->ab;

	lockdep_assert_held(&ab->base_lock);

	if (rel_link_desc && rx_tid->dst_ring_desc)
		ath11k_dp_rx_link_desc_return(ab, (u32 *)rx_tid->dst_ring_desc,
					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);

	/* kfree(NULL) is a no-op, so no guard is needed here. */
	kfree(rx_tid->dst_ring_desc);
	rx_tid->dst_ring_desc = NULL;

	rx_tid->cur_sn = 0;
	rx_tid->last_frag_no = 0;
	rx_tid->rx_frag_bitmap = 0;
	__skb_queue_purge(&rx_tid->rx_frags);
}
894dd4f32aeSBjoern A. Zeeb
/* Flush pending rx fragment state of @peer for every tid.
 *
 * Entered with ab->base_lock held.  The lock is dropped around
 * del_timer_sync() - presumably because the frag timer handler takes
 * base_lock itself, which would deadlock; TODO(review) confirm - and
 * reacquired before the per-tid cleanup.
 */
void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer)
{
	struct dp_rx_tid *rx_tid;
	int i;

	lockdep_assert_held(&ar->ab->base_lock);

	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
		rx_tid = &peer->rx_tid[i];

		spin_unlock_bh(&ar->ab->base_lock);
		del_timer_sync(&rx_tid->frag_timer);
		spin_lock_bh(&ar->ab->base_lock);

		ath11k_dp_rx_frags_cleanup(rx_tid, true);
	}
}
912dd4f32aeSBjoern A. Zeeb
/* Tear down every rx tid of @peer: delete each reorder queue, clean up
 * its fragment state and cancel the frag timer.
 *
 * Entered with ab->base_lock held; the lock is dropped only around
 * del_timer_sync() (the timer handler presumably takes base_lock -
 * TODO(review) confirm) and reacquired before the next iteration.
 */
void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
{
	struct dp_rx_tid *rx_tid;
	int i;

	lockdep_assert_held(&ar->ab->base_lock);

	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
		rx_tid = &peer->rx_tid[i];

		ath11k_peer_rx_tid_delete(ar, peer, i);
		ath11k_dp_rx_frags_cleanup(rx_tid, true);

		spin_unlock_bh(&ar->ab->base_lock);
		del_timer_sync(&rx_tid->frag_timer);
		spin_lock_bh(&ar->ab->base_lock);
	}
}
931dd4f32aeSBjoern A. Zeeb
/* Send a REO UPDATE_RX_QUEUE command adjusting the BA window size of
 * @rx_tid, optionally also reprogramming the starting sequence number.
 * On success the new window size is recorded in the tid state.
 * Returns 0 or the negative error from the REO command path.
 */
static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar,
					 struct ath11k_peer *peer,
					 struct dp_rx_tid *rx_tid,
					 u32 ba_win_sz, u16 ssn,
					 bool update_ssn)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	int ret;

	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.ba_window_size = ba_win_sz;
	cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE |
		   (update_ssn ? HAL_REO_CMD_UPD0_SSN : 0);
	if (update_ssn)
		cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn);

	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
					NULL);
	if (ret) {
		ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
			    rx_tid->tid, ret);
		return ret;
	}

	rx_tid->ba_win_sz = ba_win_sz;

	return 0;
}
965dd4f32aeSBjoern A. Zeeb
/* Release the rx reorder queue descriptor of @tid for the peer looked
 * up via @peer_mac/@vdev_id and mark the tid inactive.  A missing peer
 * or an inactive tid is a silent no-op (beyond the warning).
 */
static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
				      const u8 *peer_mac, int vdev_id, u8 tid)
{
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n");
		goto out;
	}

	rx_tid = &peer->rx_tid[tid];
	if (rx_tid->active) {
		dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->vaddr);
		rx_tid->vaddr = NULL;
		rx_tid->active = false;
	}

out:
	spin_unlock_bh(&ab->base_lock);
}
994dd4f32aeSBjoern A. Zeeb
/* Set up (or update) the rx reorder queue for @tid of the peer
 * identified by @peer_mac/@vdev_id.
 *
 * If the tid is already active, only a REO BA-window/SSN update is
 * issued and the firmware is re-pointed at the existing queue.
 * Otherwise a hardware queue descriptor is allocated, DMA-mapped and
 * handed to firmware via ath11k_wmi_peer_rx_reorder_queue_setup().
 *
 * Returns 0 on success or a negative error code.
 *
 * Fixes vs. previous revision: the "failed to update reo" message had
 * its newline in the middle of the format string, and the
 * dma_mapping_error path freed rx_tid->vaddr (never assigned on that
 * path) instead of the fresh allocation, leaking it.
 */
int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
			     u8 tid, u32 ba_win_sz, u16 ssn,
			     enum hal_pn_type pn_type)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;
	u32 hw_desc_sz;
	u32 *addr_aligned;
	void *vaddr;
	dma_addr_t paddr;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer %pM to set up rx tid\n",
			    peer_mac);
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	rx_tid = &peer->rx_tid[tid];
	/* Update the tid queue if it is already setup */
	if (rx_tid->active) {
		paddr = rx_tid->paddr;
		ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid,
						    ba_win_sz, ssn, true);
		spin_unlock_bh(&ab->base_lock);
		if (ret) {
			ath11k_warn(ab, "failed to update reo for peer %pM rx tid %d: %d\n",
				    peer_mac, tid, ret);
			return ret;
		}

		ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
							     peer_mac, paddr,
							     tid, 1, ba_win_sz);
		if (ret)
			ath11k_warn(ab, "failed to send wmi rx reorder queue for peer %pM tid %d: %d\n",
				    peer_mac, tid, ret);
		return ret;
	}

	rx_tid->tid = tid;

	rx_tid->ba_win_sz = ba_win_sz;

	/* TODO: Optimize the memory allocation for qos tid based on
	 * the actual BA window size in REO tid update path.
	 */
	if (tid == HAL_DESC_REO_NON_QOS_TID)
		hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid);
	else
		hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);

	/* GFP_ATOMIC: allocated under base_lock (spinlock). */
	vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
	if (!vaddr) {
		spin_unlock_bh(&ab->base_lock);
		return -ENOMEM;
	}

	addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);

	ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
				   ssn, pn_type);

	paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
			       DMA_BIDIRECTIONAL);

	ret = dma_mapping_error(ab->dev, paddr);
	if (ret) {
		spin_unlock_bh(&ab->base_lock);
		ath11k_warn(ab, "failed to setup dma map for peer %pM rx tid %d: %d\n",
			    peer_mac, tid, ret);
		goto err_mem_free;
	}

	rx_tid->vaddr = vaddr;
	rx_tid->paddr = paddr;
	rx_tid->size = hw_desc_sz;
	rx_tid->active = true;

	spin_unlock_bh(&ab->base_lock);

	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
						     paddr, tid, 1, ba_win_sz);
	if (ret) {
		ath11k_warn(ab, "failed to setup rx reorder queue for peer %pM tid %d: %d\n",
			    peer_mac, tid, ret);
		ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid);
	}

	return ret;

err_mem_free:
	/* rx_tid->vaddr was never assigned on this path; free the
	 * local allocation (previously leaked).
	 */
	kfree(vaddr);

	return ret;
}
1097dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_ampdu_start(struct ath11k * ar,struct ieee80211_ampdu_params * params)1098dd4f32aeSBjoern A. Zeeb int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
1099dd4f32aeSBjoern A. Zeeb struct ieee80211_ampdu_params *params)
1100dd4f32aeSBjoern A. Zeeb {
1101dd4f32aeSBjoern A. Zeeb struct ath11k_base *ab = ar->ab;
1102dd4f32aeSBjoern A. Zeeb struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
1103dd4f32aeSBjoern A. Zeeb int vdev_id = arsta->arvif->vdev_id;
1104dd4f32aeSBjoern A. Zeeb int ret;
1105dd4f32aeSBjoern A. Zeeb
1106dd4f32aeSBjoern A. Zeeb ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id,
1107dd4f32aeSBjoern A. Zeeb params->tid, params->buf_size,
1108dd4f32aeSBjoern A. Zeeb params->ssn, arsta->pn_type);
1109dd4f32aeSBjoern A. Zeeb if (ret)
1110dd4f32aeSBjoern A. Zeeb ath11k_warn(ab, "failed to setup rx tid %d\n", ret);
1111dd4f32aeSBjoern A. Zeeb
1112dd4f32aeSBjoern A. Zeeb return ret;
1113dd4f32aeSBjoern A. Zeeb }
1114dd4f32aeSBjoern A. Zeeb
/* Tear down an rx BA (aggregation) session for the peer/TID described by
 * @params: shrink the REO queue back to a window size of 1 and then ask
 * firmware over WMI to delete the rx reorder queue.
 *
 * Returns 0 on success (or if the TID was never active), -ENOENT if the
 * peer cannot be found, or a negative errno from the REO/WMI calls.
 */
int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
			    struct ieee80211_ampdu_params *params)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
	int vdev_id = arsta->arvif->vdev_id;
	dma_addr_t paddr;
	bool active;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, params->sta->addr);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	/* Snapshot under base_lock; paddr is needed for the WMI call made
	 * after the lock has been dropped.
	 */
	paddr = peer->rx_tid[params->tid].paddr;
	active = peer->rx_tid[params->tid].active;

	if (!active) {
		spin_unlock_bh(&ab->base_lock);
		return 0;
	}

	/* ba_win_sz = 1, ssn = 0, update_ssn = false: revert the REO queue
	 * to non-aggregation state.
	 * NOTE(review): this passes the rx_tid array base (i.e. TID 0)
	 * rather than &peer->rx_tid[params->tid] — confirm whether the
	 * callee indexes by TID internally or this targets the wrong TID.
	 */
	ret = ath11k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
	spin_unlock_bh(&ab->base_lock);
	if (ret) {
		ath11k_warn(ab, "failed to update reo for rx tid %d: %d\n",
			    params->tid, ret);
		return ret;
	}

	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
						     params->sta->addr, paddr,
						     params->tid, 1, 1);
	if (ret)
		ath11k_warn(ab, "failed to send wmi to delete rx tid %d\n",
			    ret);

	return ret;
}
1160dd4f32aeSBjoern A. Zeeb
/* Enable or disable hardware PN/TSC replay-detection offload on every
 * active rx TID queue of the given peer, in response to a pairwise key
 * being installed (SET_KEY) or removed.
 *
 * Returns 0 on success (also for non-pairwise keys, which are skipped),
 * -ENOENT if the peer is unknown, or the first REO command error.
 */
int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif,
				       const u8 *peer_addr,
				       enum set_key_cmd key_cmd,
				       struct ieee80211_key_conf *key)
{
	struct ath11k *ar = arvif->ar;
	struct ath11k_base *ab = ar->ab;
	struct ath11k_hal_reo_cmd cmd = {0};
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;
	u8 tid;
	int ret = 0;

	/* NOTE: Enable PN/TSC replay check offload only for unicast frames.
	 * We use mac80211 PN/TSC replay check functionality for bcast/mcast
	 * for now.
	 */
	if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		return 0;

	/* Update-mask: touch PN value, size, validity, check enable and
	 * the sequence-valid bit in the REO queue descriptor.
	 */
	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.upd0 |= HAL_REO_CMD_UPD0_PN |
		    HAL_REO_CMD_UPD0_PN_SIZE |
		    HAL_REO_CMD_UPD0_PN_VALID |
		    HAL_REO_CMD_UPD0_PN_CHECK |
		    HAL_REO_CMD_UPD0_SVLD;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_CCMP_256:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		if (key_cmd == SET_KEY) {
			/* 48-bit PN for these ciphers; on key removal the
			 * check-enable bit is simply left cleared in upd1.
			 */
			cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
			cmd.pn_size = 48;
		}
		break;
	default:
		break;
	}

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to configure pn replay detection\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	/* <= IEEE80211_NUM_TIDS: also covers the extra non-QoS/mgmt TID. */
	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
		rx_tid = &peer->rx_tid[tid];
		if (!rx_tid->active)
			continue;
		cmd.addr_lo = lower_32_bits(rx_tid->paddr);
		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
		ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
						HAL_REO_CMD_UPDATE_RX_QUEUE,
						&cmd, NULL);
		if (ret) {
			ath11k_warn(ab, "failed to configure rx tid %d queue for pn replay detection %d\n",
				    tid, ret);
			break;
		}
	}

	spin_unlock_bh(&ab->base_lock);

	return ret;
}
1232dd4f32aeSBjoern A. Zeeb
/* Find the user_stats[] slot belonging to @peer_id, or hand out the
 * first unused slot if the peer is not tracked yet.  Slots are filled
 * front-to-back, so the first invalid entry ends the search.  The last
 * array entry is never used; -EINVAL means all usable slots are taken
 * by other peers.
 */
static inline int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
					     u16 peer_id)
{
	int idx;

	for (idx = 0; idx < HTT_PPDU_STATS_MAX_USERS - 1; idx++) {
		if (!ppdu_stats->user_stats[idx].is_valid_peer_id)
			return idx;

		if (ppdu_stats->user_stats[idx].peer_id == peer_id)
			return idx;
	}

	return -EINVAL;
}
1249dd4f32aeSBjoern A. Zeeb
/* TLV iterator callback (for ath11k_dp_htt_tlv_iter) that copies one
 * PPDU-stats TLV into the struct htt_ppdu_stats_info passed via @data.
 * Only the tags the driver consumes are handled; unknown tags fall out
 * of the switch and are silently ignored.
 *
 * Returns 0 on success, -EINVAL when a TLV is shorter than the struct
 * it claims to carry or when no free user slot is available.
 */
static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab,
					   u16 tag, u16 len, const void *ptr,
					   void *data)
{
	struct htt_ppdu_stats_info *ppdu_info;
	struct htt_ppdu_user_stats *user_stats;
	int cur_user;
	u16 peer_id;

	ppdu_info = (struct htt_ppdu_stats_info *)data;

	switch (tag) {
	case HTT_PPDU_STATS_TAG_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_common)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}
		memcpy((void *)&ppdu_info->ppdu_stats.common, ptr,
		       sizeof(struct htt_ppdu_stats_common));
		break;
	case HTT_PPDU_STATS_TAG_USR_RATE:
		if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		/* FreeBSD builds keep @ptr const-qualified, hence the
		 * split casts below.
		 */
#if defined(__linux__)
		peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id;
#elif defined(__FreeBSD__)
		peer_id = ((const struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id;
#endif
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->rate, ptr,
		       sizeof(struct htt_ppdu_stats_user_rate));
		/* Record which TLVs were seen so consumers can tell which
		 * sub-structures of user_stats are populated.
		 */
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

#if defined(__linux__)
		peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id;
#elif defined(__FreeBSD__)
		peer_id = ((const struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id;
#endif
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->cmpltn_cmn, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
		if (len <
		    sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id =
#if defined(__linux__)
		((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id;
#elif defined(__FreeBSD__)
		((const struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id;
#endif
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->ack_ba, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
		user_stats->tlv_flags |= BIT(tag);
		break;
	}
	return 0;
}
1345dd4f32aeSBjoern A. Zeeb
/* Walk a buffer of HTT TLVs ([header][payload] pairs) and invoke @iter
 * for each payload.  The buffer is bounds-checked at every step: a
 * truncated header or a payload length exceeding the remaining bytes
 * aborts with -EINVAL.
 *
 * Note that only an -ENOMEM return from @iter aborts the walk; other
 * iterator errors are deliberately ignored and parsing continues with
 * the next TLV.
 *
 * Returns 0 when the whole buffer was consumed, negative errno on a
 * malformed buffer or iterator OOM.
 */
#if defined(__linux__)
int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
#elif defined(__FreeBSD__)
/* FreeBSD uses const u8 * because arithmetic on void * is a GNU extension. */
int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const u8 *ptr, size_t len,
#endif
			   int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
				       const void *ptr, void *data),
			   void *data)
{
	const struct htt_tlv *tlv;
#if defined(__linux__)
	const void *begin = ptr;
#elif defined(__FreeBSD__)
	const u8 *begin = ptr;
#endif
	u16 tlv_tag, tlv_len;
	int ret = -EINVAL;

	while (len > 0) {
		if (len < sizeof(*tlv)) {
			ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}
#if defined(__linux__)
		tlv = (struct htt_tlv *)ptr;
#elif defined(__FreeBSD__)
		tlv = (const struct htt_tlv *)(const void *)ptr;
#endif
		tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header);
		tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header);
		/* tlv_len counts payload only, so advance past the header
		 * before validating the payload against what remains.
		 */
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		if (tlv_len > len) {
			ath11k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}
		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
		if (ret == -ENOMEM)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}
	return 0;
}
1394dd4f32aeSBjoern A. Zeeb
/* Translate one user's slice of a firmware PPDU-stats report into the
 * per-station tx-rate info (arsta->txrate / last_txrate) and, when
 * extended tx stats are enabled, into the debugfs per-peer counters.
 *
 * Silently returns if the mandatory USR_RATE TLV was not received for
 * this user, if the rate fields are out of range, or if the peer is no
 * longer known.
 */
static void
ath11k_update_per_peer_tx_stats(struct ath11k *ar,
				struct htt_ppdu_stats *ppdu_stats, u8 user)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct ieee80211_sta *sta;
	struct ath11k_sta *arsta;
	struct htt_ppdu_stats_user_rate *user_rate;
	struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
	struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
	struct htt_ppdu_stats_common *common = &ppdu_stats->common;
	int ret;
	u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
	u32 succ_bytes = 0;
	u16 rate = 0, succ_pkts = 0;
	u32 tx_duration = 0;
	/* Default when no ACK/BA TLV arrived: treat as mgmt/non-QoS so the
	 * peer-stats update below is skipped.
	 */
	u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
	bool is_ampdu = false;

	if (!usr_stats)
		return;

	/* Rate TLV is mandatory for any rate bookkeeping. */
	if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
		return;

	if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
		is_ampdu =
			HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);

	if (usr_stats->tlv_flags &
	    BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
		succ_bytes = usr_stats->ack_ba.success_bytes;
		succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M,
				      usr_stats->ack_ba.info);
		tid = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM,
				usr_stats->ack_ba.info);
	}

	if (common->fes_duration_us)
		tx_duration = common->fes_duration_us;

	/* Unpack the firmware rate word.  The -2/+1 offsets convert the
	 * firmware encodings to driver values — presumably the fw BW enum
	 * starts two below the driver's and NSS is 0-based; TODO confirm
	 * against the HTT interface definitions.
	 */
	user_rate = &usr_stats->rate;
	flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
	bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
	nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
	mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
	sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
	dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);

	/* Note: If host configured fixed rates and in some other special
	 * cases, the broadcast/management frames are sent in different rates.
	 * Firmware rate's control to be skipped for this?
	 */

	if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH11K_HE_MCS_MAX) {
		ath11k_warn(ab, "Invalid HE mcs %d peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH11K_VHT_MCS_MAX) {
		ath11k_warn(ab, "Invalid VHT mcs %d peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH11K_HT_MCS_MAX || nss < 1)) {
		ath11k_warn(ab, "Invalid HT mcs %d nss %d peer stats",
			    mcs, nss);
		return;
	}

	/* Legacy rates carry a hw ratecode in the mcs field; convert it to
	 * the 100kbps value mac80211 expects.
	 */
	if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
		ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
							    flags,
							    &rate_idx,
							    &rate);
		if (ret < 0)
			return;
	}

	/* rcu protects the sta pointer; base_lock protects the peer table
	 * and the per-peer stats written below.
	 */
	rcu_read_lock();
	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id);

	if (!peer || !peer->sta) {
		spin_unlock_bh(&ab->base_lock);
		rcu_read_unlock();
		return;
	}

	sta = peer->sta;
	arsta = (struct ath11k_sta *)sta->drv_priv;

	memset(&arsta->txrate, 0, sizeof(arsta->txrate));

	switch (flags) {
	case WMI_RATE_PREAMBLE_OFDM:
		arsta->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_CCK:
		arsta->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_HT:
		/* HT MCS index encodes the stream count (8 MCS per stream). */
		arsta->txrate.mcs = mcs + 8 * (nss - 1);
		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_VHT:
		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_HE:
		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
		arsta->txrate.he_dcm = dcm;
		arsta->txrate.he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
		arsta->txrate.he_ru_alloc = ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc
						((user_rate->ru_end -
						 user_rate->ru_start) + 1);
		break;
	}

	arsta->txrate.nss = nss;

	arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
	arsta->tx_duration += tx_duration;
	memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));

	/* PPDU stats reported for mgmt packet doesn't have valid tx bytes.
	 * So skip peer stats update for mgmt packets.
	 */
	if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
		memset(peer_stats, 0, sizeof(*peer_stats));
		peer_stats->succ_pkts = succ_pkts;
		peer_stats->succ_bytes = succ_bytes;
		peer_stats->is_ampdu = is_ampdu;
		peer_stats->duration = tx_duration;
		peer_stats->ba_fails =
			HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
			HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);

		if (ath11k_debugfs_is_extd_tx_stats_enabled(ar))
			ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx);
	}

	spin_unlock_bh(&ab->base_lock);
	rcu_read_unlock();
}
1546dd4f32aeSBjoern A. Zeeb
ath11k_htt_update_ppdu_stats(struct ath11k * ar,struct htt_ppdu_stats * ppdu_stats)1547dd4f32aeSBjoern A. Zeeb static void ath11k_htt_update_ppdu_stats(struct ath11k *ar,
1548dd4f32aeSBjoern A. Zeeb struct htt_ppdu_stats *ppdu_stats)
1549dd4f32aeSBjoern A. Zeeb {
1550dd4f32aeSBjoern A. Zeeb u8 user;
1551dd4f32aeSBjoern A. Zeeb
1552dd4f32aeSBjoern A. Zeeb for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
1553dd4f32aeSBjoern A. Zeeb ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user);
1554dd4f32aeSBjoern A. Zeeb }
1555dd4f32aeSBjoern A. Zeeb
/* Look up (or allocate) the cached PPDU-stats descriptor for @ppdu_id.
 *
 * Descriptors live on ar->ppdu_stats_info, protected by ar->data_lock
 * (hence GFP_ATOMIC below).  When the cache exceeds
 * HTT_PPDU_DESC_MAX_DEPTH, the oldest entry is flushed into the
 * per-peer tx stats and freed before a new one is allocated.
 *
 * Returns the descriptor, or NULL on allocation failure.
 */
static
struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
							u32 ppdu_id)
{
	struct htt_ppdu_stats_info *ppdu_info;

	lockdep_assert_held(&ar->data_lock);

	if (!list_empty(&ar->ppdu_stats_info)) {
		list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
			if (ppdu_info->ppdu_id == ppdu_id)
				return ppdu_info;
		}

		/* Cache full: evict the oldest descriptor, committing its
		 * accumulated stats before freeing it.
		 */
		if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
			ppdu_info = list_first_entry(&ar->ppdu_stats_info,
						     typeof(*ppdu_info), list);
			list_del(&ppdu_info->list);
			ar->ppdu_stat_list_depth--;
			ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
			kfree(ppdu_info);
		}
	}

	/* Atomic allocation: called with data_lock held in softirq context. */
	ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
	if (!ppdu_info)
		return NULL;

	list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
	ar->ppdu_stat_list_depth++;

	return ppdu_info;
}
1589dd4f32aeSBjoern A. Zeeb
/* Handle an HTT_T2H PPDU-stats indication: locate the radio from the
 * pdev id embedded in the message, fetch/allocate the cached descriptor
 * for this ppdu_id and parse the message's TLV payload into it.
 *
 * Returns 0 on success or a negative errno on an unknown pdev, a failed
 * descriptor allocation, or a TLV parse error.
 */
static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab,
				      struct sk_buff *skb)
{
	struct ath11k_htt_ppdu_stats_msg *msg;
	struct htt_ppdu_stats_info *ppdu_info;
	struct ath11k *ar;
	int ret;
	u8 pdev_id;
	u32 ppdu_id, len;

	/* NOTE(review): @len is taken from the message header and does not
	 * appear to be validated against skb->len here — confirm the HTT
	 * transport guarantees the payload fits the buffer.
	 */
	msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data;
	len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info);
	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info);
	ppdu_id = msg->ppdu_id;

	/* rcu guards the pdev->ar lookup against radio teardown. */
	rcu_read_lock();
	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
	if (!ar) {
		ret = -EINVAL;
		goto out;
	}

	if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar))
		trace_ath11k_htt_ppdu_stats(ar, skb->data, len);

	/* data_lock protects the ppdu_stats_info cache the descriptor
	 * lives on (see ath11k_dp_htt_get_ppdu_desc).
	 */
	spin_lock_bh(&ar->data_lock);
	ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id);
	if (!ppdu_info) {
		ret = -EINVAL;
		goto out_unlock_data;
	}

	ppdu_info->ppdu_id = ppdu_id;
	ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len,
				     ath11k_htt_tlv_ppdu_stats_parse,
				     (void *)ppdu_info);
	if (ret) {
		ath11k_warn(ab, "Failed to parse tlv %d\n", ret);
		goto out_unlock_data;
	}

out_unlock_data:
	spin_unlock_bh(&ar->data_lock);

out:
	rcu_read_unlock();

	return ret;
}
1639dd4f32aeSBjoern A. Zeeb
/* Forward an HTT pktlog message to the ath11k_htt_pktlog tracepoint for
 * the radio identified by the pdev id in the message header.
 *
 * NOTE(review): hdr->size comes straight from the firmware message and
 * is not checked against skb->len before being handed to the
 * tracepoint — confirm the transport bounds it.
 */
static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data;
	struct ath_pktlog_hdr *hdr = (struct ath_pktlog_hdr *)data;
	struct ath11k *ar;
	u8 pdev_id;

	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr);
	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
	if (!ar) {
		ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id);
		return;
	}

	trace_ath11k_htt_pktlog(ar, data->payload, hdr->size,
				ar->ab->pktlog_defs_checksum);
}
1657dd4f32aeSBjoern A. Zeeb
/* Record a firmware ring-backpressure event into the matching UMAC or
 * LMAC slot of ab->soc_stats.bp_stats.  The event payload is three
 * consecutive u32 words: (pdev/ring ids), (head/tail pointers), and the
 * backpressure duration.  Events with out-of-range ring/pdev ids or an
 * unknown ring type are dropped after a debug/warn message.
 */
static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
						  struct sk_buff *skb)
{
	u32 *data = (u32 *)skb->data;
	u8 pdev_id, ring_type, ring_id, pdev_idx;
	u16 hp, tp;
	u32 backpressure_time;
	struct ath11k_bp_stats *bp_stats;

	/* Word 0: identifies which ring is backpressured. */
	pdev_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_PDEV_ID_M, *data);
	ring_type = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_TYPE_M, *data);
	ring_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_ID_M, *data);
	++data;

	/* Word 1: head/tail pointers at the time of the event. */
	hp = FIELD_GET(HTT_BACKPRESSURE_EVENT_HP_M, *data);
	tp = FIELD_GET(HTT_BACKPRESSURE_EVENT_TP_M, *data);
	++data;

	/* Word 2: how long the ring has been backpressured. */
	backpressure_time = *data;

	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "backpressure event, pdev %d, ring type %d,ring id %d, hp %d tp %d, backpressure time %d\n",
		   pdev_id, ring_type, ring_id, hp, tp, backpressure_time);

	if (ring_type == HTT_BACKPRESSURE_UMAC_RING_TYPE) {
		if (ring_id >= HTT_SW_UMAC_RING_IDX_MAX)
			return;

		bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[ring_id];
	} else if (ring_type == HTT_BACKPRESSURE_LMAC_RING_TYPE) {
		/* LMAC rings are per-radio; map the hw mac id to the
		 * software pdev index first.
		 */
		pdev_idx = DP_HW2SW_MACID(pdev_id);

		if (ring_id >= HTT_SW_LMAC_RING_IDX_MAX || pdev_idx >= MAX_RADIOS)
			return;

		bp_stats = &ab->soc_stats.bp_stats.lmac_ring_bp_stats[ring_id][pdev_idx];
	} else {
		ath11k_warn(ab, "unknown ring type received in htt bp event %d\n",
			    ring_type);
		return;
	}

	/* base_lock serializes against the debugfs readers of bp_stats. */
	spin_lock_bh(&ab->base_lock);
	bp_stats->hp = hp;
	bp_stats->tp = tp;
	bp_stats->count++;
	bp_stats->jiffies = jiffies;
	spin_unlock_bh(&ab->base_lock);
}
1706dd4f32aeSBjoern A. Zeeb
ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base * ab,struct sk_buff * skb)1707dd4f32aeSBjoern A. Zeeb void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
1708dd4f32aeSBjoern A. Zeeb struct sk_buff *skb)
1709dd4f32aeSBjoern A. Zeeb {
1710dd4f32aeSBjoern A. Zeeb struct ath11k_dp *dp = &ab->dp;
1711dd4f32aeSBjoern A. Zeeb struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
1712dd4f32aeSBjoern A. Zeeb enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp);
1713dd4f32aeSBjoern A. Zeeb u16 peer_id;
1714dd4f32aeSBjoern A. Zeeb u8 vdev_id;
1715dd4f32aeSBjoern A. Zeeb u8 mac_addr[ETH_ALEN];
1716dd4f32aeSBjoern A. Zeeb u16 peer_mac_h16;
1717dd4f32aeSBjoern A. Zeeb u16 ast_hash;
1718dd4f32aeSBjoern A. Zeeb u16 hw_peer_id;
1719dd4f32aeSBjoern A. Zeeb
1720dd4f32aeSBjoern A. Zeeb ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type);
1721dd4f32aeSBjoern A. Zeeb
1722dd4f32aeSBjoern A. Zeeb switch (type) {
1723dd4f32aeSBjoern A. Zeeb case HTT_T2H_MSG_TYPE_VERSION_CONF:
1724dd4f32aeSBjoern A. Zeeb dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR,
1725dd4f32aeSBjoern A. Zeeb resp->version_msg.version);
1726dd4f32aeSBjoern A. Zeeb dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR,
1727dd4f32aeSBjoern A. Zeeb resp->version_msg.version);
1728dd4f32aeSBjoern A. Zeeb complete(&dp->htt_tgt_version_received);
1729dd4f32aeSBjoern A. Zeeb break;
1730dd4f32aeSBjoern A. Zeeb case HTT_T2H_MSG_TYPE_PEER_MAP:
1731dd4f32aeSBjoern A. Zeeb vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
1732dd4f32aeSBjoern A. Zeeb resp->peer_map_ev.info);
1733dd4f32aeSBjoern A. Zeeb peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
1734dd4f32aeSBjoern A. Zeeb resp->peer_map_ev.info);
1735dd4f32aeSBjoern A. Zeeb peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
1736dd4f32aeSBjoern A. Zeeb resp->peer_map_ev.info1);
1737dd4f32aeSBjoern A. Zeeb ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
1738dd4f32aeSBjoern A. Zeeb peer_mac_h16, mac_addr);
1739dd4f32aeSBjoern A. Zeeb ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
1740dd4f32aeSBjoern A. Zeeb break;
1741dd4f32aeSBjoern A. Zeeb case HTT_T2H_MSG_TYPE_PEER_MAP2:
1742dd4f32aeSBjoern A. Zeeb vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
1743dd4f32aeSBjoern A. Zeeb resp->peer_map_ev.info);
1744dd4f32aeSBjoern A. Zeeb peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
1745dd4f32aeSBjoern A. Zeeb resp->peer_map_ev.info);
1746dd4f32aeSBjoern A. Zeeb peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
1747dd4f32aeSBjoern A. Zeeb resp->peer_map_ev.info1);
1748dd4f32aeSBjoern A. Zeeb ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
1749dd4f32aeSBjoern A. Zeeb peer_mac_h16, mac_addr);
1750dd4f32aeSBjoern A. Zeeb ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL,
1751dd4f32aeSBjoern A. Zeeb resp->peer_map_ev.info2);
1752dd4f32aeSBjoern A. Zeeb hw_peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID,
1753dd4f32aeSBjoern A. Zeeb resp->peer_map_ev.info1);
1754dd4f32aeSBjoern A. Zeeb ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
1755dd4f32aeSBjoern A. Zeeb hw_peer_id);
1756dd4f32aeSBjoern A. Zeeb break;
1757dd4f32aeSBjoern A. Zeeb case HTT_T2H_MSG_TYPE_PEER_UNMAP:
1758dd4f32aeSBjoern A. Zeeb case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
1759dd4f32aeSBjoern A. Zeeb peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID,
1760dd4f32aeSBjoern A. Zeeb resp->peer_unmap_ev.info);
1761dd4f32aeSBjoern A. Zeeb ath11k_peer_unmap_event(ab, peer_id);
1762dd4f32aeSBjoern A. Zeeb break;
1763dd4f32aeSBjoern A. Zeeb case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
1764dd4f32aeSBjoern A. Zeeb ath11k_htt_pull_ppdu_stats(ab, skb);
1765dd4f32aeSBjoern A. Zeeb break;
1766dd4f32aeSBjoern A. Zeeb case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
1767dd4f32aeSBjoern A. Zeeb ath11k_debugfs_htt_ext_stats_handler(ab, skb);
1768dd4f32aeSBjoern A. Zeeb break;
1769dd4f32aeSBjoern A. Zeeb case HTT_T2H_MSG_TYPE_PKTLOG:
1770dd4f32aeSBjoern A. Zeeb ath11k_htt_pktlog(ab, skb);
1771dd4f32aeSBjoern A. Zeeb break;
1772dd4f32aeSBjoern A. Zeeb case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
1773dd4f32aeSBjoern A. Zeeb ath11k_htt_backpressure_event_handler(ab, skb);
1774dd4f32aeSBjoern A. Zeeb break;
1775dd4f32aeSBjoern A. Zeeb default:
1776dd4f32aeSBjoern A. Zeeb ath11k_warn(ab, "htt event %d not handled\n", type);
1777dd4f32aeSBjoern A. Zeeb break;
1778dd4f32aeSBjoern A. Zeeb }
1779dd4f32aeSBjoern A. Zeeb
1780dd4f32aeSBjoern A. Zeeb dev_kfree_skb_any(skb);
1781dd4f32aeSBjoern A. Zeeb }
1782dd4f32aeSBjoern A. Zeeb
/* Coalesce an MSDU that the hardware spread across several rx buffers
 * (@first .. @last, intermediate buffers dequeued from @msdu_list) into
 * the single skb @first.
 *
 * @l3pad_bytes: padding inserted between the rx descriptor and the MSDU
 *               payload in the first buffer.
 * @msdu_len:    total MSDU length across all buffers.
 *
 * On success @first holds the complete MSDU with the rx descriptor and
 * padding stripped, and every continuation buffer has been freed.
 * Returns 0 on success, -ENOMEM if the skb could not be expanded (all
 * buffers of this MSDU are freed), or -EINVAL on a malformed buffer.
 */
static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar,
				      struct sk_buff_head *msdu_list,
				      struct sk_buff *first, struct sk_buff *last,
				      u8 l3pad_bytes, int msdu_len)
{
	struct ath11k_base *ab = ar->ab;
	struct sk_buff *skb;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
	int buf_first_hdr_len, buf_first_len;
	struct hal_rx_desc *ldesc;
	int space_extra, rem_len, buf_len;
	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;

	/* As the msdu is spread across multiple rx buffers,
	 * find the offset to the start of msdu for computing
	 * the length of the msdu in the first buffer.
	 */
	buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes;
	buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;

	if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
		/* Caller should not have asked us to coalesce: the whole
		 * MSDU fits in the first buffer.  Trim to size, strip the
		 * descriptor + padding and report success anyway.
		 */
		skb_put(first, buf_first_hdr_len + msdu_len);
		skb_pull(first, buf_first_hdr_len);
		return 0;
	}

	ldesc = (struct hal_rx_desc *)last->data;
	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ab, ldesc);
	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ab, ldesc);

	/* MSDU spans over multiple buffers because the length of the MSDU
	 * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data
	 * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
	 */
	skb_put(first, DP_RX_BUFFER_SIZE);
	skb_pull(first, buf_first_hdr_len);

	/* When an MSDU spread over multiple buffers attention, MSDU_END and
	 * MPDU_END tlvs are valid only in the last buffer. Copy those tlvs.
	 */
	ath11k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc);

	/* Grow the headroom-less tail if the remaining fragments won't fit. */
	space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
	if (space_extra > 0 &&
	    (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
		/* Free up all buffers of the MSDU */
		while ((skb = __skb_dequeue(msdu_list)) != NULL) {
			rxcb = ATH11K_SKB_RXCB(skb);
			/* A buffer without the continuation flag is the
			 * last one of this MSDU; stop after freeing it.
			 */
			if (!rxcb->is_continuation) {
				dev_kfree_skb_any(skb);
				break;
			}
			dev_kfree_skb_any(skb);
		}
		return -ENOMEM;
	}

	/* Append each continuation buffer's payload (past its rx
	 * descriptor) to @first, freeing the source buffer as we go.
	 */
	rem_len = msdu_len - buf_first_len;
	while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
		rxcb = ATH11K_SKB_RXCB(skb);
		if (rxcb->is_continuation)
			buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
		else
			buf_len = rem_len;

		if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) {
			WARN_ON_ONCE(1);
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		skb_put(skb, buf_len + hal_rx_desc_sz);
		skb_pull(skb, hal_rx_desc_sz);
		skb_copy_from_linear_data(skb, skb_put(first, buf_len),
					  buf_len);
		dev_kfree_skb_any(skb);

		rem_len -= buf_len;
		if (!rxcb->is_continuation)
			break;
	}

	return 0;
}
1867dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head * msdu_list,struct sk_buff * first)1868dd4f32aeSBjoern A. Zeeb static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
1869dd4f32aeSBjoern A. Zeeb struct sk_buff *first)
1870dd4f32aeSBjoern A. Zeeb {
1871dd4f32aeSBjoern A. Zeeb struct sk_buff *skb;
1872dd4f32aeSBjoern A. Zeeb struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
1873dd4f32aeSBjoern A. Zeeb
1874dd4f32aeSBjoern A. Zeeb if (!rxcb->is_continuation)
1875dd4f32aeSBjoern A. Zeeb return first;
1876dd4f32aeSBjoern A. Zeeb
1877dd4f32aeSBjoern A. Zeeb skb_queue_walk(msdu_list, skb) {
1878dd4f32aeSBjoern A. Zeeb rxcb = ATH11K_SKB_RXCB(skb);
1879dd4f32aeSBjoern A. Zeeb if (!rxcb->is_continuation)
1880dd4f32aeSBjoern A. Zeeb return skb;
1881dd4f32aeSBjoern A. Zeeb }
1882dd4f32aeSBjoern A. Zeeb
1883dd4f32aeSBjoern A. Zeeb return NULL;
1884dd4f32aeSBjoern A. Zeeb }
1885dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_h_csum_offload(struct ath11k * ar,struct sk_buff * msdu)1886dd4f32aeSBjoern A. Zeeb static void ath11k_dp_rx_h_csum_offload(struct ath11k *ar, struct sk_buff *msdu)
1887dd4f32aeSBjoern A. Zeeb {
1888dd4f32aeSBjoern A. Zeeb struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
1889dd4f32aeSBjoern A. Zeeb struct rx_attention *rx_attention;
1890dd4f32aeSBjoern A. Zeeb bool ip_csum_fail, l4_csum_fail;
1891dd4f32aeSBjoern A. Zeeb
1892dd4f32aeSBjoern A. Zeeb rx_attention = ath11k_dp_rx_get_attention(ar->ab, rxcb->rx_desc);
1893dd4f32aeSBjoern A. Zeeb ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rx_attention);
1894dd4f32aeSBjoern A. Zeeb l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rx_attention);
1895dd4f32aeSBjoern A. Zeeb
1896dd4f32aeSBjoern A. Zeeb msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
1897dd4f32aeSBjoern A. Zeeb CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
1898dd4f32aeSBjoern A. Zeeb }
1899dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_crypto_mic_len(struct ath11k * ar,enum hal_encrypt_type enctype)1900dd4f32aeSBjoern A. Zeeb static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar,
1901dd4f32aeSBjoern A. Zeeb enum hal_encrypt_type enctype)
1902dd4f32aeSBjoern A. Zeeb {
1903dd4f32aeSBjoern A. Zeeb switch (enctype) {
1904dd4f32aeSBjoern A. Zeeb case HAL_ENCRYPT_TYPE_OPEN:
1905dd4f32aeSBjoern A. Zeeb case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1906dd4f32aeSBjoern A. Zeeb case HAL_ENCRYPT_TYPE_TKIP_MIC:
1907dd4f32aeSBjoern A. Zeeb return 0;
1908dd4f32aeSBjoern A. Zeeb case HAL_ENCRYPT_TYPE_CCMP_128:
1909dd4f32aeSBjoern A. Zeeb return IEEE80211_CCMP_MIC_LEN;
1910dd4f32aeSBjoern A. Zeeb case HAL_ENCRYPT_TYPE_CCMP_256:
1911dd4f32aeSBjoern A. Zeeb return IEEE80211_CCMP_256_MIC_LEN;
1912dd4f32aeSBjoern A. Zeeb case HAL_ENCRYPT_TYPE_GCMP_128:
1913dd4f32aeSBjoern A. Zeeb case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1914dd4f32aeSBjoern A. Zeeb return IEEE80211_GCMP_MIC_LEN;
1915dd4f32aeSBjoern A. Zeeb case HAL_ENCRYPT_TYPE_WEP_40:
1916dd4f32aeSBjoern A. Zeeb case HAL_ENCRYPT_TYPE_WEP_104:
1917dd4f32aeSBjoern A. Zeeb case HAL_ENCRYPT_TYPE_WEP_128:
1918dd4f32aeSBjoern A. Zeeb case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1919dd4f32aeSBjoern A. Zeeb case HAL_ENCRYPT_TYPE_WAPI:
1920dd4f32aeSBjoern A. Zeeb break;
1921dd4f32aeSBjoern A. Zeeb }
1922dd4f32aeSBjoern A. Zeeb
1923dd4f32aeSBjoern A. Zeeb ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
1924dd4f32aeSBjoern A. Zeeb return 0;
1925dd4f32aeSBjoern A. Zeeb }
1926dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_crypto_param_len(struct ath11k * ar,enum hal_encrypt_type enctype)1927dd4f32aeSBjoern A. Zeeb static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar,
1928dd4f32aeSBjoern A. Zeeb enum hal_encrypt_type enctype)
1929dd4f32aeSBjoern A. Zeeb {
1930dd4f32aeSBjoern A. Zeeb switch (enctype) {
1931dd4f32aeSBjoern A. Zeeb case HAL_ENCRYPT_TYPE_OPEN:
1932dd4f32aeSBjoern A. Zeeb return 0;
1933dd4f32aeSBjoern A. Zeeb case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1934dd4f32aeSBjoern A. Zeeb case HAL_ENCRYPT_TYPE_TKIP_MIC:
1935dd4f32aeSBjoern A. Zeeb return IEEE80211_TKIP_IV_LEN;
1936dd4f32aeSBjoern A. Zeeb case HAL_ENCRYPT_TYPE_CCMP_128:
1937dd4f32aeSBjoern A. Zeeb return IEEE80211_CCMP_HDR_LEN;
1938dd4f32aeSBjoern A. Zeeb case HAL_ENCRYPT_TYPE_CCMP_256:
1939dd4f32aeSBjoern A. Zeeb return IEEE80211_CCMP_256_HDR_LEN;
1940dd4f32aeSBjoern A. Zeeb case HAL_ENCRYPT_TYPE_GCMP_128:
1941dd4f32aeSBjoern A. Zeeb case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1942dd4f32aeSBjoern A. Zeeb return IEEE80211_GCMP_HDR_LEN;
1943dd4f32aeSBjoern A. Zeeb case HAL_ENCRYPT_TYPE_WEP_40:
1944dd4f32aeSBjoern A. Zeeb case HAL_ENCRYPT_TYPE_WEP_104:
1945dd4f32aeSBjoern A. Zeeb case HAL_ENCRYPT_TYPE_WEP_128:
1946dd4f32aeSBjoern A. Zeeb case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1947dd4f32aeSBjoern A. Zeeb case HAL_ENCRYPT_TYPE_WAPI:
1948dd4f32aeSBjoern A. Zeeb break;
1949dd4f32aeSBjoern A. Zeeb }
1950dd4f32aeSBjoern A. Zeeb
1951dd4f32aeSBjoern A. Zeeb ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1952dd4f32aeSBjoern A. Zeeb return 0;
1953dd4f32aeSBjoern A. Zeeb }
1954dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_crypto_icv_len(struct ath11k * ar,enum hal_encrypt_type enctype)1955dd4f32aeSBjoern A. Zeeb static int ath11k_dp_rx_crypto_icv_len(struct ath11k *ar,
1956dd4f32aeSBjoern A. Zeeb enum hal_encrypt_type enctype)
1957dd4f32aeSBjoern A. Zeeb {
1958dd4f32aeSBjoern A. Zeeb switch (enctype) {
1959dd4f32aeSBjoern A. Zeeb case HAL_ENCRYPT_TYPE_OPEN:
1960dd4f32aeSBjoern A. Zeeb case HAL_ENCRYPT_TYPE_CCMP_128:
1961dd4f32aeSBjoern A. Zeeb case HAL_ENCRYPT_TYPE_CCMP_256:
1962dd4f32aeSBjoern A. Zeeb case HAL_ENCRYPT_TYPE_GCMP_128:
1963dd4f32aeSBjoern A. Zeeb case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1964dd4f32aeSBjoern A. Zeeb return 0;
1965dd4f32aeSBjoern A. Zeeb case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1966dd4f32aeSBjoern A. Zeeb case HAL_ENCRYPT_TYPE_TKIP_MIC:
1967dd4f32aeSBjoern A. Zeeb return IEEE80211_TKIP_ICV_LEN;
1968dd4f32aeSBjoern A. Zeeb case HAL_ENCRYPT_TYPE_WEP_40:
1969dd4f32aeSBjoern A. Zeeb case HAL_ENCRYPT_TYPE_WEP_104:
1970dd4f32aeSBjoern A. Zeeb case HAL_ENCRYPT_TYPE_WEP_128:
1971dd4f32aeSBjoern A. Zeeb case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1972dd4f32aeSBjoern A. Zeeb case HAL_ENCRYPT_TYPE_WAPI:
1973dd4f32aeSBjoern A. Zeeb break;
1974dd4f32aeSBjoern A. Zeeb }
1975dd4f32aeSBjoern A. Zeeb
1976dd4f32aeSBjoern A. Zeeb ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1977dd4f32aeSBjoern A. Zeeb return 0;
1978dd4f32aeSBjoern A. Zeeb }
1979dd4f32aeSBjoern A. Zeeb
/* Convert a hardware-decapped "native wifi" MSDU back into a full
 * 802.11 frame in place.
 *
 * For the first A-MSDU subframe the original 802.11 header from the rx
 * descriptor (@first_hdr) is reused; for middle/last subframes a QoS
 * data header is rebuilt from the decapped header plus the TID kept in
 * the skb control block.  If the hardware left the IV in place
 * (!RX_FLAG_IV_STRIPPED), the crypto params are re-inserted between
 * header and payload.
 */
static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar,
					 struct sk_buff *msdu,
					 u8 *first_hdr,
					 enum hal_encrypt_type enctype,
					 struct ieee80211_rx_status *status)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	u16 qos_ctl = 0;
	u8 *qos;

	/* copy SA & DA and pull decapped header */
	hdr = (struct ieee80211_hdr *)msdu->data;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
	skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control));

	if (rxcb->is_first_msdu) {
		/* original 802.11 header is valid for the first msdu
		 * hence we can reuse the same header
		 */
		hdr = (struct ieee80211_hdr *)first_hdr;
		hdr_len = ieee80211_hdrlen(hdr->frame_control);

		/* Each A-MSDU subframe will be reported as a separate MSDU,
		 * so strip the A-MSDU bit from QoS Ctl.
		 */
		if (ieee80211_is_data_qos(hdr->frame_control)) {
			qos = ieee80211_get_qos_ctl(hdr);
			qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
		}
	} else {
		/* Rebuild qos header if this is a middle/last msdu */
		hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA);

		/* Reset the order bit as the HT_Control header is stripped */
		hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER));

		qos_ctl = rxcb->tid;

		if (ath11k_dp_rx_h_msdu_start_mesh_ctl_present(ar->ab, rxcb->rx_desc))
			qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;

		/* TODO Add other QoS ctl fields when required */

		/* copy decap header before overwriting for reuse below */
		memcpy(decap_hdr, (uint8_t *)hdr, hdr_len);
	}

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		/* IV left in place by hardware: copy the crypto params
		 * that follow the original header back in front of the
		 * payload.
		 */
		memcpy(skb_push(msdu,
				ath11k_dp_rx_crypto_param_len(ar, enctype)),
#if defined(__linux__)
		       (void *)hdr + hdr_len,
#elif defined(__FreeBSD__)
		       (u8 *)hdr + hdr_len,
#endif
		       ath11k_dp_rx_crypto_param_len(ar, enctype));
	}

	if (!rxcb->is_first_msdu) {
		/* Middle/last subframe: push rebuilt QoS ctl + saved header. */
		memcpy(skb_push(msdu,
				IEEE80211_QOS_CTL_LEN), &qos_ctl,
		       IEEE80211_QOS_CTL_LEN);
		memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len);
		return;
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}
2062dd4f32aeSBjoern A. Zeeb
/* Undecap a raw-mode MSDU: strip the FCS and, for decrypted frames,
 * remove whatever crypto material (MIC/ICV tail, IV header) the
 * hardware left in place according to the RX_FLAG_*_STRIPPED status
 * flags.  Raw mode delivers the complete MPDU, so this is only valid
 * when the buffer is both first and last MSDU.
 */
static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu,
				       enum hal_encrypt_type enctype,
				       struct ieee80211_rx_status *status,
				       bool decrypted)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	size_t crypto_len;

	/* NOTE(review): this condition reduces to
	 * !(is_first_msdu && is_last_msdu); the first clause is redundant.
	 */
	if (!rxcb->is_first_msdu ||
	    !(rxcb->is_first_msdu && rxcb->is_last_msdu)) {
		WARN_ON_ONCE(1);
		return;
	}

	skb_trim(msdu, msdu->len - FCS_LEN);

	if (!decrypted)
		return;

	hdr = (void *)msdu->data;

	/* Tail */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		/* Hardware removed the IV but the MIC and ICV are still
		 * present at the tail; trim both.
		 */
		skb_trim(msdu, msdu->len -
			 ath11k_dp_rx_crypto_mic_len(ar, enctype));

		skb_trim(msdu, msdu->len -
			 ath11k_dp_rx_crypto_icv_len(ar, enctype));
	} else {
		/* MIC */
		if (status->flag & RX_FLAG_MIC_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath11k_dp_rx_crypto_mic_len(ar, enctype));

		/* ICV */
		if (status->flag & RX_FLAG_ICV_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath11k_dp_rx_crypto_icv_len(ar, enctype));
	}

	/* MMIC */
	/* NOTE(review): trims IEEE80211_CCMP_MIC_LEN (8 bytes) for the
	 * TKIP Michael MIC — same byte count, but the CCMP constant is
	 * presumably used for its value only.
	 */
	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
		skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);

	/* Head */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		/* Close the gap left by the stripped IV: slide the 802.11
		 * header forward over it, then drop the leading bytes.
		 */
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);

#if defined(__linux__)
		memmove((void *)msdu->data + crypto_len,
			(void *)msdu->data, hdr_len);
#elif defined(__FreeBSD__)
		memmove((u8 *)msdu->data + crypto_len,
			(u8 *)msdu->data, hdr_len);
#endif
		skb_pull(msdu, crypto_len);
	}
}
2126dd4f32aeSBjoern A. Zeeb
/* Locate the RFC 1042 (LLC/SNAP) header inside the 802.11 header
 * snapshot kept in the rx descriptor.
 *
 * Starting from the descriptor's copy of the 802.11 header, skip the
 * header and crypto params for a first MSDU, plus the A-MSDU subframe
 * header when the frame is part of an A-MSDU.  The returned pointer is
 * into descriptor memory, not into @msdu's payload.
 */
static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar,
					 struct sk_buff *msdu,
					 enum hal_encrypt_type enctype)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	struct ieee80211_hdr *hdr;
	size_t hdr_len, crypto_len;
#if defined(__linux__)
	void *rfc1042;
#elif defined(__FreeBSD__)
	u8 *rfc1042;
#endif
	bool is_amsdu;

	is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu);
	hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(ar->ab, rxcb->rx_desc);
#if defined(__linux__)
	rfc1042 = hdr;
#elif defined(__FreeBSD__)
	rfc1042 = (void *)hdr;
#endif

	if (rxcb->is_first_msdu) {
		/* Only the first subframe carries the 802.11 header and
		 * crypto params in front of the LLC/SNAP header.
		 */
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);

		rfc1042 += hdr_len + crypto_len;
	}

	if (is_amsdu)
		rfc1042 += sizeof(struct ath11k_dp_amsdu_subframe_hdr);

	return rfc1042;
}
2161dd4f32aeSBjoern A. Zeeb
/* Convert an Ethernet-decapped MSDU back into a full 802.11 frame in
 * place: drop the Ethernet header, prepend the RFC 1042 (LLC/SNAP)
 * header recovered from the rx descriptor, optionally the crypto
 * params (when the IV was not stripped), and the original 802.11
 * header, then restore DA/SA from the Ethernet header.
 */
static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar,
				       struct sk_buff *msdu,
				       u8 *first_hdr,
				       enum hal_encrypt_type enctype,
				       struct ieee80211_rx_status *status)
{
	struct ieee80211_hdr *hdr;
	struct ethhdr *eth;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	void *rfc1042;

	rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype);
	if (WARN_ON_ONCE(!rfc1042))
		return;

	/* pull decapped header and copy SA & DA */
	eth = (struct ethhdr *)msdu->data;
	ether_addr_copy(da, eth->h_dest);
	ether_addr_copy(sa, eth->h_source);
	skb_pull(msdu, sizeof(struct ethhdr));

	/* push rfc1042/llc/snap */
	memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042,
	       sizeof(struct ath11k_dp_rfc1042_hdr));

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		/* IV left in place by hardware: re-insert the crypto
		 * params that follow the original header.
		 */
		memcpy(skb_push(msdu,
				ath11k_dp_rx_crypto_param_len(ar, enctype)),
#if defined(__linux__)
		       (void *)hdr + hdr_len,
#elif defined(__FreeBSD__)
		       (u8 *)hdr + hdr_len,
#endif
		       ath11k_dp_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}
2213dd4f32aeSBjoern A. Zeeb
/* Restore an MSDU to 802.11 framing based on the decap mode recorded in
 * its rx descriptor, dispatching to the mode-specific undecap helper.
 */
static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
				   struct hal_rx_desc *rx_desc,
				   enum hal_encrypt_type enctype,
				   struct ieee80211_rx_status *status,
				   bool decrypted)
{
	struct ethhdr *eth;
	u8 *first_hdr;
	u8 decap;

	first_hdr = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc);
	decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc);

	switch (decap) {
	case DP_RX_DECAP_TYPE_NATIVE_WIFI:
		ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr,
					     enctype, status);
		break;
	case DP_RX_DECAP_TYPE_RAW:
		ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status,
					   decrypted);
		break;
	case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
		eth = (struct ethhdr *)msdu->data;

		if (eth->h_proto == cpu_to_be16(ETH_P_PAE)) {
			/* mac80211 allows fast path only for authorized STA,
			 * so EAPOL frames must go the full 802.11 route.
			 */
			ATH11K_SKB_RXCB(msdu)->is_eapol = true;
			ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
						   enctype, status);
		} else if (ATH11K_SKB_RXCB(msdu)->is_mcbc && decrypted) {
			/* PN for mcast packets will be validated in mac80211;
			 * remove eth header and add 802.11 header.
			 */
			ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
						   enctype, status);
		}
		break;
	case DP_RX_DECAP_TYPE_8023:
		/* TODO: Handle undecap for these formats */
		break;
	}
}
2259dd4f32aeSBjoern A. Zeeb
2260dd4f32aeSBjoern A. Zeeb static struct ath11k_peer *
ath11k_dp_rx_h_find_peer(struct ath11k_base * ab,struct sk_buff * msdu)2261dd4f32aeSBjoern A. Zeeb ath11k_dp_rx_h_find_peer(struct ath11k_base *ab, struct sk_buff *msdu)
2262dd4f32aeSBjoern A. Zeeb {
2263dd4f32aeSBjoern A. Zeeb struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
2264dd4f32aeSBjoern A. Zeeb struct hal_rx_desc *rx_desc = rxcb->rx_desc;
2265dd4f32aeSBjoern A. Zeeb struct ath11k_peer *peer = NULL;
2266dd4f32aeSBjoern A. Zeeb
2267dd4f32aeSBjoern A. Zeeb lockdep_assert_held(&ab->base_lock);
2268dd4f32aeSBjoern A. Zeeb
2269dd4f32aeSBjoern A. Zeeb if (rxcb->peer_id)
2270dd4f32aeSBjoern A. Zeeb peer = ath11k_peer_find_by_id(ab, rxcb->peer_id);
2271dd4f32aeSBjoern A. Zeeb
2272dd4f32aeSBjoern A. Zeeb if (peer)
2273dd4f32aeSBjoern A. Zeeb return peer;
2274dd4f32aeSBjoern A. Zeeb
2275dd4f32aeSBjoern A. Zeeb if (!rx_desc || !(ath11k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)))
2276dd4f32aeSBjoern A. Zeeb return NULL;
2277dd4f32aeSBjoern A. Zeeb
2278dd4f32aeSBjoern A. Zeeb peer = ath11k_peer_find_by_addr(ab,
2279dd4f32aeSBjoern A. Zeeb ath11k_dp_rxdesc_mpdu_start_addr2(ab, rx_desc));
2280dd4f32aeSBjoern A. Zeeb return peer;
2281dd4f32aeSBjoern A. Zeeb }
2282dd4f32aeSBjoern A. Zeeb
/* Post-process one MPDU's rx status: resolve the cipher in use, translate
 * the hardware's decrypt/error results into mac80211 RX_FLAG_* bits, apply
 * checksum-offload results, and undecap the frame.
 *
 * For unicast frames the hardware fully validated (PN checked, IV/MIC
 * stripped), the IEEE80211_FCTL_PROTECTED bit is cleared afterwards so
 * mac80211 does not attempt to decrypt the frame again.
 */
static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
				struct sk_buff *msdu,
				struct hal_rx_desc *rx_desc,
				struct ieee80211_rx_status *rx_status)
{
	bool fill_crypto_hdr;
	enum hal_encrypt_type enctype;
	bool is_decrypted = false;
	struct ath11k_skb_rxcb *rxcb;
	struct ieee80211_hdr *hdr;
	struct ath11k_peer *peer;
	struct rx_attention *rx_attention;
	u32 err_bitmap;

	/* PN for multicast packets will be checked in mac80211 */
	rxcb = ATH11K_SKB_RXCB(msdu);
	fill_crypto_hdr = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc);
	rxcb->is_mcbc = fill_crypto_hdr;

	if (rxcb->is_mcbc) {
		/* For mcast/bcast frames, take peer_id and sequence number
		 * from the rx descriptor rather than the REO metadata.
		 */
		rxcb->peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
		rxcb->seq_no = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
	}

	/* Prefer the cipher negotiated with the peer (group cipher for
	 * mcast/bcast); fall back to the encrypt type reported by the rx
	 * descriptor when no peer entry is found.
	 */
	spin_lock_bh(&ar->ab->base_lock);
	peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu);
	if (peer) {
		if (rxcb->is_mcbc)
			enctype = peer->sec_type_grp;
		else
			enctype = peer->sec_type;
	} else {
		enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc);
	}
	spin_unlock_bh(&ar->ab->base_lock);

	rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc);
	err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
	/* Only trust the hw "decrypted" indication for encrypted frames
	 * that carry no MPDU-level errors.
	 */
	if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap)
		is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention);

	/* Clear per-MPDU flags while leaving per-PPDU flags intact */
	rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
			     RX_FLAG_MMIC_ERROR |
			     RX_FLAG_DECRYPTED |
			     RX_FLAG_IV_STRIPPED |
			     RX_FLAG_MMIC_STRIPPED);

	if (err_bitmap & DP_RX_MPDU_ERR_FCS)
		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
	if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC)
		rx_status->flag |= RX_FLAG_MMIC_ERROR;

	if (is_decrypted) {
		rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;

		/* mcast/bcast (fill_crypto_hdr): MIC/ICV are stripped but
		 * the PN header is left in place so mac80211 can run the PN
		 * check itself.  Unicast: IV is stripped and the PN has
		 * already been validated in hardware.
		 */
		if (fill_crypto_hdr)
			rx_status->flag |= RX_FLAG_MIC_STRIPPED |
					RX_FLAG_ICV_STRIPPED;
		else
			rx_status->flag |= RX_FLAG_IV_STRIPPED |
					   RX_FLAG_PN_VALIDATED;
	}

	ath11k_dp_rx_h_csum_offload(ar, msdu);
	ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
			       enctype, rx_status, is_decrypted);

	if (!is_decrypted || fill_crypto_hdr)
		return;

	/* Ethernet-decapped frames carry no 802.11 header to patch up */
	if (ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc) !=
	    DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
		hdr = (void *)msdu->data;
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	}
}
2360dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_h_rate(struct ath11k * ar,struct hal_rx_desc * rx_desc,struct ieee80211_rx_status * rx_status)2361dd4f32aeSBjoern A. Zeeb static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
2362dd4f32aeSBjoern A. Zeeb struct ieee80211_rx_status *rx_status)
2363dd4f32aeSBjoern A. Zeeb {
2364dd4f32aeSBjoern A. Zeeb struct ieee80211_supported_band *sband;
2365dd4f32aeSBjoern A. Zeeb enum rx_msdu_start_pkt_type pkt_type;
2366dd4f32aeSBjoern A. Zeeb u8 bw;
2367dd4f32aeSBjoern A. Zeeb u8 rate_mcs, nss;
2368dd4f32aeSBjoern A. Zeeb u8 sgi;
2369dd4f32aeSBjoern A. Zeeb bool is_cck, is_ldpc;
2370dd4f32aeSBjoern A. Zeeb
2371dd4f32aeSBjoern A. Zeeb pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(ar->ab, rx_desc);
2372dd4f32aeSBjoern A. Zeeb bw = ath11k_dp_rx_h_msdu_start_rx_bw(ar->ab, rx_desc);
2373dd4f32aeSBjoern A. Zeeb rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(ar->ab, rx_desc);
2374dd4f32aeSBjoern A. Zeeb nss = ath11k_dp_rx_h_msdu_start_nss(ar->ab, rx_desc);
2375dd4f32aeSBjoern A. Zeeb sgi = ath11k_dp_rx_h_msdu_start_sgi(ar->ab, rx_desc);
2376dd4f32aeSBjoern A. Zeeb
2377dd4f32aeSBjoern A. Zeeb switch (pkt_type) {
2378dd4f32aeSBjoern A. Zeeb case RX_MSDU_START_PKT_TYPE_11A:
2379dd4f32aeSBjoern A. Zeeb case RX_MSDU_START_PKT_TYPE_11B:
2380dd4f32aeSBjoern A. Zeeb is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
2381dd4f32aeSBjoern A. Zeeb sband = &ar->mac.sbands[rx_status->band];
2382dd4f32aeSBjoern A. Zeeb rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs,
2383dd4f32aeSBjoern A. Zeeb is_cck);
2384dd4f32aeSBjoern A. Zeeb break;
2385dd4f32aeSBjoern A. Zeeb case RX_MSDU_START_PKT_TYPE_11N:
2386dd4f32aeSBjoern A. Zeeb rx_status->encoding = RX_ENC_HT;
2387dd4f32aeSBjoern A. Zeeb if (rate_mcs > ATH11K_HT_MCS_MAX) {
2388dd4f32aeSBjoern A. Zeeb ath11k_warn(ar->ab,
2389dd4f32aeSBjoern A. Zeeb "Received with invalid mcs in HT mode %d\n",
2390dd4f32aeSBjoern A. Zeeb rate_mcs);
2391dd4f32aeSBjoern A. Zeeb break;
2392dd4f32aeSBjoern A. Zeeb }
2393dd4f32aeSBjoern A. Zeeb rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
2394dd4f32aeSBjoern A. Zeeb if (sgi)
2395dd4f32aeSBjoern A. Zeeb rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2396dd4f32aeSBjoern A. Zeeb rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
2397dd4f32aeSBjoern A. Zeeb break;
2398dd4f32aeSBjoern A. Zeeb case RX_MSDU_START_PKT_TYPE_11AC:
2399dd4f32aeSBjoern A. Zeeb rx_status->encoding = RX_ENC_VHT;
2400dd4f32aeSBjoern A. Zeeb rx_status->rate_idx = rate_mcs;
2401dd4f32aeSBjoern A. Zeeb if (rate_mcs > ATH11K_VHT_MCS_MAX) {
2402dd4f32aeSBjoern A. Zeeb ath11k_warn(ar->ab,
2403dd4f32aeSBjoern A. Zeeb "Received with invalid mcs in VHT mode %d\n",
2404dd4f32aeSBjoern A. Zeeb rate_mcs);
2405dd4f32aeSBjoern A. Zeeb break;
2406dd4f32aeSBjoern A. Zeeb }
2407dd4f32aeSBjoern A. Zeeb rx_status->nss = nss;
2408dd4f32aeSBjoern A. Zeeb if (sgi)
2409dd4f32aeSBjoern A. Zeeb rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2410dd4f32aeSBjoern A. Zeeb rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
2411dd4f32aeSBjoern A. Zeeb is_ldpc = ath11k_dp_rx_h_msdu_start_ldpc_support(ar->ab, rx_desc);
2412dd4f32aeSBjoern A. Zeeb if (is_ldpc)
2413dd4f32aeSBjoern A. Zeeb rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
2414dd4f32aeSBjoern A. Zeeb break;
2415dd4f32aeSBjoern A. Zeeb case RX_MSDU_START_PKT_TYPE_11AX:
2416dd4f32aeSBjoern A. Zeeb rx_status->rate_idx = rate_mcs;
2417dd4f32aeSBjoern A. Zeeb if (rate_mcs > ATH11K_HE_MCS_MAX) {
2418dd4f32aeSBjoern A. Zeeb ath11k_warn(ar->ab,
2419dd4f32aeSBjoern A. Zeeb "Received with invalid mcs in HE mode %d\n",
2420dd4f32aeSBjoern A. Zeeb rate_mcs);
2421dd4f32aeSBjoern A. Zeeb break;
2422dd4f32aeSBjoern A. Zeeb }
2423dd4f32aeSBjoern A. Zeeb rx_status->encoding = RX_ENC_HE;
2424dd4f32aeSBjoern A. Zeeb rx_status->nss = nss;
2425dd4f32aeSBjoern A. Zeeb rx_status->he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
2426dd4f32aeSBjoern A. Zeeb rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
2427dd4f32aeSBjoern A. Zeeb break;
2428dd4f32aeSBjoern A. Zeeb }
2429dd4f32aeSBjoern A. Zeeb }
2430dd4f32aeSBjoern A. Zeeb
/* Fill per-PPDU rx status: derive band and frequency from the channel
 * metadata in the rx descriptor, falling back to the radio's current rx
 * channel when the reported channel number is outside known ranges, then
 * fill in the rate fields.
 */
static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
				struct ieee80211_rx_status *rx_status)
{
	u8 channel_num;
	u32 center_freq, meta_data;
	struct ieee80211_channel *channel;

	rx_status->freq = 0;
	rx_status->rate_idx = 0;
	rx_status->nss = 0;
	rx_status->encoding = RX_ENC_LEGACY;
	rx_status->bw = RATE_INFO_BW_20;

	rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;

	/* Low 16 bits carry the channel number, high 16 bits the center
	 * frequency in MHz.
	 */
	meta_data = ath11k_dp_rx_h_msdu_start_freq(ar->ab, rx_desc);
	channel_num = meta_data;
	center_freq = meta_data >> 16;

	if (center_freq >= ATH11K_MIN_6G_FREQ &&
	    center_freq <= ATH11K_MAX_6G_FREQ) {
		/* 6 GHz channel numbers alias 2.4/5 GHz ones, so the band
		 * must be decided from the center frequency here.
		 */
		rx_status->band = NL80211_BAND_6GHZ;
		rx_status->freq = center_freq;
	} else if (channel_num >= 1 && channel_num <= 14) {
		rx_status->band = NL80211_BAND_2GHZ;
	} else if (channel_num >= 36 && channel_num <= 177) {
		rx_status->band = NL80211_BAND_5GHZ;
	} else {
		/* Unrecognized channel: fall back to the current operating
		 * channel and dump the descriptor for debugging.
		 */
		spin_lock_bh(&ar->data_lock);
		channel = ar->rx_channel;
		if (channel) {
			rx_status->band = channel->band;
			channel_num =
				ieee80211_frequency_to_channel(channel->center_freq);
		}
		spin_unlock_bh(&ar->data_lock);
		ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ",
				rx_desc, sizeof(struct hal_rx_desc));
	}

	/* 6 GHz frequency was assigned directly above; derive it from the
	 * channel number for the 2.4/5 GHz bands.
	 */
	if (rx_status->band != NL80211_BAND_6GHZ)
		rx_status->freq = ieee80211_channel_to_frequency(channel_num,
								 rx_status->band);

	ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);
}
2477dd4f32aeSBjoern A. Zeeb
/* Hand one fully-processed MSDU to mac80211: prepend a radiotap HE header
 * for HE frames, resolve the station so mac80211 can skip its own lookup,
 * and select either the 802.3 fast-rx path or the regular 802.11 path.
 */
static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi,
				      struct sk_buff *msdu,
				      struct ieee80211_rx_status *status)
{
	static const struct ieee80211_radiotap_he known = {
		.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
				     IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
		.data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
	};
	struct ieee80211_rx_status *rx_status;
	struct ieee80211_radiotap_he *he = NULL;
	struct ieee80211_sta *pubsta = NULL;
	struct ath11k_peer *peer;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	u8 decap = DP_RX_DECAP_TYPE_RAW;
	bool is_mcbc = rxcb->is_mcbc;
	bool is_eapol = rxcb->is_eapol;

	/* HE rates cannot be expressed in the legacy radiotap rate fields,
	 * so push an ieee80211_radiotap_he template that mac80211 fills in.
	 */
	if (status->encoding == RX_ENC_HE &&
	    !(status->flag & RX_FLAG_RADIOTAP_HE) &&
	    !(status->flag & RX_FLAG_SKIP_MONITOR)) {
		he = skb_push(msdu, sizeof(known));
		memcpy(he, &known, sizeof(known));
		status->flag |= RX_FLAG_RADIOTAP_HE;
	}

	if (!(status->flag & RX_FLAG_ONLY_MONITOR))
		decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rxcb->rx_desc);

	spin_lock_bh(&ar->ab->base_lock);
	peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu);
	if (peer && peer->sta)
		pubsta = peer->sta;
	spin_unlock_bh(&ar->ab->base_lock);

	ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
		   "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
		   msdu,
		   msdu->len,
		   peer ? peer->addr : NULL,
		   rxcb->tid,
		   is_mcbc ? "mcast" : "ucast",
		   rxcb->seq_no,
		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
		   (status->encoding == RX_ENC_HT) ? "ht" : "",
		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
		   (status->encoding == RX_ENC_HE) ? "he" : "",
		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));

	ath11k_dbg_dump(ar->ab, ATH11K_DBG_DP_RX, NULL, "dp rx msdu: ",
			msdu->data, msdu->len);

	/* Copy the per-frame status into the skb's own control block */
	rx_status = IEEE80211_SKB_RXCB(msdu);
	*rx_status = *status;

	/* TODO: trace rx packet */

	/* PN for multicast packets are not validate in HW,
	 * so skip 802.3 rx path
	 * Also, fast_rx expects the STA to be authorized, hence
	 * eapol packets are sent in slow path.
	 */
	if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol &&
	    !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
		rx_status->flag |= RX_FLAG_8023;

	ieee80211_rx_napi(ar->hw, pubsta, msdu, napi);
}
2556dd4f32aeSBjoern A. Zeeb
/* Convert one raw rx buffer (or buffer chain) into a frame ready for
 * delivery: validate the descriptor, strip the hal descriptor and L3
 * padding (or coalesce continuation buffers), then fill in per-PPDU and
 * per-MPDU rx status.
 *
 * Returns 0 on success or a negative errno; on error the caller is
 * responsible for freeing the msdu.
 */
static int ath11k_dp_rx_process_msdu(struct ath11k *ar,
				     struct sk_buff *msdu,
				     struct sk_buff_head *msdu_list,
				     struct ieee80211_rx_status *rx_status)
{
	struct ath11k_base *ab = ar->ab;
	struct hal_rx_desc *rx_desc, *lrx_desc;
	struct rx_attention *rx_attention;
	struct ath11k_skb_rxcb *rxcb;
	struct sk_buff *last_buf;
	u8 l3_pad_bytes;
	u8 *hdr_status;
	u16 msdu_len;
	int ret;
	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;

	/* The attention/MSDU_END/MPDU_END tlvs live in the last buffer of
	 * a multi-buffer MSDU; without it nothing can be validated.
	 */
	last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
	if (!last_buf) {
		ath11k_warn(ab,
			    "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n");
		ret = -EIO;
		goto free_out;
	}

	rx_desc = (struct hal_rx_desc *)msdu->data;
	if (ath11k_dp_rx_h_attn_msdu_len_err(ab, rx_desc)) {
		ath11k_warn(ar->ab, "msdu len not valid\n");
		ret = -EIO;
		goto free_out;
	}

	lrx_desc = (struct hal_rx_desc *)last_buf->data;
	rx_attention = ath11k_dp_rx_get_attention(ab, lrx_desc);
	if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) {
		ath11k_warn(ab, "msdu_done bit in attention is not set\n");
		ret = -EIO;
		goto free_out;
	}

	rxcb = ATH11K_SKB_RXCB(msdu);
	rxcb->rx_desc = rx_desc;
	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ab, rx_desc);
	l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ab, lrx_desc);

	if (rxcb->is_frag) {
		/* Fragments: only strip the hal descriptor; reassembly is
		 * handled elsewhere.
		 */
		skb_pull(msdu, hal_rx_desc_sz);
	} else if (!rxcb->is_continuation) {
		/* Single-buffer MSDU: sanity-check the reported length,
		 * then trim the skb to exactly the payload.
		 */
		if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
			hdr_status = ath11k_dp_rx_h_80211_hdr(ab, rx_desc);
			ret = -EINVAL;
			ath11k_warn(ab, "invalid msdu len %u\n", msdu_len);
			ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
					sizeof(struct ieee80211_hdr));
			ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
					sizeof(struct hal_rx_desc));
			goto free_out;
		}
		skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len);
		skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes);
	} else {
		/* MSDU spans multiple rx buffers: merge them into one skb */
		ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list,
						 msdu, last_buf,
						 l3_pad_bytes, msdu_len);
		if (ret) {
			ath11k_warn(ab,
				    "failed to coalesce msdu rx buffer%d\n", ret);
			goto free_out;
		}
	}

	ath11k_dp_rx_h_ppdu(ar, rx_desc, rx_status);
	ath11k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status);

	rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;

	return 0;

free_out:
	return ret;
}
2637dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_process_received_packets(struct ath11k_base * ab,struct napi_struct * napi,struct sk_buff_head * msdu_list,int mac_id)2638dd4f32aeSBjoern A. Zeeb static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab,
2639dd4f32aeSBjoern A. Zeeb struct napi_struct *napi,
2640dd4f32aeSBjoern A. Zeeb struct sk_buff_head *msdu_list,
2641dd4f32aeSBjoern A. Zeeb int mac_id)
2642dd4f32aeSBjoern A. Zeeb {
2643dd4f32aeSBjoern A. Zeeb struct sk_buff *msdu;
2644dd4f32aeSBjoern A. Zeeb struct ath11k *ar;
2645dd4f32aeSBjoern A. Zeeb struct ieee80211_rx_status rx_status = {0};
2646dd4f32aeSBjoern A. Zeeb int ret;
2647dd4f32aeSBjoern A. Zeeb
2648dd4f32aeSBjoern A. Zeeb if (skb_queue_empty(msdu_list))
2649dd4f32aeSBjoern A. Zeeb return;
2650dd4f32aeSBjoern A. Zeeb
2651dd4f32aeSBjoern A. Zeeb if (unlikely(!rcu_access_pointer(ab->pdevs_active[mac_id]))) {
2652dd4f32aeSBjoern A. Zeeb __skb_queue_purge(msdu_list);
2653dd4f32aeSBjoern A. Zeeb return;
2654dd4f32aeSBjoern A. Zeeb }
2655dd4f32aeSBjoern A. Zeeb
2656dd4f32aeSBjoern A. Zeeb ar = ab->pdevs[mac_id].ar;
2657dd4f32aeSBjoern A. Zeeb if (unlikely(test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags))) {
2658dd4f32aeSBjoern A. Zeeb __skb_queue_purge(msdu_list);
2659dd4f32aeSBjoern A. Zeeb return;
2660dd4f32aeSBjoern A. Zeeb }
2661dd4f32aeSBjoern A. Zeeb
2662dd4f32aeSBjoern A. Zeeb while ((msdu = __skb_dequeue(msdu_list))) {
2663dd4f32aeSBjoern A. Zeeb ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status);
2664dd4f32aeSBjoern A. Zeeb if (unlikely(ret)) {
2665dd4f32aeSBjoern A. Zeeb ath11k_dbg(ab, ATH11K_DBG_DATA,
2666dd4f32aeSBjoern A. Zeeb "Unable to process msdu %d", ret);
2667dd4f32aeSBjoern A. Zeeb dev_kfree_skb_any(msdu);
2668dd4f32aeSBjoern A. Zeeb continue;
2669dd4f32aeSBjoern A. Zeeb }
2670dd4f32aeSBjoern A. Zeeb
2671dd4f32aeSBjoern A. Zeeb ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status);
2672dd4f32aeSBjoern A. Zeeb }
2673dd4f32aeSBjoern A. Zeeb }
2674dd4f32aeSBjoern A. Zeeb
/* NAPI poll handler for one REO destination ring: reap up to @budget
 * completed MSDUs, sort them into per-radio lists, process/deliver them,
 * and replenish the rxdma rings with the number of buffers consumed.
 *
 * Returns the number of complete MSDUs reaped (continuation buffers of a
 * multi-buffer MSDU do not count until the final buffer arrives).
 */
int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
			 struct napi_struct *napi, int budget)
{
	struct ath11k_dp *dp = &ab->dp;
	struct dp_rxdma_ring *rx_ring;
	int num_buffs_reaped[MAX_RADIOS] = {0};
	struct sk_buff_head msdu_list[MAX_RADIOS];
	struct ath11k_skb_rxcb *rxcb;
	int total_msdu_reaped = 0;
	struct hal_srng *srng;
	struct sk_buff *msdu;
	bool done = false;
	int buf_id, mac_id;
	struct ath11k *ar;
	struct hal_reo_dest_ring *desc;
	enum hal_reo_dest_ring_push_reason push_reason;
	u32 cookie;
	int i;

	for (i = 0; i < MAX_RADIOS; i++)
		__skb_queue_head_init(&msdu_list[i]);

	srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];

	spin_lock_bh(&srng->lock);

try_again:
	ath11k_hal_srng_access_begin(ab, srng);

	while (likely(desc =
	      (struct hal_reo_dest_ring *)ath11k_hal_srng_dst_get_next_entry(ab,
									     srng))) {
		/* The sw cookie encodes both the owning pdev and the idr
		 * slot of the buffer.
		 */
		cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
				   desc->buf_addr_info.info1);
		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
				   cookie);
		mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);

		/* buf_id 0 is never handed out by the idr; skip stale entry */
		if (unlikely(buf_id == 0))
			continue;

		ar = ab->pdevs[mac_id].ar;
		rx_ring = &ar->dp.rx_refill_buf_ring;
		spin_lock_bh(&rx_ring->idr_lock);
		msdu = idr_find(&rx_ring->bufs_idr, buf_id);
		if (unlikely(!msdu)) {
			ath11k_warn(ab, "frame rx with invalid buf_id %d\n",
				    buf_id);
			spin_unlock_bh(&rx_ring->idr_lock);
			continue;
		}

		/* Take ownership of the buffer away from the refill ring */
		idr_remove(&rx_ring->bufs_idr, buf_id);
		spin_unlock_bh(&rx_ring->idr_lock);

		rxcb = ATH11K_SKB_RXCB(msdu);
		dma_unmap_single(ab->dev, rxcb->paddr,
				 msdu->len + skb_tailroom(msdu),
				 DMA_FROM_DEVICE);

		num_buffs_reaped[mac_id]++;

		push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
					desc->info0);
		/* Anything not pushed by a routing instruction is an error
		 * indication; count it and drop the frame.
		 */
		if (unlikely(push_reason !=
			     HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION)) {
			dev_kfree_skb_any(msdu);
			ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++;
			continue;
		}

		/* Cache per-MSDU metadata from the REO descriptor in the
		 * skb control block for later processing stages.
		 */
		rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 &
					 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
		rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 &
					RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
		rxcb->is_continuation = !!(desc->rx_msdu_info.info0 &
					   RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
		rxcb->peer_id = FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID,
					  desc->rx_mpdu_info.meta_data);
		rxcb->seq_no = FIELD_GET(RX_MPDU_DESC_INFO0_SEQ_NUM,
					 desc->rx_mpdu_info.info0);
		rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM,
				      desc->info0);

		rxcb->mac_id = mac_id;
		__skb_queue_tail(&msdu_list[mac_id], msdu);

		/* A continuation buffer means the MSDU is incomplete, so
		 * keep reaping past the budget until its last buffer lands.
		 */
		if (rxcb->is_continuation) {
			done = false;
		} else {
			total_msdu_reaped++;
			done = true;
		}

		if (total_msdu_reaped >= budget)
			break;
	}

	/* Hw might have updated the head pointer after we cached it.
	 * In this case, even though there are entries in the ring we'll
	 * get rx_desc NULL. Give the read another try with updated cached
	 * head pointer so that we can reap complete MPDU in the current
	 * rx processing.
	 */
	if (unlikely(!done && ath11k_hal_srng_dst_num_free(ab, srng, true))) {
		ath11k_hal_srng_access_end(ab, srng);
		goto try_again;
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	if (unlikely(!total_msdu_reaped))
		goto exit;

	/* Deliver per radio, then give the consumed buffers back to rxdma */
	for (i = 0; i < ab->num_radios; i++) {
		if (!num_buffs_reaped[i])
			continue;

		ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list[i], i);

		ar = ab->pdevs[i].ar;
		rx_ring = &ar->dp.rx_refill_buf_ring;

		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
					   ab->hw_params.hal_params->rx_buf_rbm);
	}
exit:
	return total_msdu_reaped;
}
2806dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_update_peer_stats(struct ath11k_sta * arsta,struct hal_rx_mon_ppdu_info * ppdu_info)2807dd4f32aeSBjoern A. Zeeb static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
2808dd4f32aeSBjoern A. Zeeb struct hal_rx_mon_ppdu_info *ppdu_info)
2809dd4f32aeSBjoern A. Zeeb {
2810dd4f32aeSBjoern A. Zeeb struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats;
2811dd4f32aeSBjoern A. Zeeb u32 num_msdu;
2812dd4f32aeSBjoern A. Zeeb int i;
2813dd4f32aeSBjoern A. Zeeb
2814dd4f32aeSBjoern A. Zeeb if (!rx_stats)
2815dd4f32aeSBjoern A. Zeeb return;
2816dd4f32aeSBjoern A. Zeeb
2817*28348caeSBjoern A. Zeeb arsta->rssi_comb = ppdu_info->rssi_comb;
2818*28348caeSBjoern A. Zeeb ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb);
2819*28348caeSBjoern A. Zeeb
2820dd4f32aeSBjoern A. Zeeb num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
2821dd4f32aeSBjoern A. Zeeb ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;
2822dd4f32aeSBjoern A. Zeeb
2823dd4f32aeSBjoern A. Zeeb rx_stats->num_msdu += num_msdu;
2824dd4f32aeSBjoern A. Zeeb rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count +
2825dd4f32aeSBjoern A. Zeeb ppdu_info->tcp_ack_msdu_count;
2826dd4f32aeSBjoern A. Zeeb rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count;
2827dd4f32aeSBjoern A. Zeeb rx_stats->other_msdu_count += ppdu_info->other_msdu_count;
2828dd4f32aeSBjoern A. Zeeb
2829dd4f32aeSBjoern A. Zeeb if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A ||
2830dd4f32aeSBjoern A. Zeeb ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) {
2831dd4f32aeSBjoern A. Zeeb ppdu_info->nss = 1;
2832dd4f32aeSBjoern A. Zeeb ppdu_info->mcs = HAL_RX_MAX_MCS;
2833dd4f32aeSBjoern A. Zeeb ppdu_info->tid = IEEE80211_NUM_TIDS;
2834dd4f32aeSBjoern A. Zeeb }
2835dd4f32aeSBjoern A. Zeeb
2836dd4f32aeSBjoern A. Zeeb if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS)
2837dd4f32aeSBjoern A. Zeeb rx_stats->nss_count[ppdu_info->nss - 1] += num_msdu;
2838dd4f32aeSBjoern A. Zeeb
2839dd4f32aeSBjoern A. Zeeb if (ppdu_info->mcs <= HAL_RX_MAX_MCS)
2840dd4f32aeSBjoern A. Zeeb rx_stats->mcs_count[ppdu_info->mcs] += num_msdu;
2841dd4f32aeSBjoern A. Zeeb
2842dd4f32aeSBjoern A. Zeeb if (ppdu_info->gi < HAL_RX_GI_MAX)
2843dd4f32aeSBjoern A. Zeeb rx_stats->gi_count[ppdu_info->gi] += num_msdu;
2844dd4f32aeSBjoern A. Zeeb
2845dd4f32aeSBjoern A. Zeeb if (ppdu_info->bw < HAL_RX_BW_MAX)
2846dd4f32aeSBjoern A. Zeeb rx_stats->bw_count[ppdu_info->bw] += num_msdu;
2847dd4f32aeSBjoern A. Zeeb
2848dd4f32aeSBjoern A. Zeeb if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX)
2849dd4f32aeSBjoern A. Zeeb rx_stats->coding_count[ppdu_info->ldpc] += num_msdu;
2850dd4f32aeSBjoern A. Zeeb
2851dd4f32aeSBjoern A. Zeeb if (ppdu_info->tid <= IEEE80211_NUM_TIDS)
2852dd4f32aeSBjoern A. Zeeb rx_stats->tid_count[ppdu_info->tid] += num_msdu;
2853dd4f32aeSBjoern A. Zeeb
2854dd4f32aeSBjoern A. Zeeb if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX)
2855dd4f32aeSBjoern A. Zeeb rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu;
2856dd4f32aeSBjoern A. Zeeb
2857dd4f32aeSBjoern A. Zeeb if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX)
2858dd4f32aeSBjoern A. Zeeb rx_stats->reception_type[ppdu_info->reception_type] += num_msdu;
2859dd4f32aeSBjoern A. Zeeb
2860dd4f32aeSBjoern A. Zeeb if (ppdu_info->is_stbc)
2861dd4f32aeSBjoern A. Zeeb rx_stats->stbc_count += num_msdu;
2862dd4f32aeSBjoern A. Zeeb
2863dd4f32aeSBjoern A. Zeeb if (ppdu_info->beamformed)
2864dd4f32aeSBjoern A. Zeeb rx_stats->beamformed_count += num_msdu;
2865dd4f32aeSBjoern A. Zeeb
2866dd4f32aeSBjoern A. Zeeb if (ppdu_info->num_mpdu_fcs_ok > 1)
2867dd4f32aeSBjoern A. Zeeb rx_stats->ampdu_msdu_count += num_msdu;
2868dd4f32aeSBjoern A. Zeeb else
2869dd4f32aeSBjoern A. Zeeb rx_stats->non_ampdu_msdu_count += num_msdu;
2870dd4f32aeSBjoern A. Zeeb
2871dd4f32aeSBjoern A. Zeeb rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok;
2872dd4f32aeSBjoern A. Zeeb rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err;
2873dd4f32aeSBjoern A. Zeeb rx_stats->dcm_count += ppdu_info->dcm;
2874dd4f32aeSBjoern A. Zeeb rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu;
2875dd4f32aeSBjoern A. Zeeb
2876dd4f32aeSBjoern A. Zeeb arsta->rssi_comb = ppdu_info->rssi_comb;
2877dd4f32aeSBjoern A. Zeeb
2878dd4f32aeSBjoern A. Zeeb BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) >
2879dd4f32aeSBjoern A. Zeeb ARRAY_SIZE(ppdu_info->rssi_chain_pri20));
2880dd4f32aeSBjoern A. Zeeb
2881dd4f32aeSBjoern A. Zeeb for (i = 0; i < ARRAY_SIZE(arsta->chain_signal); i++)
2882dd4f32aeSBjoern A. Zeeb arsta->chain_signal[i] = ppdu_info->rssi_chain_pri20[i];
2883dd4f32aeSBjoern A. Zeeb
2884dd4f32aeSBjoern A. Zeeb rx_stats->rx_duration += ppdu_info->rx_duration;
2885dd4f32aeSBjoern A. Zeeb arsta->rx_duration = rx_stats->rx_duration;
2886dd4f32aeSBjoern A. Zeeb }
2887dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base * ab,struct dp_rxdma_ring * rx_ring,int * buf_id)2888dd4f32aeSBjoern A. Zeeb static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab,
2889dd4f32aeSBjoern A. Zeeb struct dp_rxdma_ring *rx_ring,
2890dd4f32aeSBjoern A. Zeeb int *buf_id)
2891dd4f32aeSBjoern A. Zeeb {
2892dd4f32aeSBjoern A. Zeeb struct sk_buff *skb;
2893dd4f32aeSBjoern A. Zeeb dma_addr_t paddr;
2894dd4f32aeSBjoern A. Zeeb
2895dd4f32aeSBjoern A. Zeeb skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
2896dd4f32aeSBjoern A. Zeeb DP_RX_BUFFER_ALIGN_SIZE);
2897dd4f32aeSBjoern A. Zeeb
2898dd4f32aeSBjoern A. Zeeb if (!skb)
2899dd4f32aeSBjoern A. Zeeb goto fail_alloc_skb;
2900dd4f32aeSBjoern A. Zeeb
2901dd4f32aeSBjoern A. Zeeb if (!IS_ALIGNED((unsigned long)skb->data,
2902dd4f32aeSBjoern A. Zeeb DP_RX_BUFFER_ALIGN_SIZE)) {
2903dd4f32aeSBjoern A. Zeeb skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
2904dd4f32aeSBjoern A. Zeeb skb->data);
2905dd4f32aeSBjoern A. Zeeb }
2906dd4f32aeSBjoern A. Zeeb
2907dd4f32aeSBjoern A. Zeeb paddr = dma_map_single(ab->dev, skb->data,
2908dd4f32aeSBjoern A. Zeeb skb->len + skb_tailroom(skb),
2909dd4f32aeSBjoern A. Zeeb DMA_FROM_DEVICE);
2910dd4f32aeSBjoern A. Zeeb if (unlikely(dma_mapping_error(ab->dev, paddr)))
2911dd4f32aeSBjoern A. Zeeb goto fail_free_skb;
2912dd4f32aeSBjoern A. Zeeb
2913dd4f32aeSBjoern A. Zeeb spin_lock_bh(&rx_ring->idr_lock);
2914dd4f32aeSBjoern A. Zeeb *buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
2915dd4f32aeSBjoern A. Zeeb rx_ring->bufs_max, GFP_ATOMIC);
2916dd4f32aeSBjoern A. Zeeb spin_unlock_bh(&rx_ring->idr_lock);
2917dd4f32aeSBjoern A. Zeeb if (*buf_id < 0)
2918dd4f32aeSBjoern A. Zeeb goto fail_dma_unmap;
2919dd4f32aeSBjoern A. Zeeb
2920dd4f32aeSBjoern A. Zeeb ATH11K_SKB_RXCB(skb)->paddr = paddr;
2921dd4f32aeSBjoern A. Zeeb return skb;
2922dd4f32aeSBjoern A. Zeeb
2923dd4f32aeSBjoern A. Zeeb fail_dma_unmap:
2924dd4f32aeSBjoern A. Zeeb dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
2925dd4f32aeSBjoern A. Zeeb DMA_FROM_DEVICE);
2926dd4f32aeSBjoern A. Zeeb fail_free_skb:
2927dd4f32aeSBjoern A. Zeeb dev_kfree_skb_any(skb);
2928dd4f32aeSBjoern A. Zeeb fail_alloc_skb:
2929dd4f32aeSBjoern A. Zeeb return NULL;
2930dd4f32aeSBjoern A. Zeeb }
2931dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base * ab,int mac_id,struct dp_rxdma_ring * rx_ring,int req_entries,enum hal_rx_buf_return_buf_manager mgr)2932dd4f32aeSBjoern A. Zeeb int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
2933dd4f32aeSBjoern A. Zeeb struct dp_rxdma_ring *rx_ring,
2934dd4f32aeSBjoern A. Zeeb int req_entries,
2935dd4f32aeSBjoern A. Zeeb enum hal_rx_buf_return_buf_manager mgr)
2936dd4f32aeSBjoern A. Zeeb {
2937dd4f32aeSBjoern A. Zeeb struct hal_srng *srng;
2938dd4f32aeSBjoern A. Zeeb u32 *desc;
2939dd4f32aeSBjoern A. Zeeb struct sk_buff *skb;
2940dd4f32aeSBjoern A. Zeeb int num_free;
2941dd4f32aeSBjoern A. Zeeb int num_remain;
2942dd4f32aeSBjoern A. Zeeb int buf_id;
2943dd4f32aeSBjoern A. Zeeb u32 cookie;
2944dd4f32aeSBjoern A. Zeeb dma_addr_t paddr;
2945dd4f32aeSBjoern A. Zeeb
2946dd4f32aeSBjoern A. Zeeb req_entries = min(req_entries, rx_ring->bufs_max);
2947dd4f32aeSBjoern A. Zeeb
2948dd4f32aeSBjoern A. Zeeb srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
2949dd4f32aeSBjoern A. Zeeb
2950dd4f32aeSBjoern A. Zeeb spin_lock_bh(&srng->lock);
2951dd4f32aeSBjoern A. Zeeb
2952dd4f32aeSBjoern A. Zeeb ath11k_hal_srng_access_begin(ab, srng);
2953dd4f32aeSBjoern A. Zeeb
2954dd4f32aeSBjoern A. Zeeb num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
2955dd4f32aeSBjoern A. Zeeb
2956dd4f32aeSBjoern A. Zeeb req_entries = min(num_free, req_entries);
2957dd4f32aeSBjoern A. Zeeb num_remain = req_entries;
2958dd4f32aeSBjoern A. Zeeb
2959dd4f32aeSBjoern A. Zeeb while (num_remain > 0) {
2960dd4f32aeSBjoern A. Zeeb skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
2961dd4f32aeSBjoern A. Zeeb &buf_id);
2962dd4f32aeSBjoern A. Zeeb if (!skb)
2963dd4f32aeSBjoern A. Zeeb break;
2964dd4f32aeSBjoern A. Zeeb paddr = ATH11K_SKB_RXCB(skb)->paddr;
2965dd4f32aeSBjoern A. Zeeb
2966dd4f32aeSBjoern A. Zeeb desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
2967dd4f32aeSBjoern A. Zeeb if (!desc)
2968dd4f32aeSBjoern A. Zeeb goto fail_desc_get;
2969dd4f32aeSBjoern A. Zeeb
2970dd4f32aeSBjoern A. Zeeb cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
2971dd4f32aeSBjoern A. Zeeb FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
2972dd4f32aeSBjoern A. Zeeb
2973dd4f32aeSBjoern A. Zeeb num_remain--;
2974dd4f32aeSBjoern A. Zeeb
2975dd4f32aeSBjoern A. Zeeb ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
2976dd4f32aeSBjoern A. Zeeb }
2977dd4f32aeSBjoern A. Zeeb
2978dd4f32aeSBjoern A. Zeeb ath11k_hal_srng_access_end(ab, srng);
2979dd4f32aeSBjoern A. Zeeb
2980dd4f32aeSBjoern A. Zeeb spin_unlock_bh(&srng->lock);
2981dd4f32aeSBjoern A. Zeeb
2982dd4f32aeSBjoern A. Zeeb return req_entries - num_remain;
2983dd4f32aeSBjoern A. Zeeb
2984dd4f32aeSBjoern A. Zeeb fail_desc_get:
2985dd4f32aeSBjoern A. Zeeb spin_lock_bh(&rx_ring->idr_lock);
2986dd4f32aeSBjoern A. Zeeb idr_remove(&rx_ring->bufs_idr, buf_id);
2987dd4f32aeSBjoern A. Zeeb spin_unlock_bh(&rx_ring->idr_lock);
2988dd4f32aeSBjoern A. Zeeb dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
2989dd4f32aeSBjoern A. Zeeb DMA_FROM_DEVICE);
2990dd4f32aeSBjoern A. Zeeb dev_kfree_skb_any(skb);
2991dd4f32aeSBjoern A. Zeeb ath11k_hal_srng_access_end(ab, srng);
2992dd4f32aeSBjoern A. Zeeb spin_unlock_bh(&srng->lock);
2993dd4f32aeSBjoern A. Zeeb
2994dd4f32aeSBjoern A. Zeeb return req_entries - num_remain;
2995dd4f32aeSBjoern A. Zeeb }
2996dd4f32aeSBjoern A. Zeeb
2997dd4f32aeSBjoern A. Zeeb #define ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP 32535
2998dd4f32aeSBjoern A. Zeeb
2999dd4f32aeSBjoern A. Zeeb static void
ath11k_dp_rx_mon_update_status_buf_state(struct ath11k_mon_data * pmon,struct hal_tlv_hdr * tlv)3000dd4f32aeSBjoern A. Zeeb ath11k_dp_rx_mon_update_status_buf_state(struct ath11k_mon_data *pmon,
3001dd4f32aeSBjoern A. Zeeb struct hal_tlv_hdr *tlv)
3002dd4f32aeSBjoern A. Zeeb {
3003dd4f32aeSBjoern A. Zeeb struct hal_rx_ppdu_start *ppdu_start;
3004dd4f32aeSBjoern A. Zeeb u16 ppdu_id_diff, ppdu_id, tlv_len;
3005dd4f32aeSBjoern A. Zeeb u8 *ptr;
3006dd4f32aeSBjoern A. Zeeb
3007dd4f32aeSBjoern A. Zeeb /* PPDU id is part of second tlv, move ptr to second tlv */
3008dd4f32aeSBjoern A. Zeeb tlv_len = FIELD_GET(HAL_TLV_HDR_LEN, tlv->tl);
3009dd4f32aeSBjoern A. Zeeb ptr = (u8 *)tlv;
3010dd4f32aeSBjoern A. Zeeb ptr += sizeof(*tlv) + tlv_len;
3011dd4f32aeSBjoern A. Zeeb tlv = (struct hal_tlv_hdr *)ptr;
3012dd4f32aeSBjoern A. Zeeb
3013dd4f32aeSBjoern A. Zeeb if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != HAL_RX_PPDU_START)
3014dd4f32aeSBjoern A. Zeeb return;
3015dd4f32aeSBjoern A. Zeeb
3016dd4f32aeSBjoern A. Zeeb ptr += sizeof(*tlv);
3017dd4f32aeSBjoern A. Zeeb ppdu_start = (struct hal_rx_ppdu_start *)ptr;
3018dd4f32aeSBjoern A. Zeeb ppdu_id = FIELD_GET(HAL_RX_PPDU_START_INFO0_PPDU_ID,
3019dd4f32aeSBjoern A. Zeeb __le32_to_cpu(ppdu_start->info0));
3020dd4f32aeSBjoern A. Zeeb
3021dd4f32aeSBjoern A. Zeeb if (pmon->sw_mon_entries.ppdu_id < ppdu_id) {
3022dd4f32aeSBjoern A. Zeeb pmon->buf_state = DP_MON_STATUS_LEAD;
3023dd4f32aeSBjoern A. Zeeb ppdu_id_diff = ppdu_id - pmon->sw_mon_entries.ppdu_id;
3024dd4f32aeSBjoern A. Zeeb if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP)
3025dd4f32aeSBjoern A. Zeeb pmon->buf_state = DP_MON_STATUS_LAG;
3026dd4f32aeSBjoern A. Zeeb } else if (pmon->sw_mon_entries.ppdu_id > ppdu_id) {
3027dd4f32aeSBjoern A. Zeeb pmon->buf_state = DP_MON_STATUS_LAG;
3028dd4f32aeSBjoern A. Zeeb ppdu_id_diff = pmon->sw_mon_entries.ppdu_id - ppdu_id;
3029dd4f32aeSBjoern A. Zeeb if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP)
3030dd4f32aeSBjoern A. Zeeb pmon->buf_state = DP_MON_STATUS_LEAD;
3031dd4f32aeSBjoern A. Zeeb }
3032dd4f32aeSBjoern A. Zeeb }
3033dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base * ab,int mac_id,int * budget,struct sk_buff_head * skb_list)3034dd4f32aeSBjoern A. Zeeb static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
3035dd4f32aeSBjoern A. Zeeb int *budget, struct sk_buff_head *skb_list)
3036dd4f32aeSBjoern A. Zeeb {
3037dd4f32aeSBjoern A. Zeeb struct ath11k *ar;
3038dd4f32aeSBjoern A. Zeeb const struct ath11k_hw_hal_params *hal_params;
3039dd4f32aeSBjoern A. Zeeb struct ath11k_pdev_dp *dp;
3040dd4f32aeSBjoern A. Zeeb struct dp_rxdma_ring *rx_ring;
3041dd4f32aeSBjoern A. Zeeb struct ath11k_mon_data *pmon;
3042dd4f32aeSBjoern A. Zeeb struct hal_srng *srng;
3043dd4f32aeSBjoern A. Zeeb void *rx_mon_status_desc;
3044dd4f32aeSBjoern A. Zeeb struct sk_buff *skb;
3045dd4f32aeSBjoern A. Zeeb struct ath11k_skb_rxcb *rxcb;
3046dd4f32aeSBjoern A. Zeeb struct hal_tlv_hdr *tlv;
3047dd4f32aeSBjoern A. Zeeb u32 cookie;
3048dd4f32aeSBjoern A. Zeeb int buf_id, srng_id;
3049dd4f32aeSBjoern A. Zeeb dma_addr_t paddr;
3050dd4f32aeSBjoern A. Zeeb u8 rbm;
3051dd4f32aeSBjoern A. Zeeb int num_buffs_reaped = 0;
3052dd4f32aeSBjoern A. Zeeb
3053dd4f32aeSBjoern A. Zeeb ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
3054dd4f32aeSBjoern A. Zeeb dp = &ar->dp;
3055dd4f32aeSBjoern A. Zeeb pmon = &dp->mon_data;
3056dd4f32aeSBjoern A. Zeeb srng_id = ath11k_hw_mac_id_to_srng_id(&ab->hw_params, mac_id);
3057dd4f32aeSBjoern A. Zeeb rx_ring = &dp->rx_mon_status_refill_ring[srng_id];
3058dd4f32aeSBjoern A. Zeeb
3059dd4f32aeSBjoern A. Zeeb srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
3060dd4f32aeSBjoern A. Zeeb
3061dd4f32aeSBjoern A. Zeeb spin_lock_bh(&srng->lock);
3062dd4f32aeSBjoern A. Zeeb
3063dd4f32aeSBjoern A. Zeeb ath11k_hal_srng_access_begin(ab, srng);
3064dd4f32aeSBjoern A. Zeeb while (*budget) {
3065dd4f32aeSBjoern A. Zeeb *budget -= 1;
3066dd4f32aeSBjoern A. Zeeb rx_mon_status_desc =
3067dd4f32aeSBjoern A. Zeeb ath11k_hal_srng_src_peek(ab, srng);
3068dd4f32aeSBjoern A. Zeeb if (!rx_mon_status_desc) {
3069dd4f32aeSBjoern A. Zeeb pmon->buf_state = DP_MON_STATUS_REPLINISH;
3070dd4f32aeSBjoern A. Zeeb break;
3071dd4f32aeSBjoern A. Zeeb }
3072dd4f32aeSBjoern A. Zeeb
3073dd4f32aeSBjoern A. Zeeb ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,
3074dd4f32aeSBjoern A. Zeeb &cookie, &rbm);
3075dd4f32aeSBjoern A. Zeeb if (paddr) {
3076dd4f32aeSBjoern A. Zeeb buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
3077dd4f32aeSBjoern A. Zeeb
3078dd4f32aeSBjoern A. Zeeb spin_lock_bh(&rx_ring->idr_lock);
3079dd4f32aeSBjoern A. Zeeb skb = idr_find(&rx_ring->bufs_idr, buf_id);
3080*28348caeSBjoern A. Zeeb spin_unlock_bh(&rx_ring->idr_lock);
3081*28348caeSBjoern A. Zeeb
3082dd4f32aeSBjoern A. Zeeb if (!skb) {
3083dd4f32aeSBjoern A. Zeeb ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n",
3084dd4f32aeSBjoern A. Zeeb buf_id);
3085dd4f32aeSBjoern A. Zeeb pmon->buf_state = DP_MON_STATUS_REPLINISH;
3086dd4f32aeSBjoern A. Zeeb goto move_next;
3087dd4f32aeSBjoern A. Zeeb }
3088dd4f32aeSBjoern A. Zeeb
3089dd4f32aeSBjoern A. Zeeb rxcb = ATH11K_SKB_RXCB(skb);
3090dd4f32aeSBjoern A. Zeeb
3091*28348caeSBjoern A. Zeeb dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
3092dd4f32aeSBjoern A. Zeeb skb->len + skb_tailroom(skb),
3093dd4f32aeSBjoern A. Zeeb DMA_FROM_DEVICE);
3094dd4f32aeSBjoern A. Zeeb
3095dd4f32aeSBjoern A. Zeeb tlv = (struct hal_tlv_hdr *)skb->data;
3096dd4f32aeSBjoern A. Zeeb if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) !=
3097dd4f32aeSBjoern A. Zeeb HAL_RX_STATUS_BUFFER_DONE) {
3098*28348caeSBjoern A. Zeeb ath11k_warn(ab, "mon status DONE not set %lx, buf_id %d\n",
3099dd4f32aeSBjoern A. Zeeb FIELD_GET(HAL_TLV_HDR_TAG,
3100*28348caeSBjoern A. Zeeb tlv->tl), buf_id);
3101*28348caeSBjoern A. Zeeb /* If done status is missing, hold onto status
3102*28348caeSBjoern A. Zeeb * ring until status is done for this status
3103*28348caeSBjoern A. Zeeb * ring buffer.
3104*28348caeSBjoern A. Zeeb * Keep HP in mon_status_ring unchanged,
3105*28348caeSBjoern A. Zeeb * and break from here.
3106*28348caeSBjoern A. Zeeb * Check status for same buffer for next time
3107*28348caeSBjoern A. Zeeb */
3108dd4f32aeSBjoern A. Zeeb pmon->buf_state = DP_MON_STATUS_NO_DMA;
3109*28348caeSBjoern A. Zeeb break;
3110dd4f32aeSBjoern A. Zeeb }
3111dd4f32aeSBjoern A. Zeeb
3112*28348caeSBjoern A. Zeeb spin_lock_bh(&rx_ring->idr_lock);
3113*28348caeSBjoern A. Zeeb idr_remove(&rx_ring->bufs_idr, buf_id);
3114*28348caeSBjoern A. Zeeb spin_unlock_bh(&rx_ring->idr_lock);
3115dd4f32aeSBjoern A. Zeeb if (ab->hw_params.full_monitor_mode) {
3116dd4f32aeSBjoern A. Zeeb ath11k_dp_rx_mon_update_status_buf_state(pmon, tlv);
3117dd4f32aeSBjoern A. Zeeb if (paddr == pmon->mon_status_paddr)
3118dd4f32aeSBjoern A. Zeeb pmon->buf_state = DP_MON_STATUS_MATCH;
3119dd4f32aeSBjoern A. Zeeb }
3120*28348caeSBjoern A. Zeeb
3121*28348caeSBjoern A. Zeeb dma_unmap_single(ab->dev, rxcb->paddr,
3122*28348caeSBjoern A. Zeeb skb->len + skb_tailroom(skb),
3123*28348caeSBjoern A. Zeeb DMA_FROM_DEVICE);
3124*28348caeSBjoern A. Zeeb
3125dd4f32aeSBjoern A. Zeeb __skb_queue_tail(skb_list, skb);
3126dd4f32aeSBjoern A. Zeeb } else {
3127dd4f32aeSBjoern A. Zeeb pmon->buf_state = DP_MON_STATUS_REPLINISH;
3128dd4f32aeSBjoern A. Zeeb }
3129dd4f32aeSBjoern A. Zeeb move_next:
3130dd4f32aeSBjoern A. Zeeb skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
3131dd4f32aeSBjoern A. Zeeb &buf_id);
3132dd4f32aeSBjoern A. Zeeb
3133dd4f32aeSBjoern A. Zeeb if (!skb) {
3134dd4f32aeSBjoern A. Zeeb hal_params = ab->hw_params.hal_params;
3135dd4f32aeSBjoern A. Zeeb ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
3136dd4f32aeSBjoern A. Zeeb hal_params->rx_buf_rbm);
3137dd4f32aeSBjoern A. Zeeb num_buffs_reaped++;
3138dd4f32aeSBjoern A. Zeeb break;
3139dd4f32aeSBjoern A. Zeeb }
3140dd4f32aeSBjoern A. Zeeb rxcb = ATH11K_SKB_RXCB(skb);
3141dd4f32aeSBjoern A. Zeeb
3142dd4f32aeSBjoern A. Zeeb cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
3143dd4f32aeSBjoern A. Zeeb FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
3144dd4f32aeSBjoern A. Zeeb
3145dd4f32aeSBjoern A. Zeeb ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr,
3146dd4f32aeSBjoern A. Zeeb cookie,
3147dd4f32aeSBjoern A. Zeeb ab->hw_params.hal_params->rx_buf_rbm);
3148dd4f32aeSBjoern A. Zeeb ath11k_hal_srng_src_get_next_entry(ab, srng);
3149dd4f32aeSBjoern A. Zeeb num_buffs_reaped++;
3150dd4f32aeSBjoern A. Zeeb }
3151dd4f32aeSBjoern A. Zeeb ath11k_hal_srng_access_end(ab, srng);
3152dd4f32aeSBjoern A. Zeeb spin_unlock_bh(&srng->lock);
3153dd4f32aeSBjoern A. Zeeb
3154dd4f32aeSBjoern A. Zeeb return num_buffs_reaped;
3155dd4f32aeSBjoern A. Zeeb }
3156dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_frag_timer(struct timer_list * timer)3157dd4f32aeSBjoern A. Zeeb static void ath11k_dp_rx_frag_timer(struct timer_list *timer)
3158dd4f32aeSBjoern A. Zeeb {
3159dd4f32aeSBjoern A. Zeeb struct dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer);
3160dd4f32aeSBjoern A. Zeeb
3161dd4f32aeSBjoern A. Zeeb spin_lock_bh(&rx_tid->ab->base_lock);
3162dd4f32aeSBjoern A. Zeeb if (rx_tid->last_frag_no &&
3163dd4f32aeSBjoern A. Zeeb rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) {
3164dd4f32aeSBjoern A. Zeeb spin_unlock_bh(&rx_tid->ab->base_lock);
3165dd4f32aeSBjoern A. Zeeb return;
3166dd4f32aeSBjoern A. Zeeb }
3167dd4f32aeSBjoern A. Zeeb ath11k_dp_rx_frags_cleanup(rx_tid, true);
3168dd4f32aeSBjoern A. Zeeb spin_unlock_bh(&rx_tid->ab->base_lock);
3169dd4f32aeSBjoern A. Zeeb }
3170dd4f32aeSBjoern A. Zeeb
ath11k_peer_rx_frag_setup(struct ath11k * ar,const u8 * peer_mac,int vdev_id)3171dd4f32aeSBjoern A. Zeeb int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id)
3172dd4f32aeSBjoern A. Zeeb {
3173dd4f32aeSBjoern A. Zeeb struct ath11k_base *ab = ar->ab;
3174dd4f32aeSBjoern A. Zeeb struct crypto_shash *tfm;
3175dd4f32aeSBjoern A. Zeeb struct ath11k_peer *peer;
3176dd4f32aeSBjoern A. Zeeb struct dp_rx_tid *rx_tid;
3177dd4f32aeSBjoern A. Zeeb int i;
3178dd4f32aeSBjoern A. Zeeb
3179dd4f32aeSBjoern A. Zeeb tfm = crypto_alloc_shash("michael_mic", 0, 0);
3180*28348caeSBjoern A. Zeeb if (IS_ERR(tfm)) {
3181*28348caeSBjoern A. Zeeb ath11k_warn(ab, "failed to allocate michael_mic shash: %ld\n",
3182*28348caeSBjoern A. Zeeb PTR_ERR(tfm));
3183dd4f32aeSBjoern A. Zeeb return PTR_ERR(tfm);
3184*28348caeSBjoern A. Zeeb }
3185dd4f32aeSBjoern A. Zeeb
3186dd4f32aeSBjoern A. Zeeb spin_lock_bh(&ab->base_lock);
3187dd4f32aeSBjoern A. Zeeb
3188dd4f32aeSBjoern A. Zeeb peer = ath11k_peer_find(ab, vdev_id, peer_mac);
3189dd4f32aeSBjoern A. Zeeb if (!peer) {
3190dd4f32aeSBjoern A. Zeeb ath11k_warn(ab, "failed to find the peer to set up fragment info\n");
3191dd4f32aeSBjoern A. Zeeb spin_unlock_bh(&ab->base_lock);
3192*28348caeSBjoern A. Zeeb crypto_free_shash(tfm);
3193dd4f32aeSBjoern A. Zeeb return -ENOENT;
3194dd4f32aeSBjoern A. Zeeb }
3195dd4f32aeSBjoern A. Zeeb
3196dd4f32aeSBjoern A. Zeeb for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
3197dd4f32aeSBjoern A. Zeeb rx_tid = &peer->rx_tid[i];
3198dd4f32aeSBjoern A. Zeeb rx_tid->ab = ab;
3199dd4f32aeSBjoern A. Zeeb timer_setup(&rx_tid->frag_timer, ath11k_dp_rx_frag_timer, 0);
3200dd4f32aeSBjoern A. Zeeb skb_queue_head_init(&rx_tid->rx_frags);
3201dd4f32aeSBjoern A. Zeeb }
3202dd4f32aeSBjoern A. Zeeb
3203dd4f32aeSBjoern A. Zeeb peer->tfm_mmic = tfm;
3204*28348caeSBjoern A. Zeeb peer->dp_setup_done = true;
3205dd4f32aeSBjoern A. Zeeb spin_unlock_bh(&ab->base_lock);
3206dd4f32aeSBjoern A. Zeeb
3207dd4f32aeSBjoern A. Zeeb return 0;
3208dd4f32aeSBjoern A. Zeeb }
3209dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_h_michael_mic(struct crypto_shash * tfm,u8 * key,struct ieee80211_hdr * hdr,u8 * data,size_t data_len,u8 * mic)3210dd4f32aeSBjoern A. Zeeb static int ath11k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,
3211dd4f32aeSBjoern A. Zeeb struct ieee80211_hdr *hdr, u8 *data,
3212dd4f32aeSBjoern A. Zeeb size_t data_len, u8 *mic)
3213dd4f32aeSBjoern A. Zeeb {
3214dd4f32aeSBjoern A. Zeeb SHASH_DESC_ON_STACK(desc, tfm);
3215dd4f32aeSBjoern A. Zeeb u8 mic_hdr[16] = {0};
3216dd4f32aeSBjoern A. Zeeb u8 tid = 0;
3217dd4f32aeSBjoern A. Zeeb int ret;
3218dd4f32aeSBjoern A. Zeeb
3219dd4f32aeSBjoern A. Zeeb if (!tfm)
3220dd4f32aeSBjoern A. Zeeb return -EINVAL;
3221dd4f32aeSBjoern A. Zeeb
3222dd4f32aeSBjoern A. Zeeb desc->tfm = tfm;
3223dd4f32aeSBjoern A. Zeeb
3224dd4f32aeSBjoern A. Zeeb ret = crypto_shash_setkey(tfm, key, 8);
3225dd4f32aeSBjoern A. Zeeb if (ret)
3226dd4f32aeSBjoern A. Zeeb goto out;
3227dd4f32aeSBjoern A. Zeeb
3228dd4f32aeSBjoern A. Zeeb ret = crypto_shash_init(desc);
3229dd4f32aeSBjoern A. Zeeb if (ret)
3230dd4f32aeSBjoern A. Zeeb goto out;
3231dd4f32aeSBjoern A. Zeeb
3232dd4f32aeSBjoern A. Zeeb /* TKIP MIC header */
3233dd4f32aeSBjoern A. Zeeb memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN);
3234dd4f32aeSBjoern A. Zeeb memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN);
3235dd4f32aeSBjoern A. Zeeb if (ieee80211_is_data_qos(hdr->frame_control))
3236dd4f32aeSBjoern A. Zeeb tid = ieee80211_get_tid(hdr);
3237dd4f32aeSBjoern A. Zeeb mic_hdr[12] = tid;
3238dd4f32aeSBjoern A. Zeeb
3239dd4f32aeSBjoern A. Zeeb ret = crypto_shash_update(desc, mic_hdr, 16);
3240dd4f32aeSBjoern A. Zeeb if (ret)
3241dd4f32aeSBjoern A. Zeeb goto out;
3242dd4f32aeSBjoern A. Zeeb ret = crypto_shash_update(desc, data, data_len);
3243dd4f32aeSBjoern A. Zeeb if (ret)
3244dd4f32aeSBjoern A. Zeeb goto out;
3245dd4f32aeSBjoern A. Zeeb ret = crypto_shash_final(desc, mic);
3246dd4f32aeSBjoern A. Zeeb out:
3247dd4f32aeSBjoern A. Zeeb shash_desc_zero(desc);
3248dd4f32aeSBjoern A. Zeeb return ret;
3249dd4f32aeSBjoern A. Zeeb }
3250dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_h_verify_tkip_mic(struct ath11k * ar,struct ath11k_peer * peer,struct sk_buff * msdu)3251dd4f32aeSBjoern A. Zeeb static int ath11k_dp_rx_h_verify_tkip_mic(struct ath11k *ar, struct ath11k_peer *peer,
3252dd4f32aeSBjoern A. Zeeb struct sk_buff *msdu)
3253dd4f32aeSBjoern A. Zeeb {
3254dd4f32aeSBjoern A. Zeeb struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
3255dd4f32aeSBjoern A. Zeeb struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
3256dd4f32aeSBjoern A. Zeeb struct ieee80211_key_conf *key_conf;
3257dd4f32aeSBjoern A. Zeeb struct ieee80211_hdr *hdr;
3258dd4f32aeSBjoern A. Zeeb u8 mic[IEEE80211_CCMP_MIC_LEN];
3259dd4f32aeSBjoern A. Zeeb int head_len, tail_len, ret;
3260dd4f32aeSBjoern A. Zeeb size_t data_len;
3261dd4f32aeSBjoern A. Zeeb u32 hdr_len, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3262dd4f32aeSBjoern A. Zeeb u8 *key, *data;
3263dd4f32aeSBjoern A. Zeeb u8 key_idx;
3264dd4f32aeSBjoern A. Zeeb
3265dd4f32aeSBjoern A. Zeeb if (ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc) !=
3266dd4f32aeSBjoern A. Zeeb HAL_ENCRYPT_TYPE_TKIP_MIC)
3267dd4f32aeSBjoern A. Zeeb return 0;
3268dd4f32aeSBjoern A. Zeeb
3269dd4f32aeSBjoern A. Zeeb hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
3270dd4f32aeSBjoern A. Zeeb hdr_len = ieee80211_hdrlen(hdr->frame_control);
3271dd4f32aeSBjoern A. Zeeb head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN;
3272dd4f32aeSBjoern A. Zeeb tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN;
3273dd4f32aeSBjoern A. Zeeb
3274dd4f32aeSBjoern A. Zeeb if (!is_multicast_ether_addr(hdr->addr1))
3275dd4f32aeSBjoern A. Zeeb key_idx = peer->ucast_keyidx;
3276dd4f32aeSBjoern A. Zeeb else
3277dd4f32aeSBjoern A. Zeeb key_idx = peer->mcast_keyidx;
3278dd4f32aeSBjoern A. Zeeb
3279dd4f32aeSBjoern A. Zeeb key_conf = peer->keys[key_idx];
3280dd4f32aeSBjoern A. Zeeb
3281dd4f32aeSBjoern A. Zeeb data = msdu->data + head_len;
3282dd4f32aeSBjoern A. Zeeb data_len = msdu->len - head_len - tail_len;
3283dd4f32aeSBjoern A. Zeeb key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
3284dd4f32aeSBjoern A. Zeeb
3285dd4f32aeSBjoern A. Zeeb ret = ath11k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic);
3286dd4f32aeSBjoern A. Zeeb if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN))
3287dd4f32aeSBjoern A. Zeeb goto mic_fail;
3288dd4f32aeSBjoern A. Zeeb
3289dd4f32aeSBjoern A. Zeeb return 0;
3290dd4f32aeSBjoern A. Zeeb
3291dd4f32aeSBjoern A. Zeeb mic_fail:
3292dd4f32aeSBjoern A. Zeeb (ATH11K_SKB_RXCB(msdu))->is_first_msdu = true;
3293dd4f32aeSBjoern A. Zeeb (ATH11K_SKB_RXCB(msdu))->is_last_msdu = true;
3294dd4f32aeSBjoern A. Zeeb
3295dd4f32aeSBjoern A. Zeeb rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
3296dd4f32aeSBjoern A. Zeeb RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
3297dd4f32aeSBjoern A. Zeeb skb_pull(msdu, hal_rx_desc_sz);
3298dd4f32aeSBjoern A. Zeeb
3299dd4f32aeSBjoern A. Zeeb ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
3300dd4f32aeSBjoern A. Zeeb ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
3301dd4f32aeSBjoern A. Zeeb HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
3302dd4f32aeSBjoern A. Zeeb ieee80211_rx(ar->hw, msdu);
3303dd4f32aeSBjoern A. Zeeb return -EINVAL;
3304dd4f32aeSBjoern A. Zeeb }
3305dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_h_undecap_frag(struct ath11k * ar,struct sk_buff * msdu,enum hal_encrypt_type enctype,u32 flags)3306dd4f32aeSBjoern A. Zeeb static void ath11k_dp_rx_h_undecap_frag(struct ath11k *ar, struct sk_buff *msdu,
3307dd4f32aeSBjoern A. Zeeb enum hal_encrypt_type enctype, u32 flags)
3308dd4f32aeSBjoern A. Zeeb {
3309dd4f32aeSBjoern A. Zeeb struct ieee80211_hdr *hdr;
3310dd4f32aeSBjoern A. Zeeb size_t hdr_len;
3311dd4f32aeSBjoern A. Zeeb size_t crypto_len;
3312dd4f32aeSBjoern A. Zeeb u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3313dd4f32aeSBjoern A. Zeeb
3314dd4f32aeSBjoern A. Zeeb if (!flags)
3315dd4f32aeSBjoern A. Zeeb return;
3316dd4f32aeSBjoern A. Zeeb
3317dd4f32aeSBjoern A. Zeeb hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
3318dd4f32aeSBjoern A. Zeeb
3319dd4f32aeSBjoern A. Zeeb if (flags & RX_FLAG_MIC_STRIPPED)
3320dd4f32aeSBjoern A. Zeeb skb_trim(msdu, msdu->len -
3321dd4f32aeSBjoern A. Zeeb ath11k_dp_rx_crypto_mic_len(ar, enctype));
3322dd4f32aeSBjoern A. Zeeb
3323dd4f32aeSBjoern A. Zeeb if (flags & RX_FLAG_ICV_STRIPPED)
3324dd4f32aeSBjoern A. Zeeb skb_trim(msdu, msdu->len -
3325dd4f32aeSBjoern A. Zeeb ath11k_dp_rx_crypto_icv_len(ar, enctype));
3326dd4f32aeSBjoern A. Zeeb
3327dd4f32aeSBjoern A. Zeeb if (flags & RX_FLAG_IV_STRIPPED) {
3328dd4f32aeSBjoern A. Zeeb hdr_len = ieee80211_hdrlen(hdr->frame_control);
3329dd4f32aeSBjoern A. Zeeb crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
3330dd4f32aeSBjoern A. Zeeb
3331dd4f32aeSBjoern A. Zeeb #if defined(__linux__)
3332dd4f32aeSBjoern A. Zeeb memmove((void *)msdu->data + hal_rx_desc_sz + crypto_len,
3333dd4f32aeSBjoern A. Zeeb (void *)msdu->data + hal_rx_desc_sz, hdr_len);
3334dd4f32aeSBjoern A. Zeeb #elif defined(__FreeBSD__)
3335dd4f32aeSBjoern A. Zeeb memmove((u8 *)msdu->data + hal_rx_desc_sz + crypto_len,
3336dd4f32aeSBjoern A. Zeeb (u8 *)msdu->data + hal_rx_desc_sz, hdr_len);
3337dd4f32aeSBjoern A. Zeeb #endif
3338dd4f32aeSBjoern A. Zeeb skb_pull(msdu, crypto_len);
3339dd4f32aeSBjoern A. Zeeb }
3340dd4f32aeSBjoern A. Zeeb }
3341dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_h_defrag(struct ath11k * ar,struct ath11k_peer * peer,struct dp_rx_tid * rx_tid,struct sk_buff ** defrag_skb)3342dd4f32aeSBjoern A. Zeeb static int ath11k_dp_rx_h_defrag(struct ath11k *ar,
3343dd4f32aeSBjoern A. Zeeb struct ath11k_peer *peer,
3344dd4f32aeSBjoern A. Zeeb struct dp_rx_tid *rx_tid,
3345dd4f32aeSBjoern A. Zeeb struct sk_buff **defrag_skb)
3346dd4f32aeSBjoern A. Zeeb {
3347dd4f32aeSBjoern A. Zeeb struct hal_rx_desc *rx_desc;
3348dd4f32aeSBjoern A. Zeeb struct sk_buff *skb, *first_frag, *last_frag;
3349dd4f32aeSBjoern A. Zeeb struct ieee80211_hdr *hdr;
3350dd4f32aeSBjoern A. Zeeb struct rx_attention *rx_attention;
3351dd4f32aeSBjoern A. Zeeb enum hal_encrypt_type enctype;
3352dd4f32aeSBjoern A. Zeeb bool is_decrypted = false;
3353dd4f32aeSBjoern A. Zeeb int msdu_len = 0;
3354dd4f32aeSBjoern A. Zeeb int extra_space;
3355dd4f32aeSBjoern A. Zeeb u32 flags, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3356dd4f32aeSBjoern A. Zeeb
3357dd4f32aeSBjoern A. Zeeb first_frag = skb_peek(&rx_tid->rx_frags);
3358dd4f32aeSBjoern A. Zeeb last_frag = skb_peek_tail(&rx_tid->rx_frags);
3359dd4f32aeSBjoern A. Zeeb
3360dd4f32aeSBjoern A. Zeeb skb_queue_walk(&rx_tid->rx_frags, skb) {
3361dd4f32aeSBjoern A. Zeeb flags = 0;
3362dd4f32aeSBjoern A. Zeeb rx_desc = (struct hal_rx_desc *)skb->data;
3363dd4f32aeSBjoern A. Zeeb hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
3364dd4f32aeSBjoern A. Zeeb
3365dd4f32aeSBjoern A. Zeeb enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc);
3366dd4f32aeSBjoern A. Zeeb if (enctype != HAL_ENCRYPT_TYPE_OPEN) {
3367dd4f32aeSBjoern A. Zeeb rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc);
3368dd4f32aeSBjoern A. Zeeb is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention);
3369dd4f32aeSBjoern A. Zeeb }
3370dd4f32aeSBjoern A. Zeeb
3371dd4f32aeSBjoern A. Zeeb if (is_decrypted) {
3372dd4f32aeSBjoern A. Zeeb if (skb != first_frag)
3373dd4f32aeSBjoern A. Zeeb flags |= RX_FLAG_IV_STRIPPED;
3374dd4f32aeSBjoern A. Zeeb if (skb != last_frag)
3375dd4f32aeSBjoern A. Zeeb flags |= RX_FLAG_ICV_STRIPPED |
3376dd4f32aeSBjoern A. Zeeb RX_FLAG_MIC_STRIPPED;
3377dd4f32aeSBjoern A. Zeeb }
3378dd4f32aeSBjoern A. Zeeb
3379dd4f32aeSBjoern A. Zeeb /* RX fragments are always raw packets */
3380dd4f32aeSBjoern A. Zeeb if (skb != last_frag)
3381dd4f32aeSBjoern A. Zeeb skb_trim(skb, skb->len - FCS_LEN);
3382dd4f32aeSBjoern A. Zeeb ath11k_dp_rx_h_undecap_frag(ar, skb, enctype, flags);
3383dd4f32aeSBjoern A. Zeeb
3384dd4f32aeSBjoern A. Zeeb if (skb != first_frag)
3385dd4f32aeSBjoern A. Zeeb skb_pull(skb, hal_rx_desc_sz +
3386dd4f32aeSBjoern A. Zeeb ieee80211_hdrlen(hdr->frame_control));
3387dd4f32aeSBjoern A. Zeeb msdu_len += skb->len;
3388dd4f32aeSBjoern A. Zeeb }
3389dd4f32aeSBjoern A. Zeeb
3390dd4f32aeSBjoern A. Zeeb extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag));
3391dd4f32aeSBjoern A. Zeeb if (extra_space > 0 &&
3392dd4f32aeSBjoern A. Zeeb (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0))
3393dd4f32aeSBjoern A. Zeeb return -ENOMEM;
3394dd4f32aeSBjoern A. Zeeb
3395dd4f32aeSBjoern A. Zeeb __skb_unlink(first_frag, &rx_tid->rx_frags);
3396dd4f32aeSBjoern A. Zeeb while ((skb = __skb_dequeue(&rx_tid->rx_frags))) {
3397dd4f32aeSBjoern A. Zeeb skb_put_data(first_frag, skb->data, skb->len);
3398dd4f32aeSBjoern A. Zeeb dev_kfree_skb_any(skb);
3399dd4f32aeSBjoern A. Zeeb }
3400dd4f32aeSBjoern A. Zeeb
3401dd4f32aeSBjoern A. Zeeb hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz);
3402dd4f32aeSBjoern A. Zeeb hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
3403dd4f32aeSBjoern A. Zeeb ATH11K_SKB_RXCB(first_frag)->is_frag = 1;
3404dd4f32aeSBjoern A. Zeeb
3405dd4f32aeSBjoern A. Zeeb if (ath11k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag))
3406dd4f32aeSBjoern A. Zeeb first_frag = NULL;
3407dd4f32aeSBjoern A. Zeeb
3408dd4f32aeSBjoern A. Zeeb *defrag_skb = first_frag;
3409dd4f32aeSBjoern A. Zeeb return 0;
3410dd4f32aeSBjoern A. Zeeb }
3411dd4f32aeSBjoern A. Zeeb
/* Re-inject a fully reassembled MPDU into the REO entrance ring so the
 * hardware runs its normal REO processing (PN check, reordering) on it and
 * delivers it through the regular rx path.
 *
 * Reuses the link descriptor referenced by the saved destination ring entry
 * (rx_tid->dst_ring_desc) to describe the single reassembled MSDU.
 *
 * Returns 0 on success; on success the skb has been DMA-mapped and handed
 * to hardware (tracked in the rx refill idr). On error a negative errno is
 * returned and the caller retains ownership of defrag_skb.
 */
static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_tid *rx_tid,
					      struct sk_buff *defrag_skb)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct dp_rxdma_ring *rx_refill_ring = &dp->rx_refill_buf_ring;
	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data;
	struct hal_reo_entrance_ring *reo_ent_ring;
	struct hal_reo_dest_ring *reo_dest_ring;
	struct dp_link_desc_bank *link_desc_banks;
	struct hal_rx_msdu_link *msdu_link;
	struct hal_rx_msdu_details *msdu0;
	struct hal_srng *srng;
	dma_addr_t paddr;
	u32 desc_bank, msdu_info, mpdu_info;
	u32 dst_idx, cookie, hal_rx_desc_sz;
	int ret, buf_id;

	hal_rx_desc_sz = ab->hw_params.hal_desc_sz;
	link_desc_banks = ab->dp.link_desc_banks;
	reo_dest_ring = rx_tid->dst_ring_desc;

	/* Locate the CPU-mapped link descriptor behind the saved dest ring
	 * entry; its first MSDU slot is rewritten below to describe the
	 * reassembled frame.
	 */
	ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
#if defined(__linux__)
	msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr +
#elif defined(__FreeBSD__)
	msdu_link = (struct hal_rx_msdu_link *)((u8 *)link_desc_banks[desc_bank].vaddr +
#endif
			(paddr - link_desc_banks[desc_bank].paddr));
	msdu0 = &msdu_link->msdu_link[0];
	/* Preserve the original REO destination indication before wiping
	 * the descriptor.
	 */
	dst_idx = FIELD_GET(RX_MSDU_DESC_INFO0_REO_DEST_IND, msdu0->rx_msdu_info.info0);
	memset(msdu0, 0, sizeof(*msdu0));

	/* Single-MSDU MPDU: first and last in one buffer, no continuation */
	msdu_info = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1) |
		    FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1) |
		    FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_CONTINUATION, 0) |
		    FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_LENGTH,
			       defrag_skb->len - hal_rx_desc_sz) |
		    FIELD_PREP(RX_MSDU_DESC_INFO0_REO_DEST_IND, dst_idx) |
		    FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_SA, 1) |
		    FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_DA, 1);
	msdu0->rx_msdu_info.info0 = msdu_info;

	/* change msdu len in hal rx desc */
	ath11k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz);

	paddr = dma_map_single(ab->dev, defrag_skb->data,
			       defrag_skb->len + skb_tailroom(defrag_skb),
			       DMA_TO_DEVICE);
	if (dma_mapping_error(ab->dev, paddr))
		return -ENOMEM;

	/* Track the buffer in the refill idr so the normal rx completion
	 * path can find it again when hardware hands it back.
	 */
	spin_lock_bh(&rx_refill_ring->idr_lock);
	buf_id = idr_alloc(&rx_refill_ring->bufs_idr, defrag_skb, 0,
			   rx_refill_ring->bufs_max * 3, GFP_ATOMIC);
	spin_unlock_bh(&rx_refill_ring->idr_lock);
	if (buf_id < 0) {
		ret = -ENOMEM;
		goto err_unmap_dma;
	}

	ATH11K_SKB_RXCB(defrag_skb)->paddr = paddr;
	cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, dp->mac_id) |
		 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

	ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie,
					ab->hw_params.hal_params->rx_buf_rbm);

	/* Fill mpdu details into reo entrace ring */
	srng = &ab->hal.srng_list[ab->dp.reo_reinject_ring.ring_id];

	spin_lock_bh(&srng->lock);
	ath11k_hal_srng_access_begin(ab, srng);

	reo_ent_ring = (struct hal_reo_entrance_ring *)
			ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!reo_ent_ring) {
		ath11k_hal_srng_access_end(ab, srng);
		spin_unlock_bh(&srng->lock);
		ret = -ENOSPC;
		goto err_free_idr;
	}
	memset(reo_ent_ring, 0, sizeof(*reo_ent_ring));

	ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
	ath11k_hal_rx_buf_addr_info_set(reo_ent_ring, paddr, desc_bank,
					HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST);

	/* VALID_PN asks REO to redo the PN check on the reassembled frame */
	mpdu_info = FIELD_PREP(RX_MPDU_DESC_INFO0_MSDU_COUNT, 1) |
		    FIELD_PREP(RX_MPDU_DESC_INFO0_SEQ_NUM, rx_tid->cur_sn) |
		    FIELD_PREP(RX_MPDU_DESC_INFO0_FRAG_FLAG, 0) |
		    FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_SA, 1) |
		    FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_DA, 1) |
		    FIELD_PREP(RX_MPDU_DESC_INFO0_RAW_MPDU, 1) |
		    FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_PN, 1);

	reo_ent_ring->rx_mpdu_info.info0 = mpdu_info;
	reo_ent_ring->rx_mpdu_info.meta_data = reo_dest_ring->rx_mpdu_info.meta_data;
	reo_ent_ring->queue_addr_lo = reo_dest_ring->queue_addr_lo;
	reo_ent_ring->info0 = FIELD_PREP(HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI,
					 FIELD_GET(HAL_REO_DEST_RING_INFO0_QUEUE_ADDR_HI,
						   reo_dest_ring->info0)) |
			      FIELD_PREP(HAL_REO_ENTR_RING_INFO0_DEST_IND, dst_idx);
	ath11k_hal_srng_access_end(ab, srng);
	spin_unlock_bh(&srng->lock);

	return 0;

err_free_idr:
	spin_lock_bh(&rx_refill_ring->idr_lock);
	idr_remove(&rx_refill_ring->bufs_idr, buf_id);
	spin_unlock_bh(&rx_refill_ring->idr_lock);
err_unmap_dma:
	dma_unmap_single(ab->dev, paddr, defrag_skb->len + skb_tailroom(defrag_skb),
			 DMA_TO_DEVICE);
	return ret;
}
3529dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_h_cmp_frags(struct ath11k * ar,struct sk_buff * a,struct sk_buff * b)3530dd4f32aeSBjoern A. Zeeb static int ath11k_dp_rx_h_cmp_frags(struct ath11k *ar,
3531dd4f32aeSBjoern A. Zeeb struct sk_buff *a, struct sk_buff *b)
3532dd4f32aeSBjoern A. Zeeb {
3533dd4f32aeSBjoern A. Zeeb int frag1, frag2;
3534dd4f32aeSBjoern A. Zeeb
3535dd4f32aeSBjoern A. Zeeb frag1 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, a);
3536dd4f32aeSBjoern A. Zeeb frag2 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, b);
3537dd4f32aeSBjoern A. Zeeb
3538dd4f32aeSBjoern A. Zeeb return frag1 - frag2;
3539dd4f32aeSBjoern A. Zeeb }
3540dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_h_sort_frags(struct ath11k * ar,struct sk_buff_head * frag_list,struct sk_buff * cur_frag)3541dd4f32aeSBjoern A. Zeeb static void ath11k_dp_rx_h_sort_frags(struct ath11k *ar,
3542dd4f32aeSBjoern A. Zeeb struct sk_buff_head *frag_list,
3543dd4f32aeSBjoern A. Zeeb struct sk_buff *cur_frag)
3544dd4f32aeSBjoern A. Zeeb {
3545dd4f32aeSBjoern A. Zeeb struct sk_buff *skb;
3546dd4f32aeSBjoern A. Zeeb int cmp;
3547dd4f32aeSBjoern A. Zeeb
3548dd4f32aeSBjoern A. Zeeb skb_queue_walk(frag_list, skb) {
3549dd4f32aeSBjoern A. Zeeb cmp = ath11k_dp_rx_h_cmp_frags(ar, skb, cur_frag);
3550dd4f32aeSBjoern A. Zeeb if (cmp < 0)
3551dd4f32aeSBjoern A. Zeeb continue;
3552dd4f32aeSBjoern A. Zeeb __skb_queue_before(frag_list, skb, cur_frag);
3553dd4f32aeSBjoern A. Zeeb return;
3554dd4f32aeSBjoern A. Zeeb }
3555dd4f32aeSBjoern A. Zeeb __skb_queue_tail(frag_list, cur_frag);
3556dd4f32aeSBjoern A. Zeeb }
3557dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_h_get_pn(struct ath11k * ar,struct sk_buff * skb)3558dd4f32aeSBjoern A. Zeeb static u64 ath11k_dp_rx_h_get_pn(struct ath11k *ar, struct sk_buff *skb)
3559dd4f32aeSBjoern A. Zeeb {
3560dd4f32aeSBjoern A. Zeeb struct ieee80211_hdr *hdr;
3561dd4f32aeSBjoern A. Zeeb u64 pn = 0;
3562dd4f32aeSBjoern A. Zeeb u8 *ehdr;
3563dd4f32aeSBjoern A. Zeeb u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3564dd4f32aeSBjoern A. Zeeb
3565dd4f32aeSBjoern A. Zeeb hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
3566dd4f32aeSBjoern A. Zeeb ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control);
3567dd4f32aeSBjoern A. Zeeb
3568dd4f32aeSBjoern A. Zeeb pn = ehdr[0];
3569dd4f32aeSBjoern A. Zeeb pn |= (u64)ehdr[1] << 8;
3570dd4f32aeSBjoern A. Zeeb pn |= (u64)ehdr[4] << 16;
3571dd4f32aeSBjoern A. Zeeb pn |= (u64)ehdr[5] << 24;
3572dd4f32aeSBjoern A. Zeeb pn |= (u64)ehdr[6] << 32;
3573dd4f32aeSBjoern A. Zeeb pn |= (u64)ehdr[7] << 40;
3574dd4f32aeSBjoern A. Zeeb
3575dd4f32aeSBjoern A. Zeeb return pn;
3576dd4f32aeSBjoern A. Zeeb }
3577dd4f32aeSBjoern A. Zeeb
3578dd4f32aeSBjoern A. Zeeb static bool
ath11k_dp_rx_h_defrag_validate_incr_pn(struct ath11k * ar,struct dp_rx_tid * rx_tid)3579dd4f32aeSBjoern A. Zeeb ath11k_dp_rx_h_defrag_validate_incr_pn(struct ath11k *ar, struct dp_rx_tid *rx_tid)
3580dd4f32aeSBjoern A. Zeeb {
3581dd4f32aeSBjoern A. Zeeb enum hal_encrypt_type encrypt_type;
3582dd4f32aeSBjoern A. Zeeb struct sk_buff *first_frag, *skb;
3583dd4f32aeSBjoern A. Zeeb struct hal_rx_desc *desc;
3584dd4f32aeSBjoern A. Zeeb u64 last_pn;
3585dd4f32aeSBjoern A. Zeeb u64 cur_pn;
3586dd4f32aeSBjoern A. Zeeb
3587dd4f32aeSBjoern A. Zeeb first_frag = skb_peek(&rx_tid->rx_frags);
3588dd4f32aeSBjoern A. Zeeb desc = (struct hal_rx_desc *)first_frag->data;
3589dd4f32aeSBjoern A. Zeeb
3590dd4f32aeSBjoern A. Zeeb encrypt_type = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, desc);
3591dd4f32aeSBjoern A. Zeeb if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 &&
3592dd4f32aeSBjoern A. Zeeb encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 &&
3593dd4f32aeSBjoern A. Zeeb encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 &&
3594dd4f32aeSBjoern A. Zeeb encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256)
3595dd4f32aeSBjoern A. Zeeb return true;
3596dd4f32aeSBjoern A. Zeeb
3597dd4f32aeSBjoern A. Zeeb last_pn = ath11k_dp_rx_h_get_pn(ar, first_frag);
3598dd4f32aeSBjoern A. Zeeb skb_queue_walk(&rx_tid->rx_frags, skb) {
3599dd4f32aeSBjoern A. Zeeb if (skb == first_frag)
3600dd4f32aeSBjoern A. Zeeb continue;
3601dd4f32aeSBjoern A. Zeeb
3602dd4f32aeSBjoern A. Zeeb cur_pn = ath11k_dp_rx_h_get_pn(ar, skb);
3603dd4f32aeSBjoern A. Zeeb if (cur_pn != last_pn + 1)
3604dd4f32aeSBjoern A. Zeeb return false;
3605dd4f32aeSBjoern A. Zeeb last_pn = cur_pn;
3606dd4f32aeSBjoern A. Zeeb }
3607dd4f32aeSBjoern A. Zeeb return true;
3608dd4f32aeSBjoern A. Zeeb }
3609dd4f32aeSBjoern A. Zeeb
/* Handle one rx fragment delivered through the REO exception path.
 *
 * Validates the fragment, queues it (in fragment-number order) on the
 * peer's per-TID reassembly list, and once all fragments of the sequence
 * have arrived verifies the PN progression, reassembles the MPDU and
 * re-injects it into the REO entrance ring.
 *
 * @msdu: fragment skb, hal rx descriptor still at the head
 * @ring_desc: REO destination ring entry this fragment arrived in; for the
 *             first fragment it is duplicated and kept for re-injection,
 *             otherwise its link descriptor is returned to the idle list.
 *
 * Returns 0 when the fragment was consumed (queued or reassembled), a
 * negative errno when the caller should drop it.
 */
static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
				    struct sk_buff *msdu,
				    u32 *ring_desc)
{
	struct ath11k_base *ab = ar->ab;
	struct hal_rx_desc *rx_desc;
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;
	struct sk_buff *defrag_skb = NULL;
	u32 peer_id;
	u16 seqno, frag_no;
	u8 tid;
	int ret = 0;
	bool more_frags;
	bool is_mcbc;

	rx_desc = (struct hal_rx_desc *)msdu->data;
	peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
	tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, rx_desc);
	seqno = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
	frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, msdu);
	more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(ar->ab, msdu);
	is_mcbc = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc);

	/* Multicast/Broadcast fragments are not expected */
	if (is_mcbc)
		return -EINVAL;

	if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(ar->ab, rx_desc) ||
	    !ath11k_dp_rx_h_mpdu_start_fc_valid(ar->ab, rx_desc) ||
	    tid > IEEE80211_NUM_TIDS)
		return -EINVAL;

	/* received unfragmented packet in reo
	 * exception ring, this shouldn't happen
	 * as these packets typically come from
	 * reo2sw srngs.
	 */
	if (WARN_ON_ONCE(!frag_no && !more_frags))
		return -EINVAL;

	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find_by_id(ab, peer_id);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n",
			    peer_id);
		ret = -ENOENT;
		goto out_unlock;
	}
	if (!peer->dp_setup_done) {
		ath11k_warn(ab, "The peer %pM [%d] has uninitialized datapath\n",
			    peer->addr, peer_id);
		ret = -ENOENT;
		goto out_unlock;
	}

	rx_tid = &peer->rx_tid[tid];

	if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
	    skb_queue_empty(&rx_tid->rx_frags)) {
		/* Flush stored fragments and start a new sequence */
		ath11k_dp_rx_frags_cleanup(rx_tid, true);
		rx_tid->cur_sn = seqno;
	}

	if (rx_tid->rx_frag_bitmap & BIT(frag_no)) {
		/* Fragment already present */
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Fast-path append when the fragment arrives in order; otherwise
	 * insert at the right position by fragment number.
	 */
	if (!rx_tid->rx_frag_bitmap || (frag_no > __fls(rx_tid->rx_frag_bitmap)))
		__skb_queue_tail(&rx_tid->rx_frags, msdu);
	else
		ath11k_dp_rx_h_sort_frags(ar, &rx_tid->rx_frags, msdu);

	rx_tid->rx_frag_bitmap |= BIT(frag_no);
	if (!more_frags)
		rx_tid->last_frag_no = frag_no;

	if (frag_no == 0) {
		/* Keep a copy of the first fragment's dest ring entry; the
		 * re-injection path needs its link descriptor and REO queue
		 * info later.
		 */
		rx_tid->dst_ring_desc = kmemdup(ring_desc,
						sizeof(*rx_tid->dst_ring_desc),
						GFP_ATOMIC);
		if (!rx_tid->dst_ring_desc) {
			ret = -ENOMEM;
			goto out_unlock;
		}
	} else {
		ath11k_dp_rx_link_desc_return(ab, ring_desc,
					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
	}

	/* Not all fragments seen yet: (re)arm the reassembly timeout and
	 * wait for more.
	 */
	if (!rx_tid->last_frag_no ||
	    rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) {
		mod_timer(&rx_tid->frag_timer, jiffies +
					       ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS);
		goto out_unlock;
	}

	/* del_timer_sync() may sleep on the timer callback which itself
	 * takes base_lock, so the lock must be dropped around it; the peer
	 * must then be looked up again as it may have gone away meanwhile.
	 */
	spin_unlock_bh(&ab->base_lock);
	del_timer_sync(&rx_tid->frag_timer);
	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find_by_id(ab, peer_id);
	if (!peer)
		goto err_frags_cleanup;

	if (!ath11k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid))
		goto err_frags_cleanup;

	if (ath11k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb))
		goto err_frags_cleanup;

	if (!defrag_skb)
		goto err_frags_cleanup;

	if (ath11k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb))
		goto err_frags_cleanup;

	ath11k_dp_rx_frags_cleanup(rx_tid, false);
	goto out_unlock;

err_frags_cleanup:
	dev_kfree_skb_any(defrag_skb);
	ath11k_dp_rx_frags_cleanup(rx_tid, true);
out_unlock:
	spin_unlock_bh(&ab->base_lock);
	return ret;
}
3740dd4f32aeSBjoern A. Zeeb
3741dd4f32aeSBjoern A. Zeeb static int
ath11k_dp_process_rx_err_buf(struct ath11k * ar,u32 * ring_desc,int buf_id,bool drop)3742dd4f32aeSBjoern A. Zeeb ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool drop)
3743dd4f32aeSBjoern A. Zeeb {
3744dd4f32aeSBjoern A. Zeeb struct ath11k_pdev_dp *dp = &ar->dp;
3745dd4f32aeSBjoern A. Zeeb struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
3746dd4f32aeSBjoern A. Zeeb struct sk_buff *msdu;
3747dd4f32aeSBjoern A. Zeeb struct ath11k_skb_rxcb *rxcb;
3748dd4f32aeSBjoern A. Zeeb struct hal_rx_desc *rx_desc;
3749dd4f32aeSBjoern A. Zeeb u8 *hdr_status;
3750dd4f32aeSBjoern A. Zeeb u16 msdu_len;
3751dd4f32aeSBjoern A. Zeeb u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3752dd4f32aeSBjoern A. Zeeb
3753dd4f32aeSBjoern A. Zeeb spin_lock_bh(&rx_ring->idr_lock);
3754dd4f32aeSBjoern A. Zeeb msdu = idr_find(&rx_ring->bufs_idr, buf_id);
3755dd4f32aeSBjoern A. Zeeb if (!msdu) {
3756dd4f32aeSBjoern A. Zeeb ath11k_warn(ar->ab, "rx err buf with invalid buf_id %d\n",
3757dd4f32aeSBjoern A. Zeeb buf_id);
3758dd4f32aeSBjoern A. Zeeb spin_unlock_bh(&rx_ring->idr_lock);
3759dd4f32aeSBjoern A. Zeeb return -EINVAL;
3760dd4f32aeSBjoern A. Zeeb }
3761dd4f32aeSBjoern A. Zeeb
3762dd4f32aeSBjoern A. Zeeb idr_remove(&rx_ring->bufs_idr, buf_id);
3763dd4f32aeSBjoern A. Zeeb spin_unlock_bh(&rx_ring->idr_lock);
3764dd4f32aeSBjoern A. Zeeb
3765dd4f32aeSBjoern A. Zeeb rxcb = ATH11K_SKB_RXCB(msdu);
3766dd4f32aeSBjoern A. Zeeb dma_unmap_single(ar->ab->dev, rxcb->paddr,
3767dd4f32aeSBjoern A. Zeeb msdu->len + skb_tailroom(msdu),
3768dd4f32aeSBjoern A. Zeeb DMA_FROM_DEVICE);
3769dd4f32aeSBjoern A. Zeeb
3770dd4f32aeSBjoern A. Zeeb if (drop) {
3771dd4f32aeSBjoern A. Zeeb dev_kfree_skb_any(msdu);
3772dd4f32aeSBjoern A. Zeeb return 0;
3773dd4f32aeSBjoern A. Zeeb }
3774dd4f32aeSBjoern A. Zeeb
3775dd4f32aeSBjoern A. Zeeb rcu_read_lock();
3776dd4f32aeSBjoern A. Zeeb if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {
3777dd4f32aeSBjoern A. Zeeb dev_kfree_skb_any(msdu);
3778dd4f32aeSBjoern A. Zeeb goto exit;
3779dd4f32aeSBjoern A. Zeeb }
3780dd4f32aeSBjoern A. Zeeb
3781dd4f32aeSBjoern A. Zeeb if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
3782dd4f32aeSBjoern A. Zeeb dev_kfree_skb_any(msdu);
3783dd4f32aeSBjoern A. Zeeb goto exit;
3784dd4f32aeSBjoern A. Zeeb }
3785dd4f32aeSBjoern A. Zeeb
3786dd4f32aeSBjoern A. Zeeb rx_desc = (struct hal_rx_desc *)msdu->data;
3787dd4f32aeSBjoern A. Zeeb msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, rx_desc);
3788dd4f32aeSBjoern A. Zeeb if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
3789dd4f32aeSBjoern A. Zeeb hdr_status = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc);
3790dd4f32aeSBjoern A. Zeeb ath11k_warn(ar->ab, "invalid msdu leng %u", msdu_len);
3791dd4f32aeSBjoern A. Zeeb ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
3792dd4f32aeSBjoern A. Zeeb sizeof(struct ieee80211_hdr));
3793dd4f32aeSBjoern A. Zeeb ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
3794dd4f32aeSBjoern A. Zeeb sizeof(struct hal_rx_desc));
3795dd4f32aeSBjoern A. Zeeb dev_kfree_skb_any(msdu);
3796dd4f32aeSBjoern A. Zeeb goto exit;
3797dd4f32aeSBjoern A. Zeeb }
3798dd4f32aeSBjoern A. Zeeb
3799dd4f32aeSBjoern A. Zeeb skb_put(msdu, hal_rx_desc_sz + msdu_len);
3800dd4f32aeSBjoern A. Zeeb
3801dd4f32aeSBjoern A. Zeeb if (ath11k_dp_rx_frag_h_mpdu(ar, msdu, ring_desc)) {
3802dd4f32aeSBjoern A. Zeeb dev_kfree_skb_any(msdu);
3803dd4f32aeSBjoern A. Zeeb ath11k_dp_rx_link_desc_return(ar->ab, ring_desc,
3804dd4f32aeSBjoern A. Zeeb HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3805dd4f32aeSBjoern A. Zeeb }
3806dd4f32aeSBjoern A. Zeeb exit:
3807dd4f32aeSBjoern A. Zeeb rcu_read_unlock();
3808dd4f32aeSBjoern A. Zeeb return 0;
3809dd4f32aeSBjoern A. Zeeb }
3810dd4f32aeSBjoern A. Zeeb
/* NAPI handler for the REO exception ring.
 *
 * Reaps up to @budget erroneous/fragmented MPDU descriptors, processes the
 * MSDUs they reference (only single-MSDU rx fragments are handled; the rest
 * are dropped), and replenishes the per-pdev rx refill rings with as many
 * buffers as were reaped.
 *
 * Returns the total number of buffers reaped (capped at @budget).
 */
int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
			     int budget)
{
	u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
	struct dp_link_desc_bank *link_desc_banks;
	enum hal_rx_buf_return_buf_manager rbm;
	int tot_n_bufs_reaped, quota, ret, i;
	int n_bufs_reaped[MAX_RADIOS] = {0};
	struct dp_rxdma_ring *rx_ring;
	struct dp_srng *reo_except;
	u32 desc_bank, num_msdus;
	struct hal_srng *srng;
	struct ath11k_dp *dp;
	void *link_desc_va;
	int buf_id, mac_id;
	struct ath11k *ar;
	dma_addr_t paddr;
	u32 *desc;
	bool is_frag;
	u8 drop = 0;

	tot_n_bufs_reaped = 0;
	quota = budget;

	dp = &ab->dp;
	reo_except = &dp->reo_except_ring;
	link_desc_banks = dp->link_desc_banks;

	srng = &ab->hal.srng_list[reo_except->ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	while (budget &&
	       (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
		struct hal_reo_dest_ring *reo_desc = (struct hal_reo_dest_ring *)desc;

		ab->soc_stats.err_ring_pkts++;
		ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr,
						    &desc_bank);
		if (ret) {
			ath11k_warn(ab, "failed to parse error reo desc %d\n",
				    ret);
			continue;
		}
		/* Translate the link descriptor's bus address into its
		 * CPU mapping within the owning descriptor bank.
		 */
#if defined(__linux__)
		link_desc_va = link_desc_banks[desc_bank].vaddr +
#elif defined(__FreeBSD__)
		link_desc_va = (u8 *)link_desc_banks[desc_bank].vaddr +
#endif
			       (paddr - link_desc_banks[desc_bank].paddr);
		ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
						 &rbm);
		if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
		    rbm != HAL_RX_BUF_RBM_SW3_BM) {
			ab->soc_stats.invalid_rbm++;
			ath11k_warn(ab, "invalid return buffer manager %d\n", rbm);
			ath11k_dp_rx_link_desc_return(ab, desc,
						      HAL_WBM_REL_BM_ACT_REL_MSDU);
			continue;
		}

		is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG);

		/* Process only rx fragments with one msdu per link desc below, and drop
		 * msdu's indicated due to error reasons.
		 */
		if (!is_frag || num_msdus > 1) {
			drop = 1;
			/* Return the link desc back to wbm idle list */
			ath11k_dp_rx_link_desc_return(ab, desc,
						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
		}

		for (i = 0; i < num_msdus; i++) {
			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
					   msdu_cookies[i]);

			mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID,
					   msdu_cookies[i]);

			ar = ab->pdevs[mac_id].ar;

			if (!ath11k_dp_process_rx_err_buf(ar, desc, buf_id, drop)) {
				n_bufs_reaped[mac_id]++;
				tot_n_bufs_reaped++;
			}
		}

		if (tot_n_bufs_reaped >= quota) {
			tot_n_bufs_reaped = quota;
			goto exit;
		}

		budget = quota - tot_n_bufs_reaped;
	}

exit:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	/* Give back as many fresh rx buffers per pdev as were consumed */
	for (i = 0; i < ab->num_radios; i++) {
		if (!n_bufs_reaped[i])
			continue;

		ar = ab->pdevs[i].ar;
		rx_ring = &ar->dp.rx_refill_buf_ring;

		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i],
					   ab->hw_params.hal_params->rx_buf_rbm);
	}

	return tot_n_bufs_reaped;
}
3927dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k * ar,int msdu_len,struct sk_buff_head * msdu_list)3928dd4f32aeSBjoern A. Zeeb static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar,
3929dd4f32aeSBjoern A. Zeeb int msdu_len,
3930dd4f32aeSBjoern A. Zeeb struct sk_buff_head *msdu_list)
3931dd4f32aeSBjoern A. Zeeb {
3932dd4f32aeSBjoern A. Zeeb struct sk_buff *skb, *tmp;
3933dd4f32aeSBjoern A. Zeeb struct ath11k_skb_rxcb *rxcb;
3934dd4f32aeSBjoern A. Zeeb int n_buffs;
3935dd4f32aeSBjoern A. Zeeb
3936dd4f32aeSBjoern A. Zeeb n_buffs = DIV_ROUND_UP(msdu_len,
3937dd4f32aeSBjoern A. Zeeb (DP_RX_BUFFER_SIZE - ar->ab->hw_params.hal_desc_sz));
3938dd4f32aeSBjoern A. Zeeb
3939dd4f32aeSBjoern A. Zeeb skb_queue_walk_safe(msdu_list, skb, tmp) {
3940dd4f32aeSBjoern A. Zeeb rxcb = ATH11K_SKB_RXCB(skb);
3941dd4f32aeSBjoern A. Zeeb if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
3942dd4f32aeSBjoern A. Zeeb rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
3943dd4f32aeSBjoern A. Zeeb if (!n_buffs)
3944dd4f32aeSBjoern A. Zeeb break;
3945dd4f32aeSBjoern A. Zeeb __skb_unlink(skb, msdu_list);
3946dd4f32aeSBjoern A. Zeeb dev_kfree_skb_any(skb);
3947dd4f32aeSBjoern A. Zeeb n_buffs--;
3948dd4f32aeSBjoern A. Zeeb }
3949dd4f32aeSBjoern A. Zeeb }
3950dd4f32aeSBjoern A. Zeeb }
3951dd4f32aeSBjoern A. Zeeb
/* Recover an MSDU that was flagged with a NULL REO queue descriptor error
 * and prepare it for delivery to mac80211.
 *
 * Trims the hal descriptor (and l3 padding for non-fragments), fills in
 * ppdu/mpdu rx status and the rxcb tid. For oversized scatter-gather MSDUs
 * the sibling buffers are dropped from @msdu_list and -EINVAL is returned
 * so the caller frees this buffer too.
 *
 * Returns 0 on success; -EINVAL / -EIO when the MSDU must be dropped.
 */
static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu,
				      struct ieee80211_rx_status *status,
				      struct sk_buff_head *msdu_list)
{
	u16 msdu_len;
	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
	struct rx_attention *rx_attention;
	u8 l3pad_bytes;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;

	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc);

	if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) {
		/* First buffer will be freed by the caller, so deduct it's length */
		msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz);
		ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
		return -EINVAL;
	}

	/* Hardware must have finished DMA-ing the MSDU before its
	 * descriptor fields can be trusted.
	 */
	rx_attention = ath11k_dp_rx_get_attention(ar->ab, desc);
	if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) {
		ath11k_warn(ar->ab,
			    "msdu_done bit not set in null_q_des processing\n");
		__skb_queue_purge(msdu_list);
		return -EIO;
	}

	/* Handle NULL queue descriptor violations arising out a missing
	 * REO queue for a given peer or a given TID. This typically
	 * may happen if a packet is received on a QOS enabled TID before the
	 * ADDBA negotiation for that TID, when the TID queue is setup. Or
	 * it may also happen for MC/BC frames if they are not routed to the
	 * non-QOS TID queue, in the absence of any other default TID queue.
	 * This error can show up both in a REO destination or WBM release ring.
	 */

	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc);
	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc);

	if (rxcb->is_frag) {
		/* Fragments carry no l3 padding; just strip the hal desc */
		skb_pull(msdu, hal_rx_desc_sz);
	} else {
		l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc);

		if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
			return -EINVAL;

		skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
		skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
	}
	ath11k_dp_rx_h_ppdu(ar, desc, status);

	ath11k_dp_rx_h_mpdu(ar, msdu, desc, status);

	rxcb->tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, desc);

	/* Please note that caller will having the access to msdu and completing
	 * rx with mac80211. Need not worry about cleaning up amsdu_list.
	 */

	return 0;
}
4015dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_h_reo_err(struct ath11k * ar,struct sk_buff * msdu,struct ieee80211_rx_status * status,struct sk_buff_head * msdu_list)4016dd4f32aeSBjoern A. Zeeb static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu,
4017dd4f32aeSBjoern A. Zeeb struct ieee80211_rx_status *status,
4018dd4f32aeSBjoern A. Zeeb struct sk_buff_head *msdu_list)
4019dd4f32aeSBjoern A. Zeeb {
4020dd4f32aeSBjoern A. Zeeb struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
4021dd4f32aeSBjoern A. Zeeb bool drop = false;
4022dd4f32aeSBjoern A. Zeeb
4023dd4f32aeSBjoern A. Zeeb ar->ab->soc_stats.reo_error[rxcb->err_code]++;
4024dd4f32aeSBjoern A. Zeeb
4025dd4f32aeSBjoern A. Zeeb switch (rxcb->err_code) {
4026dd4f32aeSBjoern A. Zeeb case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
4027dd4f32aeSBjoern A. Zeeb if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
4028dd4f32aeSBjoern A. Zeeb drop = true;
4029dd4f32aeSBjoern A. Zeeb break;
4030dd4f32aeSBjoern A. Zeeb case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
4031dd4f32aeSBjoern A. Zeeb /* TODO: Do not drop PN failed packets in the driver;
4032dd4f32aeSBjoern A. Zeeb * instead, it is good to drop such packets in mac80211
4033dd4f32aeSBjoern A. Zeeb * after incrementing the replay counters.
4034dd4f32aeSBjoern A. Zeeb */
4035dd4f32aeSBjoern A. Zeeb fallthrough;
4036dd4f32aeSBjoern A. Zeeb default:
4037dd4f32aeSBjoern A. Zeeb /* TODO: Review other errors and process them to mac80211
4038dd4f32aeSBjoern A. Zeeb * as appropriate.
4039dd4f32aeSBjoern A. Zeeb */
4040dd4f32aeSBjoern A. Zeeb drop = true;
4041dd4f32aeSBjoern A. Zeeb break;
4042dd4f32aeSBjoern A. Zeeb }
4043dd4f32aeSBjoern A. Zeeb
4044dd4f32aeSBjoern A. Zeeb return drop;
4045dd4f32aeSBjoern A. Zeeb }
4046dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_h_tkip_mic_err(struct ath11k * ar,struct sk_buff * msdu,struct ieee80211_rx_status * status)4047dd4f32aeSBjoern A. Zeeb static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu,
4048dd4f32aeSBjoern A. Zeeb struct ieee80211_rx_status *status)
4049dd4f32aeSBjoern A. Zeeb {
4050dd4f32aeSBjoern A. Zeeb u16 msdu_len;
4051dd4f32aeSBjoern A. Zeeb struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
4052dd4f32aeSBjoern A. Zeeb u8 l3pad_bytes;
4053dd4f32aeSBjoern A. Zeeb struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
4054dd4f32aeSBjoern A. Zeeb u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
4055dd4f32aeSBjoern A. Zeeb
4056dd4f32aeSBjoern A. Zeeb rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc);
4057dd4f32aeSBjoern A. Zeeb rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc);
4058dd4f32aeSBjoern A. Zeeb
4059dd4f32aeSBjoern A. Zeeb l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc);
4060dd4f32aeSBjoern A. Zeeb msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc);
4061dd4f32aeSBjoern A. Zeeb skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
4062dd4f32aeSBjoern A. Zeeb skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
4063dd4f32aeSBjoern A. Zeeb
4064dd4f32aeSBjoern A. Zeeb ath11k_dp_rx_h_ppdu(ar, desc, status);
4065dd4f32aeSBjoern A. Zeeb
4066dd4f32aeSBjoern A. Zeeb status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
4067dd4f32aeSBjoern A. Zeeb RX_FLAG_DECRYPTED);
4068dd4f32aeSBjoern A. Zeeb
4069dd4f32aeSBjoern A. Zeeb ath11k_dp_rx_h_undecap(ar, msdu, desc,
4070dd4f32aeSBjoern A. Zeeb HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
4071dd4f32aeSBjoern A. Zeeb }
4072dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_h_rxdma_err(struct ath11k * ar,struct sk_buff * msdu,struct ieee80211_rx_status * status)4073dd4f32aeSBjoern A. Zeeb static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar, struct sk_buff *msdu,
4074dd4f32aeSBjoern A. Zeeb struct ieee80211_rx_status *status)
4075dd4f32aeSBjoern A. Zeeb {
4076dd4f32aeSBjoern A. Zeeb struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
4077dd4f32aeSBjoern A. Zeeb bool drop = false;
4078dd4f32aeSBjoern A. Zeeb
4079dd4f32aeSBjoern A. Zeeb ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;
4080dd4f32aeSBjoern A. Zeeb
4081dd4f32aeSBjoern A. Zeeb switch (rxcb->err_code) {
4082dd4f32aeSBjoern A. Zeeb case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
4083dd4f32aeSBjoern A. Zeeb ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status);
4084dd4f32aeSBjoern A. Zeeb break;
4085dd4f32aeSBjoern A. Zeeb default:
4086dd4f32aeSBjoern A. Zeeb /* TODO: Review other rxdma error code to check if anything is
4087dd4f32aeSBjoern A. Zeeb * worth reporting to mac80211
4088dd4f32aeSBjoern A. Zeeb */
4089dd4f32aeSBjoern A. Zeeb drop = true;
4090dd4f32aeSBjoern A. Zeeb break;
4091dd4f32aeSBjoern A. Zeeb }
4092dd4f32aeSBjoern A. Zeeb
4093dd4f32aeSBjoern A. Zeeb return drop;
4094dd4f32aeSBjoern A. Zeeb }
4095dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_wbm_err(struct ath11k * ar,struct napi_struct * napi,struct sk_buff * msdu,struct sk_buff_head * msdu_list)4096dd4f32aeSBjoern A. Zeeb static void ath11k_dp_rx_wbm_err(struct ath11k *ar,
4097dd4f32aeSBjoern A. Zeeb struct napi_struct *napi,
4098dd4f32aeSBjoern A. Zeeb struct sk_buff *msdu,
4099dd4f32aeSBjoern A. Zeeb struct sk_buff_head *msdu_list)
4100dd4f32aeSBjoern A. Zeeb {
4101dd4f32aeSBjoern A. Zeeb struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
4102dd4f32aeSBjoern A. Zeeb struct ieee80211_rx_status rxs = {0};
4103dd4f32aeSBjoern A. Zeeb bool drop = true;
4104dd4f32aeSBjoern A. Zeeb
4105dd4f32aeSBjoern A. Zeeb switch (rxcb->err_rel_src) {
4106dd4f32aeSBjoern A. Zeeb case HAL_WBM_REL_SRC_MODULE_REO:
4107dd4f32aeSBjoern A. Zeeb drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
4108dd4f32aeSBjoern A. Zeeb break;
4109dd4f32aeSBjoern A. Zeeb case HAL_WBM_REL_SRC_MODULE_RXDMA:
4110dd4f32aeSBjoern A. Zeeb drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
4111dd4f32aeSBjoern A. Zeeb break;
4112dd4f32aeSBjoern A. Zeeb default:
4113dd4f32aeSBjoern A. Zeeb /* msdu will get freed */
4114dd4f32aeSBjoern A. Zeeb break;
4115dd4f32aeSBjoern A. Zeeb }
4116dd4f32aeSBjoern A. Zeeb
4117dd4f32aeSBjoern A. Zeeb if (drop) {
4118dd4f32aeSBjoern A. Zeeb dev_kfree_skb_any(msdu);
4119dd4f32aeSBjoern A. Zeeb return;
4120dd4f32aeSBjoern A. Zeeb }
4121dd4f32aeSBjoern A. Zeeb
4122dd4f32aeSBjoern A. Zeeb ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rxs);
4123dd4f32aeSBjoern A. Zeeb }
4124dd4f32aeSBjoern A. Zeeb
/* Reap error frames from the WBM release ring (NAPI context).
 *
 * Each ring entry describes one RX buffer released with an error by either
 * the REO or the RXDMA block.  Buffers are looked up by cookie, DMA
 * unmapped and queued per pdev; the consumed ring slots are replenished
 * and the queued frames are then handled (dropped or delivered to
 * mac80211) via ath11k_dp_rx_wbm_err().
 *
 * Returns the total number of buffers reaped (at most @budget).
 */
int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
				 struct napi_struct *napi, int budget)
{
	struct ath11k *ar;
	struct ath11k_dp *dp = &ab->dp;
	struct dp_rxdma_ring *rx_ring;
	struct hal_rx_wbm_rel_info err_info;
	struct hal_srng *srng;
	struct sk_buff *msdu;
	struct sk_buff_head msdu_list[MAX_RADIOS];	/* one queue per pdev */
	struct ath11k_skb_rxcb *rxcb;
	u32 *rx_desc;
	int buf_id, mac_id;
	int num_buffs_reaped[MAX_RADIOS] = {0};
	int total_num_buffs_reaped = 0;
	int ret, i;

	for (i = 0; i < ab->num_radios; i++)
		__skb_queue_head_init(&msdu_list[i]);

	srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	while (budget) {
		rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
		if (!rx_desc)
			break;

		ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
		if (ret) {
			ath11k_warn(ab,
				    "failed to parse rx error in wbm_rel ring desc %d\n",
				    ret);
			continue;
		}

		/* The cookie encodes both the buffer id and the owning pdev. */
		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie);
		mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie);

		ar = ab->pdevs[mac_id].ar;
		rx_ring = &ar->dp.rx_refill_buf_ring;

		spin_lock_bh(&rx_ring->idr_lock);
		msdu = idr_find(&rx_ring->bufs_idr, buf_id);
		if (!msdu) {
			ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d\n",
				    buf_id, mac_id);
			spin_unlock_bh(&rx_ring->idr_lock);
			continue;
		}

		/* Take ownership of the buffer away from the refill ring. */
		idr_remove(&rx_ring->bufs_idr, buf_id);
		spin_unlock_bh(&rx_ring->idr_lock);

		rxcb = ATH11K_SKB_RXCB(msdu);
		dma_unmap_single(ab->dev, rxcb->paddr,
				 msdu->len + skb_tailroom(msdu),
				 DMA_FROM_DEVICE);

		num_buffs_reaped[mac_id]++;
		total_num_buffs_reaped++;
		budget--;

		/* Only entries pushed due to a detected error carry a frame
		 * worth processing; anything else is simply freed.
		 */
		if (err_info.push_reason !=
		    HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
			dev_kfree_skb_any(msdu);
			continue;
		}

		/* Stash error source/code for ath11k_dp_rx_wbm_err(). */
		rxcb->err_rel_src = err_info.err_rel_src;
		rxcb->err_code = err_info.err_code;
		rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
		__skb_queue_tail(&msdu_list[mac_id], msdu);
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	if (!total_num_buffs_reaped)
		goto done;

	/* Refill the RXDMA buffer rings for the buffers consumed above. */
	for (i = 0; i < ab->num_radios; i++) {
		if (!num_buffs_reaped[i])
			continue;

		ar = ab->pdevs[i].ar;
		rx_ring = &ar->dp.rx_refill_buf_ring;

		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
					   ab->hw_params.hal_params->rx_buf_rbm);
	}

	rcu_read_lock();
	for (i = 0; i < ab->num_radios; i++) {
		/* Drop everything queued for pdevs that went away ... */
		if (!rcu_dereference(ab->pdevs_active[i])) {
			__skb_queue_purge(&msdu_list[i]);
			continue;
		}

		ar = ab->pdevs[i].ar;

		/* ... or that are in Channel Availability Check. */
		if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
			__skb_queue_purge(&msdu_list[i]);
			continue;
		}

		while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)
			ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);
	}
	rcu_read_unlock();
done:
	return total_num_buffs_reaped;
}
4242dd4f32aeSBjoern A. Zeeb
/* Drain the per-pdev RXDMA error destination ring.
 *
 * Each entry is a REO entrance descriptor pointing at an MSDU link
 * descriptor; every buffer referenced by that link descriptor is unmapped
 * and freed, the link descriptor is returned to the WBM idle list, and
 * the freed buffers are replenished afterwards.
 *
 * Returns the number of ring entries processed.
 * NOTE(review): because "quota--" is evaluated before the ring-empty
 * check, the returned count can exceed the number of entries actually
 * processed by one - pre-existing behavior, confirm before relying on it.
 */
int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget)
{
	struct ath11k *ar;
	struct dp_srng *err_ring;
	struct dp_rxdma_ring *rx_ring;
	struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks;
	struct hal_srng *srng;
	u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
	enum hal_rx_buf_return_buf_manager rbm;
	enum hal_reo_entr_rxdma_ecode rxdma_err_code;
	struct ath11k_skb_rxcb *rxcb;
	struct sk_buff *skb;
	struct hal_reo_entrance_ring *entr_ring;
	void *desc;
	int num_buf_freed = 0;
	int quota = budget;
	dma_addr_t paddr;
	u32 desc_bank;
	void *link_desc_va;
	int num_msdus;
	int i;
	int buf_id;

	ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
	err_ring = &ar->dp.rxdma_err_dst_ring[ath11k_hw_mac_id_to_srng_id(&ab->hw_params,
									  mac_id)];
	rx_ring = &ar->dp.rx_refill_buf_ring;

	srng = &ab->hal.srng_list[err_ring->ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	while (quota-- &&
	       (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
		ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank);

		/* Record the RXDMA error code for debugfs stats. */
		entr_ring = (struct hal_reo_entrance_ring *)desc;
		rxdma_err_code =
			FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
				  entr_ring->info1);
		ab->soc_stats.rxdma_error[rxdma_err_code]++;

		/* Translate the link descriptor's bus address into its
		 * virtual address within the owning descriptor bank.
		 */
#if defined(__linux__)
		link_desc_va = link_desc_banks[desc_bank].vaddr +
#elif defined(__FreeBSD__)
		link_desc_va = (u8 *)link_desc_banks[desc_bank].vaddr +
#endif
			       (paddr - link_desc_banks[desc_bank].paddr);
		ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus,
						 msdu_cookies, &rbm);

		/* Free every MSDU buffer referenced by this link desc. */
		for (i = 0; i < num_msdus; i++) {
			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
					   msdu_cookies[i]);

			spin_lock_bh(&rx_ring->idr_lock);
			skb = idr_find(&rx_ring->bufs_idr, buf_id);
			if (!skb) {
				ath11k_warn(ab, "rxdma error with invalid buf_id %d\n",
					    buf_id);
				spin_unlock_bh(&rx_ring->idr_lock);
				continue;
			}

			idr_remove(&rx_ring->bufs_idr, buf_id);
			spin_unlock_bh(&rx_ring->idr_lock);

			rxcb = ATH11K_SKB_RXCB(skb);
			dma_unmap_single(ab->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);

			num_buf_freed++;
		}

		/* Hand the link descriptor back to the WBM idle list. */
		ath11k_dp_rx_link_desc_return(ab, desc,
					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	if (num_buf_freed)
		ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed,
					   ab->hw_params.hal_params->rx_buf_rbm);

	return budget - quota;
}
4335dd4f32aeSBjoern A. Zeeb
/* Drain the REO status ring and complete pending REO commands.
 *
 * Each status descriptor is parsed by its TLV tag into @reo_status; the
 * command number in the status header is then matched against the pending
 * commands on dp->reo_cmd_list, and the matching command's completion
 * handler is invoked with the command status.
 */
void ath11k_dp_process_reo_status(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng;
	struct dp_reo_cmd *cmd, *tmp;
	bool found = false;
	u32 *reo_desc;
	u16 tag;
	struct hal_reo_status reo_status;

	srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];

	memset(&reo_status, 0, sizeof(reo_status));

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	while ((reo_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
		tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc);

		/* Decode the TLV into reo_status, per status type. */
		switch (tag) {
		case HAL_REO_GET_QUEUE_STATS_STATUS:
			ath11k_hal_reo_status_queue_stats(ab, reo_desc,
							  &reo_status);
			break;
		case HAL_REO_FLUSH_QUEUE_STATUS:
			ath11k_hal_reo_flush_queue_status(ab, reo_desc,
							  &reo_status);
			break;
		case HAL_REO_FLUSH_CACHE_STATUS:
			ath11k_hal_reo_flush_cache_status(ab, reo_desc,
							  &reo_status);
			break;
		case HAL_REO_UNBLOCK_CACHE_STATUS:
			ath11k_hal_reo_unblk_cache_status(ab, reo_desc,
							  &reo_status);
			break;
		case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
			ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc,
								 &reo_status);
			break;
		case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
			ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc,
								  &reo_status);
			break;
		case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
			ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc,
								  &reo_status);
			break;
		default:
			ath11k_warn(ab, "Unknown reo status type %d\n", tag);
			continue;
		}

		/* Unlink the matching pending command under reo_cmd_lock;
		 * its handler runs after the lock is dropped.
		 */
		spin_lock_bh(&dp->reo_cmd_lock);
		list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
			if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
				found = true;
				list_del(&cmd->list);
				break;
			}
		}
		spin_unlock_bh(&dp->reo_cmd_lock);

		if (found) {
			cmd->handler(dp, (void *)&cmd->data,
				     reo_status.uniform_hdr.cmd_status);
			kfree(cmd);
		}

		found = false;
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);
}
4414dd4f32aeSBjoern A. Zeeb
/* Tear down per-pdev RX state: free the RX SRNGs and release all RXDMA
 * buffers.  Counterpart of ath11k_dp_rx_pdev_alloc().
 */
void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;

	ath11k_dp_rx_pdev_srng_free(ar);
	ath11k_dp_rxdma_pdev_buf_free(ar);
}
4422dd4f32aeSBjoern A. Zeeb
/* Allocate and configure all per-pdev RX rings.
 *
 * Allocates the RX SRNGs and RXDMA buffers, then registers each ring with
 * the firmware over HTT: refill buffer ring, optional per-chain MAC buffer
 * rings, RXDMA error destination rings, the monitor rings (only when
 * rxdma1 is enabled in hw_params) and the monitor status refill rings.
 *
 * Returns 0 on success or a negative error code; on failure, earlier
 * allocations are left for ath11k_dp_rx_pdev_free() to reclaim.
 */
int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;
	struct ath11k_pdev_dp *dp = &ar->dp;
	u32 ring_id;
	int i;
	int ret;

	ret = ath11k_dp_rx_pdev_srng_alloc(ar);
	if (ret) {
		ath11k_warn(ab, "failed to setup rx srngs\n");
		return ret;
	}

	ret = ath11k_dp_rxdma_pdev_buf_setup(ar);
	if (ret) {
		ath11k_warn(ab, "failed to setup rxdma ring\n");
		return ret;
	}

	/* Register the RX refill buffer ring with firmware via HTT. */
	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
	ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF);
	if (ret) {
		ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
			    ret);
		return ret;
	}

	/* Some targets use separate per-chain MAC buffer rings. */
	if (ab->hw_params.rx_mac_buf_ring) {
		for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
			ring_id = dp->rx_mac_buf_ring[i].ring_id;
			ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
							  mac_id + i, HAL_RXDMA_BUF);
			if (ret) {
				ath11k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n",
					    i, ret);
				return ret;
			}
		}
	}

	/* One RXDMA error destination ring per RXDMA instance. */
	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		ring_id = dp->rxdma_err_dst_ring[i].ring_id;
		ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
						  mac_id + i, HAL_RXDMA_DST);
		if (ret) {
			ath11k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n",
				    i, ret);
			return ret;
		}
	}

	/* Monitor buf/dst/desc rings exist only with rxdma1. */
	if (!ab->hw_params.rxdma1_enable)
		goto config_refill_ring;

	ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
	ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
					  mac_id, HAL_RXDMA_MONITOR_BUF);
	if (ret) {
		ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
			    ret);
		return ret;
	}
	ret = ath11k_dp_tx_htt_srng_setup(ab,
					  dp->rxdma_mon_dst_ring.ring_id,
					  mac_id, HAL_RXDMA_MONITOR_DST);
	if (ret) {
		ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
			    ret);
		return ret;
	}
	ret = ath11k_dp_tx_htt_srng_setup(ab,
					  dp->rxdma_mon_desc_ring.ring_id,
					  mac_id, HAL_RXDMA_MONITOR_DESC);
	if (ret) {
		/* NOTE(review): message says "mon_dst_ring" but this is the
		 * monitor desc ring - looks like a copy/paste in the log text.
		 */
		ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
			    ret);
		return ret;
	}

config_refill_ring:
	/* Monitor status refill rings are configured on all targets. */
	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
		ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i,
						  HAL_RXDMA_MONITOR_STATUS);
		if (ret) {
			ath11k_warn(ab,
				    "failed to configure mon_status_refill_ring%d %d\n",
				    i, ret);
			return ret;
		}
	}

	return 0;
}
4518dd4f32aeSBjoern A. Zeeb
/* Split a remaining monitor-frame length into the next fragment.
 *
 * Sets *frag_len to the number of payload bytes the next monitor buffer
 * can carry (capped by the buffer size minus the HAL RX descriptor) and
 * decrements *total_len by that amount; the final fragment drains
 * *total_len to zero.
 */
static void ath11k_dp_mon_set_frag_len(u32 *total_len, u32 *frag_len)
{
	u32 max_frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc);

	if (*total_len < max_frag_len) {
		*frag_len = *total_len;
		*total_len = 0;
	} else {
		*frag_len = max_frag_len;
		*total_len -= max_frag_len;
	}
}
4529dd4f32aeSBjoern A. Zeeb
4530dd4f32aeSBjoern A. Zeeb static
ath11k_dp_rx_monitor_link_desc_return(struct ath11k * ar,void * p_last_buf_addr_info,u8 mac_id)4531dd4f32aeSBjoern A. Zeeb int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar,
4532dd4f32aeSBjoern A. Zeeb void *p_last_buf_addr_info,
4533dd4f32aeSBjoern A. Zeeb u8 mac_id)
4534dd4f32aeSBjoern A. Zeeb {
4535dd4f32aeSBjoern A. Zeeb struct ath11k_pdev_dp *dp = &ar->dp;
4536dd4f32aeSBjoern A. Zeeb struct dp_srng *dp_srng;
4537dd4f32aeSBjoern A. Zeeb void *hal_srng;
4538dd4f32aeSBjoern A. Zeeb void *src_srng_desc;
4539dd4f32aeSBjoern A. Zeeb int ret = 0;
4540dd4f32aeSBjoern A. Zeeb
4541dd4f32aeSBjoern A. Zeeb if (ar->ab->hw_params.rxdma1_enable) {
4542dd4f32aeSBjoern A. Zeeb dp_srng = &dp->rxdma_mon_desc_ring;
4543dd4f32aeSBjoern A. Zeeb hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
4544dd4f32aeSBjoern A. Zeeb } else {
4545dd4f32aeSBjoern A. Zeeb dp_srng = &ar->ab->dp.wbm_desc_rel_ring;
4546dd4f32aeSBjoern A. Zeeb hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
4547dd4f32aeSBjoern A. Zeeb }
4548dd4f32aeSBjoern A. Zeeb
4549dd4f32aeSBjoern A. Zeeb ath11k_hal_srng_access_begin(ar->ab, hal_srng);
4550dd4f32aeSBjoern A. Zeeb
4551dd4f32aeSBjoern A. Zeeb src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng);
4552dd4f32aeSBjoern A. Zeeb
4553dd4f32aeSBjoern A. Zeeb if (src_srng_desc) {
4554dd4f32aeSBjoern A. Zeeb struct ath11k_buffer_addr *src_desc =
4555dd4f32aeSBjoern A. Zeeb (struct ath11k_buffer_addr *)src_srng_desc;
4556dd4f32aeSBjoern A. Zeeb
4557dd4f32aeSBjoern A. Zeeb *src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info);
4558dd4f32aeSBjoern A. Zeeb } else {
4559dd4f32aeSBjoern A. Zeeb ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4560dd4f32aeSBjoern A. Zeeb "Monitor Link Desc Ring %d Full", mac_id);
4561dd4f32aeSBjoern A. Zeeb ret = -ENOMEM;
4562dd4f32aeSBjoern A. Zeeb }
4563dd4f32aeSBjoern A. Zeeb
4564dd4f32aeSBjoern A. Zeeb ath11k_hal_srng_access_end(ar->ab, hal_srng);
4565dd4f32aeSBjoern A. Zeeb return ret;
4566dd4f32aeSBjoern A. Zeeb }
4567dd4f32aeSBjoern A. Zeeb
4568dd4f32aeSBjoern A. Zeeb static
ath11k_dp_rx_mon_next_link_desc_get(void * rx_msdu_link_desc,dma_addr_t * paddr,u32 * sw_cookie,u8 * rbm,void ** pp_buf_addr_info)4569dd4f32aeSBjoern A. Zeeb void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc,
4570dd4f32aeSBjoern A. Zeeb dma_addr_t *paddr, u32 *sw_cookie,
4571dd4f32aeSBjoern A. Zeeb u8 *rbm,
4572dd4f32aeSBjoern A. Zeeb void **pp_buf_addr_info)
4573dd4f32aeSBjoern A. Zeeb {
4574dd4f32aeSBjoern A. Zeeb struct hal_rx_msdu_link *msdu_link =
4575dd4f32aeSBjoern A. Zeeb (struct hal_rx_msdu_link *)rx_msdu_link_desc;
4576dd4f32aeSBjoern A. Zeeb struct ath11k_buffer_addr *buf_addr_info;
4577dd4f32aeSBjoern A. Zeeb
4578dd4f32aeSBjoern A. Zeeb buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info;
4579dd4f32aeSBjoern A. Zeeb
4580dd4f32aeSBjoern A. Zeeb ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, rbm);
4581dd4f32aeSBjoern A. Zeeb
4582dd4f32aeSBjoern A. Zeeb *pp_buf_addr_info = (void *)buf_addr_info;
4583dd4f32aeSBjoern A. Zeeb }
4584dd4f32aeSBjoern A. Zeeb
/* Force @skb's data length to exactly @len bytes.
 *
 * Shrinks by trimming the tail, or grows by extending into the tailroom,
 * expanding the skb head first when the tailroom is insufficient.
 *
 * Returns 0 on success. On allocation failure the skb is freed and
 * -ENOMEM is returned; the caller must not touch @skb afterwards.
 */
static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len)
{
	u32 grow;

	if (skb->len > len) {
		skb_trim(skb, len);
		return 0;
	}

	grow = len - skb->len;

	/* Not enough tailroom: reallocate the head with the missing bytes. */
	if (skb_tailroom(skb) < grow &&
	    pskb_expand_head(skb, 0, grow - skb_tailroom(skb), GFP_ATOMIC)) {
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}

	skb_put(skb, grow);
	return 0;
}
4602dd4f32aeSBjoern A. Zeeb
/* Expand an MSDU link descriptor into a flat MSDU list.
 *
 * Copies per-MSDU flags, length, SW cookie and return-buffer-manager from
 * each valid entry of @msdu_link_desc into @msdu_list, marking the first
 * and last MSDU of the MPDU. A zero buffer address terminates the list
 * early; *num_msdus is set to the number of valid entries found.
 *
 * Fixes vs. the previous revision: removed a stray empty statement, and
 * guarded the backward reference so an all-empty descriptor (first entry
 * already zero) no longer touches msdu_details[-1].
 */
static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar,
					void *msdu_link_desc,
					struct hal_rx_msdu_list *msdu_list,
					u16 *num_msdus)
{
	struct hal_rx_msdu_details *msdu_details = NULL;
	struct rx_msdu_desc *msdu_desc_info = NULL;
	struct hal_rx_msdu_link *msdu_link = NULL;
	int i;
	u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1);
	u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1);
	u8 tmp = 0;

	msdu_link = (struct hal_rx_msdu_link *)msdu_link_desc;
	msdu_details = &msdu_link->msdu_link[0];

	for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) {
		if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
			      msdu_details[i].buf_addr_info.info0) == 0) {
			/* Zero address terminates the list; the previous
			 * entry (if any) is the last MSDU of this MPDU.
			 */
			if (i) {
				msdu_desc_info = &msdu_details[i - 1].rx_msdu_info;
				msdu_desc_info->info0 |= last;
			}
			break;
		}
		msdu_desc_info = &msdu_details[i].rx_msdu_info;

		if (!i)
			msdu_desc_info->info0 |= first;
		else if (i == (HAL_RX_NUM_MSDU_DESC - 1))
			msdu_desc_info->info0 |= last;
		msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0;
		msdu_list->msdu_info[i].msdu_len =
			 HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0);
		msdu_list->sw_cookie[i] =
			FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
				  msdu_details[i].buf_addr_info.info1);
		tmp = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
				msdu_details[i].buf_addr_info.info1);
		msdu_list->rbm[i] = tmp;
	}
	*num_msdus = i;
}
4645dd4f32aeSBjoern A. Zeeb
/* Compare the PPDU id seen on the destination ring against the one
 * tracked from the status ring.
 *
 * Returns the (updated) PPDU id when the two sides need to resync, or 0
 * when they already agree (modulo wrap-around), in which case *ppdu_id
 * and *rx_bufs_used are left untouched.
 */
static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id,
					u32 *rx_bufs_used)
{
	if (*ppdu_id < msdu_ppdu_id &&
	    msdu_ppdu_id - *ppdu_id < DP_NOT_PPDU_ID_WRAP_AROUND) {
		/* Destination ring ran ahead (no wrap): follow it. */
		*ppdu_id = msdu_ppdu_id;
		return msdu_ppdu_id;
	}

	if (*ppdu_id > msdu_ppdu_id &&
	    *ppdu_id - msdu_ppdu_id > DP_NOT_PPDU_ID_WRAP_AROUND) {
		/* mon_dst is behind than mon_status
		 * skip dst_ring and free it
		 */
		*rx_bufs_used += 1;
		*ppdu_id = msdu_ppdu_id;
		return msdu_ppdu_id;
	}

	return 0;
}
4666dd4f32aeSBjoern A. Zeeb
/* Work out how many bytes of the current rx buffer belong to the MSDU.
 *
 * Multi-buffer MSDUs carry the CONTINUATION flag: the total length is
 * latched on the first fragment and consumed buffer-by-buffer via
 * ath11k_dp_mon_set_frag_len(). For the final (or only) buffer the
 * fragment state is cleared and *msdu_cnt is decremented, since one
 * complete MSDU has now been accounted for.
 */
static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,
				      bool *is_frag, u32 *total_len,
				      u32 *frag_len, u32 *msdu_cnt)
{
	if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) {
		/* First fragment: remember the MSDU's full length. */
		if (!*is_frag) {
			*is_frag = true;
			*total_len = info->msdu_len;
		}
		ath11k_dp_mon_set_frag_len(total_len, frag_len);
		return;
	}

	if (*is_frag)
		ath11k_dp_mon_set_frag_len(total_len, frag_len);
	else
		*frag_len = info->msdu_len;

	*is_frag = false;
	*msdu_cnt -= 1;
}
4689dd4f32aeSBjoern A. Zeeb
/* Reap one MPDU worth of monitor-mode rx buffers from a REO entrance
 * ring entry.
 *
 * Walks the chain of MSDU link descriptors referenced by @ring_entry,
 * pops each buffer from the (monitor or refill) rxdma buffer ring IDR,
 * unmaps it and links the skbs into a chain delimited by *head_msdu and
 * *tail_msdu. Link descriptors are handed back to hardware as they are
 * consumed.
 *
 * Returns the number of rx buffers consumed. *npackets is set to 1 when
 * msdu_cnt drained to zero (a complete PPDU was reaped) and *ppdu_id is
 * resynced from the first MSDU's rx descriptor when needed.
 */
static u32
ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,
			  void *ring_entry, struct sk_buff **head_msdu,
			  struct sk_buff **tail_msdu, u32 *npackets,
			  u32 *ppdu_id)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
	struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
	struct sk_buff *msdu = NULL, *last = NULL;
	struct hal_rx_msdu_list msdu_list;
	void *p_buf_addr_info, *p_last_buf_addr_info;
	struct hal_rx_desc *rx_desc;
	void *rx_msdu_link_desc;
	dma_addr_t paddr;
	u16 num_msdus = 0;
	u32 rx_buf_size, rx_pkt_offset, sw_cookie;
	u32 rx_bufs_used = 0, i = 0;
	u32 msdu_ppdu_id = 0, msdu_cnt = 0;
	u32 total_len = 0, frag_len = 0;
	bool is_frag, is_first_msdu;
	bool drop_mpdu = false;
	struct ath11k_skb_rxcb *rxcb;
	struct hal_reo_entrance_ring *ent_desc =
			(struct hal_reo_entrance_ring *)ring_entry;
	int buf_id;
	u32 rx_link_buf_info[2];
	u8 rbm;

	/* Without rxdma1 the monitor path shares the ordinary refill ring. */
	if (!ar->ab->hw_params.rxdma1_enable)
		rx_ring = &dp->rx_refill_buf_ring;

	ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr,
					    &sw_cookie,
					    &p_last_buf_addr_info, &rbm,
					    &msdu_cnt);

	/* RXDMA flagged an error on this entry: drop the whole MPDU for
	 * flush/length/overflow errors and count the drop.
	 */
	if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON,
		      ent_desc->info1) ==
	    HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
		u8 rxdma_err =
			FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
				  ent_desc->info1);
		if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
			drop_mpdu = true;
			pmon->rx_mon_stats.dest_mpdu_drop++;
		}
	}

	is_frag = false;
	is_first_msdu = true;

	do {
		/* Same link descriptor as last time: already reaped, stop
		 * here instead of double-processing it.
		 */
		if (pmon->mon_last_linkdesc_paddr == paddr) {
			pmon->rx_mon_stats.dup_mon_linkdesc_cnt++;
			return rx_bufs_used;
		}

		/* Translate the link descriptor's physical address into its
		 * CPU mapping within the owning descriptor bank.
		 */
		if (ar->ab->hw_params.rxdma1_enable)
			rx_msdu_link_desc =
#if defined(__linux__)
				(void *)pmon->link_desc_banks[sw_cookie].vaddr +
#elif defined(__FreeBSD__)
				(u8 *)pmon->link_desc_banks[sw_cookie].vaddr +
#endif
				(paddr - pmon->link_desc_banks[sw_cookie].paddr);
		else
			rx_msdu_link_desc =
#if defined(__linux__)
				(void *)ar->ab->dp.link_desc_banks[sw_cookie].vaddr +
#elif defined(__FreeBSD__)
				(u8 *)ar->ab->dp.link_desc_banks[sw_cookie].vaddr +
#endif
				(paddr - ar->ab->dp.link_desc_banks[sw_cookie].paddr);

		ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
					    &num_msdus);

		for (i = 0; i < num_msdus; i++) {
			u32 l2_hdr_offset;

			/* Duplicate buffer cookie: drop the MPDU. */
			if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) {
				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
					   "i %d last_cookie %d is same\n",
					   i, pmon->mon_last_buf_cookie);
				drop_mpdu = true;
				pmon->rx_mon_stats.dup_mon_buf_cnt++;
				continue;
			}
			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
					   msdu_list.sw_cookie[i]);

			spin_lock_bh(&rx_ring->idr_lock);
			msdu = idr_find(&rx_ring->bufs_idr, buf_id);
			spin_unlock_bh(&rx_ring->idr_lock);
			if (!msdu) {
				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
					   "msdu_pop: invalid buf_id %d\n", buf_id);
				break;
			}
			rxcb = ATH11K_SKB_RXCB(msdu);
			if (!rxcb->unmapped) {
				dma_unmap_single(ar->ab->dev, rxcb->paddr,
						 msdu->len +
						 skb_tailroom(msdu),
						 DMA_FROM_DEVICE);
				rxcb->unmapped = 1;
			}
			if (drop_mpdu) {
				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
					   "i %d drop msdu %p *ppdu_id %x\n",
					   i, msdu, *ppdu_id);
				dev_kfree_skb_any(msdu);
				msdu = NULL;
				goto next_msdu;
			}

			rx_desc = (struct hal_rx_desc *)msdu->data;

			rx_pkt_offset = sizeof(struct hal_rx_desc);
			l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc);

			if (is_first_msdu) {
				if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {
					drop_mpdu = true;
					dev_kfree_skb_any(msdu);
					msdu = NULL;
					pmon->mon_last_linkdesc_paddr = paddr;
					goto next_msdu;
				}

				msdu_ppdu_id =
					ath11k_dp_rxdesc_get_ppduid(ar->ab, rx_desc);

				/* Non-zero return: dest/status rings are out
				 * of sync. rx_bufs_used != 0 means skip (and
				 * free) this MPDU; otherwise stop reaping.
				 */
				if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id,
								 ppdu_id,
								 &rx_bufs_used)) {
					if (rx_bufs_used) {
						drop_mpdu = true;
						dev_kfree_skb_any(msdu);
						msdu = NULL;
						goto next_msdu;
					}
					return rx_bufs_used;
				}
				pmon->mon_last_linkdesc_paddr = paddr;
				is_first_msdu = false;
			}
			ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
						  &is_frag, &total_len,
						  &frag_len, &msdu_cnt);
			rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;

			ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size);

			/* Append the skb to the chain being built. */
			if (!(*head_msdu))
				*head_msdu = msdu;
			else if (last)
				last->next = msdu;

			last = msdu;
next_msdu:
			pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i];
			rx_bufs_used++;
			spin_lock_bh(&rx_ring->idr_lock);
			idr_remove(&rx_ring->bufs_idr, buf_id);
			spin_unlock_bh(&rx_ring->idr_lock);
		}

		ath11k_hal_rx_buf_addr_info_set(rx_link_buf_info, paddr, sw_cookie, rbm);

		ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr,
						    &sw_cookie, &rbm,
						    &p_buf_addr_info);

		/* Return the consumed link descriptor to hardware. */
		if (ar->ab->hw_params.rxdma1_enable) {
			if (ath11k_dp_rx_monitor_link_desc_return(ar,
								  p_last_buf_addr_info,
								  dp->mac_id))
				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
					   "dp_rx_monitor_link_desc_return failed");
		} else {
			ath11k_dp_rx_link_desc_return(ar->ab, rx_link_buf_info,
						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
		}

		p_last_buf_addr_info = p_buf_addr_info;

	} while (paddr && msdu_cnt);

	if (last)
		last->next = NULL;

	*tail_msdu = msdu;

	/* msdu_cnt drained to zero: the full PPDU has been reaped. */
	if (msdu_cnt == 0)
		*npackets = 1;

	return rx_bufs_used;
}
4892dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_msdus_set_payload(struct ath11k * ar,struct sk_buff * msdu)4893dd4f32aeSBjoern A. Zeeb static void ath11k_dp_rx_msdus_set_payload(struct ath11k *ar, struct sk_buff *msdu)
4894dd4f32aeSBjoern A. Zeeb {
4895dd4f32aeSBjoern A. Zeeb u32 rx_pkt_offset, l2_hdr_offset;
4896dd4f32aeSBjoern A. Zeeb
4897dd4f32aeSBjoern A. Zeeb rx_pkt_offset = ar->ab->hw_params.hal_desc_sz;
4898dd4f32aeSBjoern A. Zeeb l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab,
4899dd4f32aeSBjoern A. Zeeb (struct hal_rx_desc *)msdu->data);
4900dd4f32aeSBjoern A. Zeeb skb_pull(msdu, rx_pkt_offset + l2_hdr_offset);
4901dd4f32aeSBjoern A. Zeeb }
4902dd4f32aeSBjoern A. Zeeb
/* Prepare a reaped monitor MSDU chain for delivery to mac80211.
 *
 * Checks the attention descriptor of the first MSDU for FCS/MPDU-length
 * errors, fills @rxs from the PPDU info, then fixes up each skb according
 * to the decap format: for RAW the HW descriptor/padding is stripped and
 * the FCS trimmed from the tail; for NATIVE_WIFI the payload is exposed,
 * the (QoS) 802.11 header from the rx descriptor is pushed back on, and
 * room for the FCS is appended. Other decap formats are rejected.
 *
 * Returns the head of the fixed-up chain, or NULL on error (the caller
 * owns and must free the skbs in that case). *fcs_err reports an FCS
 * failure. @mac_id is currently unused.
 */
static struct sk_buff *
ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
			    u32 mac_id, struct sk_buff *head_msdu,
			    struct sk_buff *last_msdu,
			    struct ieee80211_rx_status *rxs, bool *fcs_err)
{
	struct ath11k_base *ab = ar->ab;
	struct sk_buff *msdu, *prev_buf;
	struct hal_rx_desc *rx_desc;
	char *hdr_desc;
	u8 *dest, decap_format;
	struct ieee80211_hdr_3addr *wh;
	struct rx_attention *rx_attention;
	u32 err_bitmap;

	if (!head_msdu)
		goto err_merge_fail;

	rx_desc = (struct hal_rx_desc *)head_msdu->data;
	rx_attention = ath11k_dp_rx_get_attention(ab, rx_desc);
	err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);

	if (err_bitmap & DP_RX_MPDU_ERR_FCS)
		*fcs_err = true;

	/* MPDU length error: not recoverable for monitor delivery. */
	if (ath11k_dp_rxdesc_get_mpdulen_err(rx_attention))
		return NULL;

	decap_format = ath11k_dp_rx_h_msdu_start_decap_type(ab, rx_desc);

	ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);

	if (decap_format == DP_RX_DECAP_TYPE_RAW) {
		/* RAW: strip descriptors from every skb, cut the FCS off
		 * the final one.
		 */
		ath11k_dp_rx_msdus_set_payload(ar, head_msdu);

		prev_buf = head_msdu;
		msdu = head_msdu->next;

		while (msdu) {
			ath11k_dp_rx_msdus_set_payload(ar, msdu);

			prev_buf = msdu;
			msdu = msdu->next;
		}

		prev_buf->next = NULL;

		skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
	} else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) {
		u8 qos_pkt = 0;

		rx_desc = (struct hal_rx_desc *)head_msdu->data;
		hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc);

		/* Base size */
		wh = (struct ieee80211_hdr_3addr *)hdr_desc;

		if (ieee80211_is_data_qos(wh->frame_control))
			qos_pkt = 1;

		msdu = head_msdu;

		while (msdu) {
			ath11k_dp_rx_msdus_set_payload(ar, msdu);
			if (qos_pkt) {
				/* Re-insert the QoS header saved in the rx
				 * descriptor ahead of the payload.
				 */
				dest = skb_push(msdu, sizeof(__le16));
				if (!dest)
					goto err_merge_fail;
				memcpy(dest, hdr_desc, sizeof(struct ieee80211_qos_hdr));
			}
			prev_buf = msdu;
			msdu = msdu->next;
		}
		/* Reserve space for the FCS on the last skb. */
		dest = skb_put(prev_buf, HAL_RX_FCS_LEN);
		if (!dest)
			goto err_merge_fail;

		ath11k_dbg(ab, ATH11K_DBG_DATA,
			   "mpdu_buf %p mpdu_buf->len %u",
			   prev_buf, prev_buf->len);
	} else {
		ath11k_dbg(ab, ATH11K_DBG_DATA,
			   "decap format %d is not supported!\n",
			   decap_format);
		goto err_merge_fail;
	}

	return head_msdu;

err_merge_fail:
	return NULL;
}
4995dd4f32aeSBjoern A. Zeeb
4996*28348caeSBjoern A. Zeeb static void
ath11k_dp_rx_update_radiotap_he(struct hal_rx_mon_ppdu_info * rx_status,u8 * rtap_buf)4997*28348caeSBjoern A. Zeeb ath11k_dp_rx_update_radiotap_he(struct hal_rx_mon_ppdu_info *rx_status,
4998*28348caeSBjoern A. Zeeb u8 *rtap_buf)
4999*28348caeSBjoern A. Zeeb {
5000*28348caeSBjoern A. Zeeb u32 rtap_len = 0;
5001*28348caeSBjoern A. Zeeb
5002*28348caeSBjoern A. Zeeb put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
5003*28348caeSBjoern A. Zeeb rtap_len += 2;
5004*28348caeSBjoern A. Zeeb
5005*28348caeSBjoern A. Zeeb put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
5006*28348caeSBjoern A. Zeeb rtap_len += 2;
5007*28348caeSBjoern A. Zeeb
5008*28348caeSBjoern A. Zeeb put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
5009*28348caeSBjoern A. Zeeb rtap_len += 2;
5010*28348caeSBjoern A. Zeeb
5011*28348caeSBjoern A. Zeeb put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
5012*28348caeSBjoern A. Zeeb rtap_len += 2;
5013*28348caeSBjoern A. Zeeb
5014*28348caeSBjoern A. Zeeb put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
5015*28348caeSBjoern A. Zeeb rtap_len += 2;
5016*28348caeSBjoern A. Zeeb
5017*28348caeSBjoern A. Zeeb put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
5018*28348caeSBjoern A. Zeeb }
5019*28348caeSBjoern A. Zeeb
5020*28348caeSBjoern A. Zeeb static void
ath11k_dp_rx_update_radiotap_he_mu(struct hal_rx_mon_ppdu_info * rx_status,u8 * rtap_buf)5021*28348caeSBjoern A. Zeeb ath11k_dp_rx_update_radiotap_he_mu(struct hal_rx_mon_ppdu_info *rx_status,
5022*28348caeSBjoern A. Zeeb u8 *rtap_buf)
5023*28348caeSBjoern A. Zeeb {
5024*28348caeSBjoern A. Zeeb u32 rtap_len = 0;
5025*28348caeSBjoern A. Zeeb
5026*28348caeSBjoern A. Zeeb put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
5027*28348caeSBjoern A. Zeeb rtap_len += 2;
5028*28348caeSBjoern A. Zeeb
5029*28348caeSBjoern A. Zeeb put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
5030*28348caeSBjoern A. Zeeb rtap_len += 2;
5031*28348caeSBjoern A. Zeeb
5032*28348caeSBjoern A. Zeeb rtap_buf[rtap_len] = rx_status->he_RU[0];
5033*28348caeSBjoern A. Zeeb rtap_len += 1;
5034*28348caeSBjoern A. Zeeb
5035*28348caeSBjoern A. Zeeb rtap_buf[rtap_len] = rx_status->he_RU[1];
5036*28348caeSBjoern A. Zeeb rtap_len += 1;
5037*28348caeSBjoern A. Zeeb
5038*28348caeSBjoern A. Zeeb rtap_buf[rtap_len] = rx_status->he_RU[2];
5039*28348caeSBjoern A. Zeeb rtap_len += 1;
5040*28348caeSBjoern A. Zeeb
5041*28348caeSBjoern A. Zeeb rtap_buf[rtap_len] = rx_status->he_RU[3];
5042*28348caeSBjoern A. Zeeb }
5043*28348caeSBjoern A. Zeeb
ath11k_update_radiotap(struct ath11k * ar,struct hal_rx_mon_ppdu_info * ppduinfo,struct sk_buff * mon_skb,struct ieee80211_rx_status * rxs)5044*28348caeSBjoern A. Zeeb static void ath11k_update_radiotap(struct ath11k *ar,
5045*28348caeSBjoern A. Zeeb struct hal_rx_mon_ppdu_info *ppduinfo,
5046*28348caeSBjoern A. Zeeb struct sk_buff *mon_skb,
5047*28348caeSBjoern A. Zeeb struct ieee80211_rx_status *rxs)
5048*28348caeSBjoern A. Zeeb {
5049*28348caeSBjoern A. Zeeb struct ieee80211_supported_band *sband;
5050*28348caeSBjoern A. Zeeb u8 *ptr = NULL;
5051*28348caeSBjoern A. Zeeb
5052*28348caeSBjoern A. Zeeb rxs->flag |= RX_FLAG_MACTIME_START;
5053*28348caeSBjoern A. Zeeb rxs->signal = ppduinfo->rssi_comb + ATH11K_DEFAULT_NOISE_FLOOR;
5054*28348caeSBjoern A. Zeeb
5055*28348caeSBjoern A. Zeeb if (ppduinfo->nss)
5056*28348caeSBjoern A. Zeeb rxs->nss = ppduinfo->nss;
5057*28348caeSBjoern A. Zeeb
5058*28348caeSBjoern A. Zeeb if (ppduinfo->he_mu_flags) {
5059*28348caeSBjoern A. Zeeb rxs->flag |= RX_FLAG_RADIOTAP_HE_MU;
5060*28348caeSBjoern A. Zeeb rxs->encoding = RX_ENC_HE;
5061*28348caeSBjoern A. Zeeb ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he_mu));
5062*28348caeSBjoern A. Zeeb ath11k_dp_rx_update_radiotap_he_mu(ppduinfo, ptr);
5063*28348caeSBjoern A. Zeeb } else if (ppduinfo->he_flags) {
5064*28348caeSBjoern A. Zeeb rxs->flag |= RX_FLAG_RADIOTAP_HE;
5065*28348caeSBjoern A. Zeeb rxs->encoding = RX_ENC_HE;
5066*28348caeSBjoern A. Zeeb ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he));
5067*28348caeSBjoern A. Zeeb ath11k_dp_rx_update_radiotap_he(ppduinfo, ptr);
5068*28348caeSBjoern A. Zeeb rxs->rate_idx = ppduinfo->rate;
5069*28348caeSBjoern A. Zeeb } else if (ppduinfo->vht_flags) {
5070*28348caeSBjoern A. Zeeb rxs->encoding = RX_ENC_VHT;
5071*28348caeSBjoern A. Zeeb rxs->rate_idx = ppduinfo->rate;
5072*28348caeSBjoern A. Zeeb } else if (ppduinfo->ht_flags) {
5073*28348caeSBjoern A. Zeeb rxs->encoding = RX_ENC_HT;
5074*28348caeSBjoern A. Zeeb rxs->rate_idx = ppduinfo->rate;
5075*28348caeSBjoern A. Zeeb } else {
5076*28348caeSBjoern A. Zeeb rxs->encoding = RX_ENC_LEGACY;
5077*28348caeSBjoern A. Zeeb sband = &ar->mac.sbands[rxs->band];
5078*28348caeSBjoern A. Zeeb rxs->rate_idx = ath11k_mac_hw_rate_to_idx(sband, ppduinfo->rate,
5079*28348caeSBjoern A. Zeeb ppduinfo->cck_flag);
5080*28348caeSBjoern A. Zeeb }
5081*28348caeSBjoern A. Zeeb
5082*28348caeSBjoern A. Zeeb rxs->mactime = ppduinfo->tsft;
5083*28348caeSBjoern A. Zeeb }
5084*28348caeSBjoern A. Zeeb
/* Deliver a reaped monitor MSDU chain to mac80211.
 *
 * Merges/fixes up the chain via ath11k_dp_rx_mon_merg_msdus(), fills in
 * radiotap/rx-status information per skb and hands each skb to
 * ath11k_dp_rx_deliver_msdu(). On merge failure the whole chain is freed
 * and -EINVAL returned; on success returns 0.
 */
static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
				    struct sk_buff *head_msdu,
				    struct hal_rx_mon_ppdu_info *ppduinfo,
				    struct sk_buff *tail_msdu,
				    struct napi_struct *napi)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct sk_buff *mon_skb, *skb_next, *header;
	struct ieee80211_rx_status *rxs = &dp->rx_status;
	bool fcs_err = false;

	mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu,
					      tail_msdu, rxs, &fcs_err);

	if (!mon_skb)
		goto mon_deliver_fail;

	header = mon_skb;

	rxs->flag = 0;

	if (fcs_err)
		rxs->flag = RX_FLAG_FAILED_FCS_CRC;

	do {
		skb_next = mon_skb->next;
		/* AMSDU_MORE on all but the last skb of the chain. */
		if (!skb_next)
			rxs->flag &= ~RX_FLAG_AMSDU_MORE;
		else
			rxs->flag |= RX_FLAG_AMSDU_MORE;

		/* ALLOW_SAME_PN on every skb after the first (header). */
		if (mon_skb == header) {
			header = NULL;
			rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN;
		} else {
			rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
		}
		rxs->flag |= RX_FLAG_ONLY_MONITOR;
		ath11k_update_radiotap(ar, ppduinfo, mon_skb, rxs);

		ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb, rxs);
		mon_skb = skb_next;
	} while (mon_skb);
	rxs->flag = 0;

	return 0;

mon_deliver_fail:
	/* Merge failed: free the entire original chain. */
	mon_skb = head_msdu;
	while (mon_skb) {
		skb_next = mon_skb->next;
		dev_kfree_skb_any(mon_skb);
		mon_skb = skb_next;
	}
	return -EINVAL;
}
5141dd4f32aeSBjoern A. Zeeb
5142*28348caeSBjoern A. Zeeb /* The destination ring processing is stuck if the destination is not
5143*28348caeSBjoern A. Zeeb * moving while status ring moves 16 PPDU. The destination ring processing
5144*28348caeSBjoern A. Zeeb * skips this destination ring PPDU as a workaround.
5145*28348caeSBjoern A. Zeeb */
5146*28348caeSBjoern A. Zeeb #define MON_DEST_RING_STUCK_MAX_CNT 16
5147*28348caeSBjoern A. Zeeb
ath11k_dp_rx_mon_dest_process(struct ath11k * ar,int mac_id,u32 quota,struct napi_struct * napi)5148dd4f32aeSBjoern A. Zeeb static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
5149dd4f32aeSBjoern A. Zeeb u32 quota, struct napi_struct *napi)
5150dd4f32aeSBjoern A. Zeeb {
5151dd4f32aeSBjoern A. Zeeb struct ath11k_pdev_dp *dp = &ar->dp;
5152dd4f32aeSBjoern A. Zeeb struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
5153dd4f32aeSBjoern A. Zeeb const struct ath11k_hw_hal_params *hal_params;
5154dd4f32aeSBjoern A. Zeeb void *ring_entry;
5155dd4f32aeSBjoern A. Zeeb void *mon_dst_srng;
5156dd4f32aeSBjoern A. Zeeb u32 ppdu_id;
5157dd4f32aeSBjoern A. Zeeb u32 rx_bufs_used;
5158dd4f32aeSBjoern A. Zeeb u32 ring_id;
5159dd4f32aeSBjoern A. Zeeb struct ath11k_pdev_mon_stats *rx_mon_stats;
5160dd4f32aeSBjoern A. Zeeb u32 npackets = 0;
5161*28348caeSBjoern A. Zeeb u32 mpdu_rx_bufs_used;
5162dd4f32aeSBjoern A. Zeeb
5163dd4f32aeSBjoern A. Zeeb if (ar->ab->hw_params.rxdma1_enable)
5164dd4f32aeSBjoern A. Zeeb ring_id = dp->rxdma_mon_dst_ring.ring_id;
5165dd4f32aeSBjoern A. Zeeb else
5166dd4f32aeSBjoern A. Zeeb ring_id = dp->rxdma_err_dst_ring[mac_id].ring_id;
5167dd4f32aeSBjoern A. Zeeb
5168dd4f32aeSBjoern A. Zeeb mon_dst_srng = &ar->ab->hal.srng_list[ring_id];
5169dd4f32aeSBjoern A. Zeeb
5170dd4f32aeSBjoern A. Zeeb if (!mon_dst_srng) {
5171dd4f32aeSBjoern A. Zeeb ath11k_warn(ar->ab,
5172*28348caeSBjoern A. Zeeb "HAL Monitor Destination Ring Init Failed -- %p",
5173dd4f32aeSBjoern A. Zeeb mon_dst_srng);
5174dd4f32aeSBjoern A. Zeeb return;
5175dd4f32aeSBjoern A. Zeeb }
5176dd4f32aeSBjoern A. Zeeb
5177dd4f32aeSBjoern A. Zeeb spin_lock_bh(&pmon->mon_lock);
5178dd4f32aeSBjoern A. Zeeb
5179dd4f32aeSBjoern A. Zeeb ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
5180dd4f32aeSBjoern A. Zeeb
5181dd4f32aeSBjoern A. Zeeb ppdu_id = pmon->mon_ppdu_info.ppdu_id;
5182dd4f32aeSBjoern A. Zeeb rx_bufs_used = 0;
5183dd4f32aeSBjoern A. Zeeb rx_mon_stats = &pmon->rx_mon_stats;
5184dd4f32aeSBjoern A. Zeeb
5185dd4f32aeSBjoern A. Zeeb while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
5186dd4f32aeSBjoern A. Zeeb struct sk_buff *head_msdu, *tail_msdu;
5187dd4f32aeSBjoern A. Zeeb
5188dd4f32aeSBjoern A. Zeeb head_msdu = NULL;
5189dd4f32aeSBjoern A. Zeeb tail_msdu = NULL;
5190dd4f32aeSBjoern A. Zeeb
5191*28348caeSBjoern A. Zeeb mpdu_rx_bufs_used = ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry,
5192dd4f32aeSBjoern A. Zeeb &head_msdu,
5193dd4f32aeSBjoern A. Zeeb &tail_msdu,
5194dd4f32aeSBjoern A. Zeeb &npackets, &ppdu_id);
5195dd4f32aeSBjoern A. Zeeb
5196*28348caeSBjoern A. Zeeb rx_bufs_used += mpdu_rx_bufs_used;
5197*28348caeSBjoern A. Zeeb
5198*28348caeSBjoern A. Zeeb if (mpdu_rx_bufs_used) {
5199*28348caeSBjoern A. Zeeb dp->mon_dest_ring_stuck_cnt = 0;
5200*28348caeSBjoern A. Zeeb } else {
5201*28348caeSBjoern A. Zeeb dp->mon_dest_ring_stuck_cnt++;
5202*28348caeSBjoern A. Zeeb rx_mon_stats->dest_mon_not_reaped++;
5203*28348caeSBjoern A. Zeeb }
5204*28348caeSBjoern A. Zeeb
5205*28348caeSBjoern A. Zeeb if (dp->mon_dest_ring_stuck_cnt > MON_DEST_RING_STUCK_MAX_CNT) {
5206*28348caeSBjoern A. Zeeb rx_mon_stats->dest_mon_stuck++;
5207*28348caeSBjoern A. Zeeb ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
5208*28348caeSBjoern A. Zeeb "status ring ppdu_id=%d dest ring ppdu_id=%d mon_dest_ring_stuck_cnt=%d dest_mon_not_reaped=%u dest_mon_stuck=%u\n",
5209*28348caeSBjoern A. Zeeb pmon->mon_ppdu_info.ppdu_id, ppdu_id,
5210*28348caeSBjoern A. Zeeb dp->mon_dest_ring_stuck_cnt,
5211*28348caeSBjoern A. Zeeb rx_mon_stats->dest_mon_not_reaped,
5212*28348caeSBjoern A. Zeeb rx_mon_stats->dest_mon_stuck);
5213*28348caeSBjoern A. Zeeb pmon->mon_ppdu_info.ppdu_id = ppdu_id;
5214*28348caeSBjoern A. Zeeb continue;
5215*28348caeSBjoern A. Zeeb }
5216*28348caeSBjoern A. Zeeb
5217dd4f32aeSBjoern A. Zeeb if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
5218dd4f32aeSBjoern A. Zeeb pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
5219dd4f32aeSBjoern A. Zeeb ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
5220*28348caeSBjoern A. Zeeb "dest_rx: new ppdu_id %x != status ppdu_id %x dest_mon_not_reaped = %u dest_mon_stuck = %u\n",
5221*28348caeSBjoern A. Zeeb ppdu_id, pmon->mon_ppdu_info.ppdu_id,
5222*28348caeSBjoern A. Zeeb rx_mon_stats->dest_mon_not_reaped,
5223*28348caeSBjoern A. Zeeb rx_mon_stats->dest_mon_stuck);
5224dd4f32aeSBjoern A. Zeeb break;
5225dd4f32aeSBjoern A. Zeeb }
5226dd4f32aeSBjoern A. Zeeb if (head_msdu && tail_msdu) {
5227dd4f32aeSBjoern A. Zeeb ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu,
5228*28348caeSBjoern A. Zeeb &pmon->mon_ppdu_info,
5229dd4f32aeSBjoern A. Zeeb tail_msdu, napi);
5230dd4f32aeSBjoern A. Zeeb rx_mon_stats->dest_mpdu_done++;
5231dd4f32aeSBjoern A. Zeeb }
5232dd4f32aeSBjoern A. Zeeb
5233dd4f32aeSBjoern A. Zeeb ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
5234dd4f32aeSBjoern A. Zeeb mon_dst_srng);
5235dd4f32aeSBjoern A. Zeeb }
5236dd4f32aeSBjoern A. Zeeb ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
5237dd4f32aeSBjoern A. Zeeb
5238dd4f32aeSBjoern A. Zeeb spin_unlock_bh(&pmon->mon_lock);
5239dd4f32aeSBjoern A. Zeeb
5240dd4f32aeSBjoern A. Zeeb if (rx_bufs_used) {
5241dd4f32aeSBjoern A. Zeeb rx_mon_stats->dest_ppdu_done++;
5242dd4f32aeSBjoern A. Zeeb hal_params = ar->ab->hw_params.hal_params;
5243dd4f32aeSBjoern A. Zeeb
5244dd4f32aeSBjoern A. Zeeb if (ar->ab->hw_params.rxdma1_enable)
5245dd4f32aeSBjoern A. Zeeb ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
5246dd4f32aeSBjoern A. Zeeb &dp->rxdma_mon_buf_ring,
5247dd4f32aeSBjoern A. Zeeb rx_bufs_used,
5248dd4f32aeSBjoern A. Zeeb hal_params->rx_buf_rbm);
5249dd4f32aeSBjoern A. Zeeb else
5250dd4f32aeSBjoern A. Zeeb ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
5251dd4f32aeSBjoern A. Zeeb &dp->rx_refill_buf_ring,
5252dd4f32aeSBjoern A. Zeeb rx_bufs_used,
5253dd4f32aeSBjoern A. Zeeb hal_params->rx_buf_rbm);
5254dd4f32aeSBjoern A. Zeeb }
5255dd4f32aeSBjoern A. Zeeb }
5256dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_process_mon_status(struct ath11k_base * ab,int mac_id,struct napi_struct * napi,int budget)5257dd4f32aeSBjoern A. Zeeb int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
5258dd4f32aeSBjoern A. Zeeb struct napi_struct *napi, int budget)
5259dd4f32aeSBjoern A. Zeeb {
5260dd4f32aeSBjoern A. Zeeb struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
5261dd4f32aeSBjoern A. Zeeb enum hal_rx_mon_status hal_status;
5262dd4f32aeSBjoern A. Zeeb struct sk_buff *skb;
5263dd4f32aeSBjoern A. Zeeb struct sk_buff_head skb_list;
5264dd4f32aeSBjoern A. Zeeb struct ath11k_peer *peer;
5265dd4f32aeSBjoern A. Zeeb struct ath11k_sta *arsta;
5266dd4f32aeSBjoern A. Zeeb int num_buffs_reaped = 0;
5267dd4f32aeSBjoern A. Zeeb u32 rx_buf_sz;
5268*28348caeSBjoern A. Zeeb u16 log_type;
5269dd4f32aeSBjoern A. Zeeb struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&ar->dp.mon_data;
5270dd4f32aeSBjoern A. Zeeb struct ath11k_pdev_mon_stats *rx_mon_stats = &pmon->rx_mon_stats;
5271dd4f32aeSBjoern A. Zeeb struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
5272dd4f32aeSBjoern A. Zeeb
5273dd4f32aeSBjoern A. Zeeb __skb_queue_head_init(&skb_list);
5274dd4f32aeSBjoern A. Zeeb
5275dd4f32aeSBjoern A. Zeeb num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget,
5276dd4f32aeSBjoern A. Zeeb &skb_list);
5277dd4f32aeSBjoern A. Zeeb if (!num_buffs_reaped)
5278dd4f32aeSBjoern A. Zeeb goto exit;
5279dd4f32aeSBjoern A. Zeeb
5280dd4f32aeSBjoern A. Zeeb memset(ppdu_info, 0, sizeof(*ppdu_info));
5281dd4f32aeSBjoern A. Zeeb ppdu_info->peer_id = HAL_INVALID_PEERID;
5282dd4f32aeSBjoern A. Zeeb
5283dd4f32aeSBjoern A. Zeeb while ((skb = __skb_dequeue(&skb_list))) {
5284dd4f32aeSBjoern A. Zeeb if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) {
5285dd4f32aeSBjoern A. Zeeb log_type = ATH11K_PKTLOG_TYPE_LITE_RX;
5286dd4f32aeSBjoern A. Zeeb rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
5287dd4f32aeSBjoern A. Zeeb } else if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) {
5288dd4f32aeSBjoern A. Zeeb log_type = ATH11K_PKTLOG_TYPE_RX_STATBUF;
5289dd4f32aeSBjoern A. Zeeb rx_buf_sz = DP_RX_BUFFER_SIZE;
5290*28348caeSBjoern A. Zeeb } else {
5291*28348caeSBjoern A. Zeeb log_type = ATH11K_PKTLOG_TYPE_INVALID;
5292*28348caeSBjoern A. Zeeb rx_buf_sz = 0;
5293dd4f32aeSBjoern A. Zeeb }
5294dd4f32aeSBjoern A. Zeeb
5295*28348caeSBjoern A. Zeeb if (log_type != ATH11K_PKTLOG_TYPE_INVALID)
5296dd4f32aeSBjoern A. Zeeb trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
5297dd4f32aeSBjoern A. Zeeb
5298*28348caeSBjoern A. Zeeb memset(ppdu_info, 0, sizeof(*ppdu_info));
5299*28348caeSBjoern A. Zeeb ppdu_info->peer_id = HAL_INVALID_PEERID;
5300dd4f32aeSBjoern A. Zeeb hal_status = ath11k_hal_rx_parse_mon_status(ab, ppdu_info, skb);
5301dd4f32aeSBjoern A. Zeeb
5302dd4f32aeSBjoern A. Zeeb if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
5303dd4f32aeSBjoern A. Zeeb pmon->mon_ppdu_status == DP_PPDU_STATUS_START &&
5304dd4f32aeSBjoern A. Zeeb hal_status == HAL_TLV_STATUS_PPDU_DONE) {
5305dd4f32aeSBjoern A. Zeeb rx_mon_stats->status_ppdu_done++;
5306dd4f32aeSBjoern A. Zeeb pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
5307dd4f32aeSBjoern A. Zeeb ath11k_dp_rx_mon_dest_process(ar, mac_id, budget, napi);
5308dd4f32aeSBjoern A. Zeeb pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
5309dd4f32aeSBjoern A. Zeeb }
5310dd4f32aeSBjoern A. Zeeb
5311dd4f32aeSBjoern A. Zeeb if (ppdu_info->peer_id == HAL_INVALID_PEERID ||
5312dd4f32aeSBjoern A. Zeeb hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
5313dd4f32aeSBjoern A. Zeeb dev_kfree_skb_any(skb);
5314dd4f32aeSBjoern A. Zeeb continue;
5315dd4f32aeSBjoern A. Zeeb }
5316dd4f32aeSBjoern A. Zeeb
5317dd4f32aeSBjoern A. Zeeb rcu_read_lock();
5318dd4f32aeSBjoern A. Zeeb spin_lock_bh(&ab->base_lock);
5319dd4f32aeSBjoern A. Zeeb peer = ath11k_peer_find_by_id(ab, ppdu_info->peer_id);
5320dd4f32aeSBjoern A. Zeeb
5321dd4f32aeSBjoern A. Zeeb if (!peer || !peer->sta) {
5322dd4f32aeSBjoern A. Zeeb ath11k_dbg(ab, ATH11K_DBG_DATA,
5323dd4f32aeSBjoern A. Zeeb "failed to find the peer with peer_id %d\n",
5324dd4f32aeSBjoern A. Zeeb ppdu_info->peer_id);
5325dd4f32aeSBjoern A. Zeeb goto next_skb;
5326dd4f32aeSBjoern A. Zeeb }
5327dd4f32aeSBjoern A. Zeeb
5328dd4f32aeSBjoern A. Zeeb arsta = (struct ath11k_sta *)peer->sta->drv_priv;
5329dd4f32aeSBjoern A. Zeeb ath11k_dp_rx_update_peer_stats(arsta, ppdu_info);
5330dd4f32aeSBjoern A. Zeeb
5331dd4f32aeSBjoern A. Zeeb if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr))
5332dd4f32aeSBjoern A. Zeeb trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
5333dd4f32aeSBjoern A. Zeeb
5334dd4f32aeSBjoern A. Zeeb next_skb:
5335dd4f32aeSBjoern A. Zeeb spin_unlock_bh(&ab->base_lock);
5336dd4f32aeSBjoern A. Zeeb rcu_read_unlock();
5337dd4f32aeSBjoern A. Zeeb
5338dd4f32aeSBjoern A. Zeeb dev_kfree_skb_any(skb);
5339dd4f32aeSBjoern A. Zeeb memset(ppdu_info, 0, sizeof(*ppdu_info));
5340dd4f32aeSBjoern A. Zeeb ppdu_info->peer_id = HAL_INVALID_PEERID;
5341dd4f32aeSBjoern A. Zeeb }
5342dd4f32aeSBjoern A. Zeeb exit:
5343dd4f32aeSBjoern A. Zeeb return num_buffs_reaped;
5344dd4f32aeSBjoern A. Zeeb }
5345dd4f32aeSBjoern A. Zeeb
5346dd4f32aeSBjoern A. Zeeb static u32
ath11k_dp_rx_full_mon_mpdu_pop(struct ath11k * ar,void * ring_entry,struct sk_buff ** head_msdu,struct sk_buff ** tail_msdu,struct hal_sw_mon_ring_entries * sw_mon_entries)5347dd4f32aeSBjoern A. Zeeb ath11k_dp_rx_full_mon_mpdu_pop(struct ath11k *ar,
5348dd4f32aeSBjoern A. Zeeb void *ring_entry, struct sk_buff **head_msdu,
5349dd4f32aeSBjoern A. Zeeb struct sk_buff **tail_msdu,
5350dd4f32aeSBjoern A. Zeeb struct hal_sw_mon_ring_entries *sw_mon_entries)
5351dd4f32aeSBjoern A. Zeeb {
5352dd4f32aeSBjoern A. Zeeb struct ath11k_pdev_dp *dp = &ar->dp;
5353dd4f32aeSBjoern A. Zeeb struct ath11k_mon_data *pmon = &dp->mon_data;
5354dd4f32aeSBjoern A. Zeeb struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
5355dd4f32aeSBjoern A. Zeeb struct sk_buff *msdu = NULL, *last = NULL;
5356dd4f32aeSBjoern A. Zeeb struct hal_sw_monitor_ring *sw_desc = ring_entry;
5357dd4f32aeSBjoern A. Zeeb struct hal_rx_msdu_list msdu_list;
5358dd4f32aeSBjoern A. Zeeb struct hal_rx_desc *rx_desc;
5359dd4f32aeSBjoern A. Zeeb struct ath11k_skb_rxcb *rxcb;
5360dd4f32aeSBjoern A. Zeeb void *rx_msdu_link_desc;
5361dd4f32aeSBjoern A. Zeeb void *p_buf_addr_info, *p_last_buf_addr_info;
5362dd4f32aeSBjoern A. Zeeb int buf_id, i = 0;
5363dd4f32aeSBjoern A. Zeeb u32 rx_buf_size, rx_pkt_offset, l2_hdr_offset;
5364dd4f32aeSBjoern A. Zeeb u32 rx_bufs_used = 0, msdu_cnt = 0;
5365dd4f32aeSBjoern A. Zeeb u32 total_len = 0, frag_len = 0, sw_cookie;
5366dd4f32aeSBjoern A. Zeeb u16 num_msdus = 0;
5367dd4f32aeSBjoern A. Zeeb u8 rxdma_err, rbm;
5368dd4f32aeSBjoern A. Zeeb bool is_frag, is_first_msdu;
5369dd4f32aeSBjoern A. Zeeb bool drop_mpdu = false;
5370dd4f32aeSBjoern A. Zeeb
5371dd4f32aeSBjoern A. Zeeb ath11k_hal_rx_sw_mon_ring_buf_paddr_get(ring_entry, sw_mon_entries);
5372dd4f32aeSBjoern A. Zeeb
5373dd4f32aeSBjoern A. Zeeb sw_cookie = sw_mon_entries->mon_dst_sw_cookie;
5374dd4f32aeSBjoern A. Zeeb sw_mon_entries->end_of_ppdu = false;
5375dd4f32aeSBjoern A. Zeeb sw_mon_entries->drop_ppdu = false;
5376dd4f32aeSBjoern A. Zeeb p_last_buf_addr_info = sw_mon_entries->dst_buf_addr_info;
5377dd4f32aeSBjoern A. Zeeb msdu_cnt = sw_mon_entries->msdu_cnt;
5378dd4f32aeSBjoern A. Zeeb
5379dd4f32aeSBjoern A. Zeeb sw_mon_entries->end_of_ppdu =
5380dd4f32aeSBjoern A. Zeeb FIELD_GET(HAL_SW_MON_RING_INFO0_END_OF_PPDU, sw_desc->info0);
5381dd4f32aeSBjoern A. Zeeb if (sw_mon_entries->end_of_ppdu)
5382dd4f32aeSBjoern A. Zeeb return rx_bufs_used;
5383dd4f32aeSBjoern A. Zeeb
5384dd4f32aeSBjoern A. Zeeb if (FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_PUSH_REASON,
5385dd4f32aeSBjoern A. Zeeb sw_desc->info0) ==
5386dd4f32aeSBjoern A. Zeeb HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
5387dd4f32aeSBjoern A. Zeeb rxdma_err =
5388dd4f32aeSBjoern A. Zeeb FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_ERROR_CODE,
5389dd4f32aeSBjoern A. Zeeb sw_desc->info0);
5390dd4f32aeSBjoern A. Zeeb if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
5391dd4f32aeSBjoern A. Zeeb rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
5392dd4f32aeSBjoern A. Zeeb rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
5393dd4f32aeSBjoern A. Zeeb pmon->rx_mon_stats.dest_mpdu_drop++;
5394dd4f32aeSBjoern A. Zeeb drop_mpdu = true;
5395dd4f32aeSBjoern A. Zeeb }
5396dd4f32aeSBjoern A. Zeeb }
5397dd4f32aeSBjoern A. Zeeb
5398dd4f32aeSBjoern A. Zeeb is_frag = false;
5399dd4f32aeSBjoern A. Zeeb is_first_msdu = true;
5400dd4f32aeSBjoern A. Zeeb
5401dd4f32aeSBjoern A. Zeeb do {
5402dd4f32aeSBjoern A. Zeeb rx_msdu_link_desc =
5403dd4f32aeSBjoern A. Zeeb (u8 *)pmon->link_desc_banks[sw_cookie].vaddr +
5404dd4f32aeSBjoern A. Zeeb (sw_mon_entries->mon_dst_paddr -
5405dd4f32aeSBjoern A. Zeeb pmon->link_desc_banks[sw_cookie].paddr);
5406dd4f32aeSBjoern A. Zeeb
5407dd4f32aeSBjoern A. Zeeb ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
5408dd4f32aeSBjoern A. Zeeb &num_msdus);
5409dd4f32aeSBjoern A. Zeeb
5410dd4f32aeSBjoern A. Zeeb for (i = 0; i < num_msdus; i++) {
5411dd4f32aeSBjoern A. Zeeb buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
5412dd4f32aeSBjoern A. Zeeb msdu_list.sw_cookie[i]);
5413dd4f32aeSBjoern A. Zeeb
5414dd4f32aeSBjoern A. Zeeb spin_lock_bh(&rx_ring->idr_lock);
5415dd4f32aeSBjoern A. Zeeb msdu = idr_find(&rx_ring->bufs_idr, buf_id);
5416dd4f32aeSBjoern A. Zeeb if (!msdu) {
5417dd4f32aeSBjoern A. Zeeb ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
5418dd4f32aeSBjoern A. Zeeb "full mon msdu_pop: invalid buf_id %d\n",
5419dd4f32aeSBjoern A. Zeeb buf_id);
5420dd4f32aeSBjoern A. Zeeb spin_unlock_bh(&rx_ring->idr_lock);
5421dd4f32aeSBjoern A. Zeeb break;
5422dd4f32aeSBjoern A. Zeeb }
5423dd4f32aeSBjoern A. Zeeb idr_remove(&rx_ring->bufs_idr, buf_id);
5424dd4f32aeSBjoern A. Zeeb spin_unlock_bh(&rx_ring->idr_lock);
5425dd4f32aeSBjoern A. Zeeb
5426dd4f32aeSBjoern A. Zeeb rxcb = ATH11K_SKB_RXCB(msdu);
5427dd4f32aeSBjoern A. Zeeb if (!rxcb->unmapped) {
5428dd4f32aeSBjoern A. Zeeb dma_unmap_single(ar->ab->dev, rxcb->paddr,
5429dd4f32aeSBjoern A. Zeeb msdu->len +
5430dd4f32aeSBjoern A. Zeeb skb_tailroom(msdu),
5431dd4f32aeSBjoern A. Zeeb DMA_FROM_DEVICE);
5432dd4f32aeSBjoern A. Zeeb rxcb->unmapped = 1;
5433dd4f32aeSBjoern A. Zeeb }
5434dd4f32aeSBjoern A. Zeeb if (drop_mpdu) {
5435dd4f32aeSBjoern A. Zeeb ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
5436dd4f32aeSBjoern A. Zeeb "full mon: i %d drop msdu %p *ppdu_id %x\n",
5437dd4f32aeSBjoern A. Zeeb i, msdu, sw_mon_entries->ppdu_id);
5438dd4f32aeSBjoern A. Zeeb dev_kfree_skb_any(msdu);
5439dd4f32aeSBjoern A. Zeeb msdu_cnt--;
5440dd4f32aeSBjoern A. Zeeb goto next_msdu;
5441dd4f32aeSBjoern A. Zeeb }
5442dd4f32aeSBjoern A. Zeeb
5443dd4f32aeSBjoern A. Zeeb rx_desc = (struct hal_rx_desc *)msdu->data;
5444dd4f32aeSBjoern A. Zeeb
5445dd4f32aeSBjoern A. Zeeb rx_pkt_offset = sizeof(struct hal_rx_desc);
5446dd4f32aeSBjoern A. Zeeb l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc);
5447dd4f32aeSBjoern A. Zeeb
5448dd4f32aeSBjoern A. Zeeb if (is_first_msdu) {
5449dd4f32aeSBjoern A. Zeeb if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {
5450dd4f32aeSBjoern A. Zeeb drop_mpdu = true;
5451dd4f32aeSBjoern A. Zeeb dev_kfree_skb_any(msdu);
5452dd4f32aeSBjoern A. Zeeb msdu = NULL;
5453dd4f32aeSBjoern A. Zeeb goto next_msdu;
5454dd4f32aeSBjoern A. Zeeb }
5455dd4f32aeSBjoern A. Zeeb is_first_msdu = false;
5456dd4f32aeSBjoern A. Zeeb }
5457dd4f32aeSBjoern A. Zeeb
5458dd4f32aeSBjoern A. Zeeb ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
5459dd4f32aeSBjoern A. Zeeb &is_frag, &total_len,
5460dd4f32aeSBjoern A. Zeeb &frag_len, &msdu_cnt);
5461dd4f32aeSBjoern A. Zeeb
5462dd4f32aeSBjoern A. Zeeb rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;
5463dd4f32aeSBjoern A. Zeeb
5464dd4f32aeSBjoern A. Zeeb ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size);
5465dd4f32aeSBjoern A. Zeeb
5466dd4f32aeSBjoern A. Zeeb if (!(*head_msdu))
5467dd4f32aeSBjoern A. Zeeb *head_msdu = msdu;
5468dd4f32aeSBjoern A. Zeeb else if (last)
5469dd4f32aeSBjoern A. Zeeb last->next = msdu;
5470dd4f32aeSBjoern A. Zeeb
5471dd4f32aeSBjoern A. Zeeb last = msdu;
5472dd4f32aeSBjoern A. Zeeb next_msdu:
5473dd4f32aeSBjoern A. Zeeb rx_bufs_used++;
5474dd4f32aeSBjoern A. Zeeb }
5475dd4f32aeSBjoern A. Zeeb
5476dd4f32aeSBjoern A. Zeeb ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc,
5477dd4f32aeSBjoern A. Zeeb &sw_mon_entries->mon_dst_paddr,
5478dd4f32aeSBjoern A. Zeeb &sw_mon_entries->mon_dst_sw_cookie,
5479dd4f32aeSBjoern A. Zeeb &rbm,
5480dd4f32aeSBjoern A. Zeeb &p_buf_addr_info);
5481dd4f32aeSBjoern A. Zeeb
5482dd4f32aeSBjoern A. Zeeb if (ath11k_dp_rx_monitor_link_desc_return(ar,
5483dd4f32aeSBjoern A. Zeeb p_last_buf_addr_info,
5484dd4f32aeSBjoern A. Zeeb dp->mac_id))
5485dd4f32aeSBjoern A. Zeeb ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
5486dd4f32aeSBjoern A. Zeeb "full mon: dp_rx_monitor_link_desc_return failed\n");
5487dd4f32aeSBjoern A. Zeeb
5488dd4f32aeSBjoern A. Zeeb p_last_buf_addr_info = p_buf_addr_info;
5489dd4f32aeSBjoern A. Zeeb
5490dd4f32aeSBjoern A. Zeeb } while (sw_mon_entries->mon_dst_paddr && msdu_cnt);
5491dd4f32aeSBjoern A. Zeeb
5492dd4f32aeSBjoern A. Zeeb if (last)
5493dd4f32aeSBjoern A. Zeeb last->next = NULL;
5494dd4f32aeSBjoern A. Zeeb
5495dd4f32aeSBjoern A. Zeeb *tail_msdu = msdu;
5496dd4f32aeSBjoern A. Zeeb
5497dd4f32aeSBjoern A. Zeeb return rx_bufs_used;
5498dd4f32aeSBjoern A. Zeeb }
5499dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_full_mon_prepare_mpdu(struct ath11k_dp * dp,struct dp_full_mon_mpdu * mon_mpdu,struct sk_buff * head,struct sk_buff * tail)5500dd4f32aeSBjoern A. Zeeb static int ath11k_dp_rx_full_mon_prepare_mpdu(struct ath11k_dp *dp,
5501dd4f32aeSBjoern A. Zeeb struct dp_full_mon_mpdu *mon_mpdu,
5502dd4f32aeSBjoern A. Zeeb struct sk_buff *head,
5503dd4f32aeSBjoern A. Zeeb struct sk_buff *tail)
5504dd4f32aeSBjoern A. Zeeb {
5505dd4f32aeSBjoern A. Zeeb mon_mpdu = kzalloc(sizeof(*mon_mpdu), GFP_ATOMIC);
5506dd4f32aeSBjoern A. Zeeb if (!mon_mpdu)
5507dd4f32aeSBjoern A. Zeeb return -ENOMEM;
5508dd4f32aeSBjoern A. Zeeb
5509dd4f32aeSBjoern A. Zeeb list_add_tail(&mon_mpdu->list, &dp->dp_full_mon_mpdu_list);
5510dd4f32aeSBjoern A. Zeeb mon_mpdu->head = head;
5511dd4f32aeSBjoern A. Zeeb mon_mpdu->tail = tail;
5512dd4f32aeSBjoern A. Zeeb
5513dd4f32aeSBjoern A. Zeeb return 0;
5514dd4f32aeSBjoern A. Zeeb }
5515dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_full_mon_drop_ppdu(struct ath11k_dp * dp,struct dp_full_mon_mpdu * mon_mpdu)5516dd4f32aeSBjoern A. Zeeb static void ath11k_dp_rx_full_mon_drop_ppdu(struct ath11k_dp *dp,
5517dd4f32aeSBjoern A. Zeeb struct dp_full_mon_mpdu *mon_mpdu)
5518dd4f32aeSBjoern A. Zeeb {
5519dd4f32aeSBjoern A. Zeeb struct dp_full_mon_mpdu *tmp;
5520dd4f32aeSBjoern A. Zeeb struct sk_buff *tmp_msdu, *skb_next;
5521dd4f32aeSBjoern A. Zeeb
5522dd4f32aeSBjoern A. Zeeb if (list_empty(&dp->dp_full_mon_mpdu_list))
5523dd4f32aeSBjoern A. Zeeb return;
5524dd4f32aeSBjoern A. Zeeb
5525dd4f32aeSBjoern A. Zeeb list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) {
5526dd4f32aeSBjoern A. Zeeb list_del(&mon_mpdu->list);
5527dd4f32aeSBjoern A. Zeeb
5528dd4f32aeSBjoern A. Zeeb tmp_msdu = mon_mpdu->head;
5529dd4f32aeSBjoern A. Zeeb while (tmp_msdu) {
5530dd4f32aeSBjoern A. Zeeb skb_next = tmp_msdu->next;
5531dd4f32aeSBjoern A. Zeeb dev_kfree_skb_any(tmp_msdu);
5532dd4f32aeSBjoern A. Zeeb tmp_msdu = skb_next;
5533dd4f32aeSBjoern A. Zeeb }
5534dd4f32aeSBjoern A. Zeeb
5535dd4f32aeSBjoern A. Zeeb kfree(mon_mpdu);
5536dd4f32aeSBjoern A. Zeeb }
5537dd4f32aeSBjoern A. Zeeb }
5538dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_full_mon_deliver_ppdu(struct ath11k * ar,int mac_id,struct ath11k_mon_data * pmon,struct napi_struct * napi)5539dd4f32aeSBjoern A. Zeeb static int ath11k_dp_rx_full_mon_deliver_ppdu(struct ath11k *ar,
5540dd4f32aeSBjoern A. Zeeb int mac_id,
5541dd4f32aeSBjoern A. Zeeb struct ath11k_mon_data *pmon,
5542dd4f32aeSBjoern A. Zeeb struct napi_struct *napi)
5543dd4f32aeSBjoern A. Zeeb {
5544dd4f32aeSBjoern A. Zeeb struct ath11k_pdev_mon_stats *rx_mon_stats;
5545dd4f32aeSBjoern A. Zeeb struct dp_full_mon_mpdu *tmp;
5546dd4f32aeSBjoern A. Zeeb struct dp_full_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
5547dd4f32aeSBjoern A. Zeeb struct sk_buff *head_msdu, *tail_msdu;
5548dd4f32aeSBjoern A. Zeeb struct ath11k_base *ab = ar->ab;
5549dd4f32aeSBjoern A. Zeeb struct ath11k_dp *dp = &ab->dp;
5550dd4f32aeSBjoern A. Zeeb int ret;
5551dd4f32aeSBjoern A. Zeeb
5552dd4f32aeSBjoern A. Zeeb rx_mon_stats = &pmon->rx_mon_stats;
5553dd4f32aeSBjoern A. Zeeb
5554dd4f32aeSBjoern A. Zeeb list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) {
5555dd4f32aeSBjoern A. Zeeb list_del(&mon_mpdu->list);
5556dd4f32aeSBjoern A. Zeeb head_msdu = mon_mpdu->head;
5557dd4f32aeSBjoern A. Zeeb tail_msdu = mon_mpdu->tail;
5558dd4f32aeSBjoern A. Zeeb if (head_msdu && tail_msdu) {
5559dd4f32aeSBjoern A. Zeeb ret = ath11k_dp_rx_mon_deliver(ar, mac_id, head_msdu,
5560*28348caeSBjoern A. Zeeb &pmon->mon_ppdu_info,
5561dd4f32aeSBjoern A. Zeeb tail_msdu, napi);
5562dd4f32aeSBjoern A. Zeeb rx_mon_stats->dest_mpdu_done++;
5563dd4f32aeSBjoern A. Zeeb ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "full mon: deliver ppdu\n");
5564dd4f32aeSBjoern A. Zeeb }
5565dd4f32aeSBjoern A. Zeeb kfree(mon_mpdu);
5566dd4f32aeSBjoern A. Zeeb }
5567dd4f32aeSBjoern A. Zeeb
5568dd4f32aeSBjoern A. Zeeb return ret;
5569dd4f32aeSBjoern A. Zeeb }
5570dd4f32aeSBjoern A. Zeeb
5571dd4f32aeSBjoern A. Zeeb static int
ath11k_dp_rx_process_full_mon_status_ring(struct ath11k_base * ab,int mac_id,struct napi_struct * napi,int budget)5572dd4f32aeSBjoern A. Zeeb ath11k_dp_rx_process_full_mon_status_ring(struct ath11k_base *ab, int mac_id,
5573dd4f32aeSBjoern A. Zeeb struct napi_struct *napi, int budget)
5574dd4f32aeSBjoern A. Zeeb {
5575dd4f32aeSBjoern A. Zeeb struct ath11k *ar = ab->pdevs[mac_id].ar;
5576dd4f32aeSBjoern A. Zeeb struct ath11k_pdev_dp *dp = &ar->dp;
5577dd4f32aeSBjoern A. Zeeb struct ath11k_mon_data *pmon = &dp->mon_data;
5578dd4f32aeSBjoern A. Zeeb struct hal_sw_mon_ring_entries *sw_mon_entries;
5579dd4f32aeSBjoern A. Zeeb int quota = 0, work = 0, count;
5580dd4f32aeSBjoern A. Zeeb
5581dd4f32aeSBjoern A. Zeeb sw_mon_entries = &pmon->sw_mon_entries;
5582dd4f32aeSBjoern A. Zeeb
5583dd4f32aeSBjoern A. Zeeb while (pmon->hold_mon_dst_ring) {
5584dd4f32aeSBjoern A. Zeeb quota = ath11k_dp_rx_process_mon_status(ab, mac_id,
5585dd4f32aeSBjoern A. Zeeb napi, 1);
5586dd4f32aeSBjoern A. Zeeb if (pmon->buf_state == DP_MON_STATUS_MATCH) {
5587dd4f32aeSBjoern A. Zeeb count = sw_mon_entries->status_buf_count;
5588dd4f32aeSBjoern A. Zeeb if (count > 1) {
5589dd4f32aeSBjoern A. Zeeb quota += ath11k_dp_rx_process_mon_status(ab, mac_id,
5590dd4f32aeSBjoern A. Zeeb napi, count);
5591dd4f32aeSBjoern A. Zeeb }
5592dd4f32aeSBjoern A. Zeeb
5593dd4f32aeSBjoern A. Zeeb ath11k_dp_rx_full_mon_deliver_ppdu(ar, dp->mac_id,
5594dd4f32aeSBjoern A. Zeeb pmon, napi);
5595dd4f32aeSBjoern A. Zeeb pmon->hold_mon_dst_ring = false;
5596dd4f32aeSBjoern A. Zeeb } else if (!pmon->mon_status_paddr ||
5597dd4f32aeSBjoern A. Zeeb pmon->buf_state == DP_MON_STATUS_LEAD) {
5598dd4f32aeSBjoern A. Zeeb sw_mon_entries->drop_ppdu = true;
5599dd4f32aeSBjoern A. Zeeb pmon->hold_mon_dst_ring = false;
5600dd4f32aeSBjoern A. Zeeb }
5601dd4f32aeSBjoern A. Zeeb
5602dd4f32aeSBjoern A. Zeeb if (!quota)
5603dd4f32aeSBjoern A. Zeeb break;
5604dd4f32aeSBjoern A. Zeeb
5605dd4f32aeSBjoern A. Zeeb work += quota;
5606dd4f32aeSBjoern A. Zeeb }
5607dd4f32aeSBjoern A. Zeeb
5608dd4f32aeSBjoern A. Zeeb if (sw_mon_entries->drop_ppdu)
5609dd4f32aeSBjoern A. Zeeb ath11k_dp_rx_full_mon_drop_ppdu(&ab->dp, pmon->mon_mpdu);
5610dd4f32aeSBjoern A. Zeeb
5611dd4f32aeSBjoern A. Zeeb return work;
5612dd4f32aeSBjoern A. Zeeb }
5613dd4f32aeSBjoern A. Zeeb
ath11k_dp_full_mon_process_rx(struct ath11k_base * ab,int mac_id,struct napi_struct * napi,int budget)5614dd4f32aeSBjoern A. Zeeb static int ath11k_dp_full_mon_process_rx(struct ath11k_base *ab, int mac_id,
5615dd4f32aeSBjoern A. Zeeb struct napi_struct *napi, int budget)
5616dd4f32aeSBjoern A. Zeeb {
5617dd4f32aeSBjoern A. Zeeb struct ath11k *ar = ab->pdevs[mac_id].ar;
5618dd4f32aeSBjoern A. Zeeb struct ath11k_pdev_dp *dp = &ar->dp;
5619dd4f32aeSBjoern A. Zeeb struct ath11k_mon_data *pmon = &dp->mon_data;
5620dd4f32aeSBjoern A. Zeeb struct hal_sw_mon_ring_entries *sw_mon_entries;
5621dd4f32aeSBjoern A. Zeeb struct ath11k_pdev_mon_stats *rx_mon_stats;
5622dd4f32aeSBjoern A. Zeeb struct sk_buff *head_msdu, *tail_msdu;
5623dd4f32aeSBjoern A. Zeeb void *mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
5624dd4f32aeSBjoern A. Zeeb void *ring_entry;
5625dd4f32aeSBjoern A. Zeeb u32 rx_bufs_used = 0, mpdu_rx_bufs_used;
5626dd4f32aeSBjoern A. Zeeb int quota = 0, ret;
5627dd4f32aeSBjoern A. Zeeb bool break_dst_ring = false;
5628dd4f32aeSBjoern A. Zeeb
5629dd4f32aeSBjoern A. Zeeb spin_lock_bh(&pmon->mon_lock);
5630dd4f32aeSBjoern A. Zeeb
5631dd4f32aeSBjoern A. Zeeb sw_mon_entries = &pmon->sw_mon_entries;
5632dd4f32aeSBjoern A. Zeeb rx_mon_stats = &pmon->rx_mon_stats;
5633dd4f32aeSBjoern A. Zeeb
5634dd4f32aeSBjoern A. Zeeb if (pmon->hold_mon_dst_ring) {
5635dd4f32aeSBjoern A. Zeeb spin_unlock_bh(&pmon->mon_lock);
5636dd4f32aeSBjoern A. Zeeb goto reap_status_ring;
5637dd4f32aeSBjoern A. Zeeb }
5638dd4f32aeSBjoern A. Zeeb
5639dd4f32aeSBjoern A. Zeeb ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
5640dd4f32aeSBjoern A. Zeeb while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
5641dd4f32aeSBjoern A. Zeeb head_msdu = NULL;
5642dd4f32aeSBjoern A. Zeeb tail_msdu = NULL;
5643dd4f32aeSBjoern A. Zeeb
5644dd4f32aeSBjoern A. Zeeb mpdu_rx_bufs_used = ath11k_dp_rx_full_mon_mpdu_pop(ar, ring_entry,
5645dd4f32aeSBjoern A. Zeeb &head_msdu,
5646dd4f32aeSBjoern A. Zeeb &tail_msdu,
5647dd4f32aeSBjoern A. Zeeb sw_mon_entries);
5648dd4f32aeSBjoern A. Zeeb rx_bufs_used += mpdu_rx_bufs_used;
5649dd4f32aeSBjoern A. Zeeb
5650dd4f32aeSBjoern A. Zeeb if (!sw_mon_entries->end_of_ppdu) {
5651dd4f32aeSBjoern A. Zeeb if (head_msdu) {
5652dd4f32aeSBjoern A. Zeeb ret = ath11k_dp_rx_full_mon_prepare_mpdu(&ab->dp,
5653dd4f32aeSBjoern A. Zeeb pmon->mon_mpdu,
5654dd4f32aeSBjoern A. Zeeb head_msdu,
5655dd4f32aeSBjoern A. Zeeb tail_msdu);
5656dd4f32aeSBjoern A. Zeeb if (ret)
5657dd4f32aeSBjoern A. Zeeb break_dst_ring = true;
5658dd4f32aeSBjoern A. Zeeb }
5659dd4f32aeSBjoern A. Zeeb
5660dd4f32aeSBjoern A. Zeeb goto next_entry;
5661dd4f32aeSBjoern A. Zeeb } else {
5662dd4f32aeSBjoern A. Zeeb if (!sw_mon_entries->ppdu_id &&
5663dd4f32aeSBjoern A. Zeeb !sw_mon_entries->mon_status_paddr) {
5664dd4f32aeSBjoern A. Zeeb break_dst_ring = true;
5665dd4f32aeSBjoern A. Zeeb goto next_entry;
5666dd4f32aeSBjoern A. Zeeb }
5667dd4f32aeSBjoern A. Zeeb }
5668dd4f32aeSBjoern A. Zeeb
5669dd4f32aeSBjoern A. Zeeb rx_mon_stats->dest_ppdu_done++;
5670dd4f32aeSBjoern A. Zeeb pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
5671dd4f32aeSBjoern A. Zeeb pmon->buf_state = DP_MON_STATUS_LAG;
5672dd4f32aeSBjoern A. Zeeb pmon->mon_status_paddr = sw_mon_entries->mon_status_paddr;
5673dd4f32aeSBjoern A. Zeeb pmon->hold_mon_dst_ring = true;
5674dd4f32aeSBjoern A. Zeeb next_entry:
5675dd4f32aeSBjoern A. Zeeb ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
5676dd4f32aeSBjoern A. Zeeb mon_dst_srng);
5677dd4f32aeSBjoern A. Zeeb if (break_dst_ring)
5678dd4f32aeSBjoern A. Zeeb break;
5679dd4f32aeSBjoern A. Zeeb }
5680dd4f32aeSBjoern A. Zeeb
5681dd4f32aeSBjoern A. Zeeb ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
5682dd4f32aeSBjoern A. Zeeb spin_unlock_bh(&pmon->mon_lock);
5683dd4f32aeSBjoern A. Zeeb
5684dd4f32aeSBjoern A. Zeeb if (rx_bufs_used) {
5685dd4f32aeSBjoern A. Zeeb ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
5686dd4f32aeSBjoern A. Zeeb &dp->rxdma_mon_buf_ring,
5687dd4f32aeSBjoern A. Zeeb rx_bufs_used,
5688dd4f32aeSBjoern A. Zeeb HAL_RX_BUF_RBM_SW3_BM);
5689dd4f32aeSBjoern A. Zeeb }
5690dd4f32aeSBjoern A. Zeeb
5691dd4f32aeSBjoern A. Zeeb reap_status_ring:
5692dd4f32aeSBjoern A. Zeeb quota = ath11k_dp_rx_process_full_mon_status_ring(ab, mac_id,
5693dd4f32aeSBjoern A. Zeeb napi, budget);
5694dd4f32aeSBjoern A. Zeeb
5695dd4f32aeSBjoern A. Zeeb return quota;
5696dd4f32aeSBjoern A. Zeeb }
5697dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_process_mon_rings(struct ath11k_base * ab,int mac_id,struct napi_struct * napi,int budget)5698dd4f32aeSBjoern A. Zeeb int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
5699dd4f32aeSBjoern A. Zeeb struct napi_struct *napi, int budget)
5700dd4f32aeSBjoern A. Zeeb {
5701dd4f32aeSBjoern A. Zeeb struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
5702dd4f32aeSBjoern A. Zeeb int ret = 0;
5703dd4f32aeSBjoern A. Zeeb
5704dd4f32aeSBjoern A. Zeeb if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
5705dd4f32aeSBjoern A. Zeeb ab->hw_params.full_monitor_mode)
5706dd4f32aeSBjoern A. Zeeb ret = ath11k_dp_full_mon_process_rx(ab, mac_id, napi, budget);
5707dd4f32aeSBjoern A. Zeeb else
5708dd4f32aeSBjoern A. Zeeb ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);
5709dd4f32aeSBjoern A. Zeeb
5710dd4f32aeSBjoern A. Zeeb return ret;
5711dd4f32aeSBjoern A. Zeeb }
5712dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_pdev_mon_status_attach(struct ath11k * ar)5713dd4f32aeSBjoern A. Zeeb static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar)
5714dd4f32aeSBjoern A. Zeeb {
5715dd4f32aeSBjoern A. Zeeb struct ath11k_pdev_dp *dp = &ar->dp;
5716dd4f32aeSBjoern A. Zeeb struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
5717dd4f32aeSBjoern A. Zeeb
5718dd4f32aeSBjoern A. Zeeb skb_queue_head_init(&pmon->rx_status_q);
5719dd4f32aeSBjoern A. Zeeb
5720dd4f32aeSBjoern A. Zeeb pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
5721dd4f32aeSBjoern A. Zeeb
5722dd4f32aeSBjoern A. Zeeb memset(&pmon->rx_mon_stats, 0,
5723dd4f32aeSBjoern A. Zeeb sizeof(pmon->rx_mon_stats));
5724dd4f32aeSBjoern A. Zeeb return 0;
5725dd4f32aeSBjoern A. Zeeb }
5726dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_pdev_mon_attach(struct ath11k * ar)5727dd4f32aeSBjoern A. Zeeb int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar)
5728dd4f32aeSBjoern A. Zeeb {
5729dd4f32aeSBjoern A. Zeeb struct ath11k_pdev_dp *dp = &ar->dp;
5730dd4f32aeSBjoern A. Zeeb struct ath11k_mon_data *pmon = &dp->mon_data;
5731dd4f32aeSBjoern A. Zeeb struct hal_srng *mon_desc_srng = NULL;
5732dd4f32aeSBjoern A. Zeeb struct dp_srng *dp_srng;
5733dd4f32aeSBjoern A. Zeeb int ret = 0;
5734dd4f32aeSBjoern A. Zeeb u32 n_link_desc = 0;
5735dd4f32aeSBjoern A. Zeeb
5736dd4f32aeSBjoern A. Zeeb ret = ath11k_dp_rx_pdev_mon_status_attach(ar);
5737dd4f32aeSBjoern A. Zeeb if (ret) {
5738dd4f32aeSBjoern A. Zeeb ath11k_warn(ar->ab, "pdev_mon_status_attach() failed");
5739dd4f32aeSBjoern A. Zeeb return ret;
5740dd4f32aeSBjoern A. Zeeb }
5741dd4f32aeSBjoern A. Zeeb
5742dd4f32aeSBjoern A. Zeeb /* if rxdma1_enable is false, no need to setup
5743dd4f32aeSBjoern A. Zeeb * rxdma_mon_desc_ring.
5744dd4f32aeSBjoern A. Zeeb */
5745dd4f32aeSBjoern A. Zeeb if (!ar->ab->hw_params.rxdma1_enable)
5746dd4f32aeSBjoern A. Zeeb return 0;
5747dd4f32aeSBjoern A. Zeeb
5748dd4f32aeSBjoern A. Zeeb dp_srng = &dp->rxdma_mon_desc_ring;
5749dd4f32aeSBjoern A. Zeeb n_link_desc = dp_srng->size /
5750dd4f32aeSBjoern A. Zeeb ath11k_hal_srng_get_entrysize(ar->ab, HAL_RXDMA_MONITOR_DESC);
5751dd4f32aeSBjoern A. Zeeb mon_desc_srng =
5752dd4f32aeSBjoern A. Zeeb &ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id];
5753dd4f32aeSBjoern A. Zeeb
5754dd4f32aeSBjoern A. Zeeb ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks,
5755dd4f32aeSBjoern A. Zeeb HAL_RXDMA_MONITOR_DESC, mon_desc_srng,
5756dd4f32aeSBjoern A. Zeeb n_link_desc);
5757dd4f32aeSBjoern A. Zeeb if (ret) {
5758dd4f32aeSBjoern A. Zeeb ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed");
5759dd4f32aeSBjoern A. Zeeb return ret;
5760dd4f32aeSBjoern A. Zeeb }
5761dd4f32aeSBjoern A. Zeeb pmon->mon_last_linkdesc_paddr = 0;
5762dd4f32aeSBjoern A. Zeeb pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
5763dd4f32aeSBjoern A. Zeeb spin_lock_init(&pmon->mon_lock);
5764dd4f32aeSBjoern A. Zeeb
5765dd4f32aeSBjoern A. Zeeb return 0;
5766dd4f32aeSBjoern A. Zeeb }
5767dd4f32aeSBjoern A. Zeeb
ath11k_dp_mon_link_free(struct ath11k * ar)5768dd4f32aeSBjoern A. Zeeb static int ath11k_dp_mon_link_free(struct ath11k *ar)
5769dd4f32aeSBjoern A. Zeeb {
5770dd4f32aeSBjoern A. Zeeb struct ath11k_pdev_dp *dp = &ar->dp;
5771dd4f32aeSBjoern A. Zeeb struct ath11k_mon_data *pmon = &dp->mon_data;
5772dd4f32aeSBjoern A. Zeeb
5773dd4f32aeSBjoern A. Zeeb ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks,
5774dd4f32aeSBjoern A. Zeeb HAL_RXDMA_MONITOR_DESC,
5775dd4f32aeSBjoern A. Zeeb &dp->rxdma_mon_desc_ring);
5776dd4f32aeSBjoern A. Zeeb return 0;
5777dd4f32aeSBjoern A. Zeeb }
5778dd4f32aeSBjoern A. Zeeb
/* Tear down per-pdev monitor mode rx state; only the link descriptor
 * banks need explicit cleanup here.
 */
int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar)
{
	return ath11k_dp_mon_link_free(ar);
}
5784dd4f32aeSBjoern A. Zeeb
ath11k_dp_rx_pktlog_start(struct ath11k_base * ab)5785dd4f32aeSBjoern A. Zeeb int ath11k_dp_rx_pktlog_start(struct ath11k_base *ab)
5786dd4f32aeSBjoern A. Zeeb {
5787dd4f32aeSBjoern A. Zeeb /* start reap timer */
5788dd4f32aeSBjoern A. Zeeb mod_timer(&ab->mon_reap_timer,
5789dd4f32aeSBjoern A. Zeeb jiffies + msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
5790dd4f32aeSBjoern A. Zeeb
5791dd4f32aeSBjoern A. Zeeb return 0;
5792dd4f32aeSBjoern A. Zeeb }
5793dd4f32aeSBjoern A. Zeeb
/* Stop pktlog reaping: optionally cancel the reap timer, then drain
 * whatever is left on the monitor rings.
 *
 * Returns 0 on success or the error from the final ring purge.
 */
int ath11k_dp_rx_pktlog_stop(struct ath11k_base *ab, bool stop_timer)
{
	int ret;

	if (stop_timer)
		del_timer_sync(&ab->mon_reap_timer);

	ret = ath11k_dp_purge_mon_ring(ab);
	if (ret)
		ath11k_warn(ab, "failed to purge dp mon ring: %d\n", ret);

	/* ret is 0 when the purge succeeded */
	return ret;
}
5810