xref: /linux/drivers/net/wireless/ath/ath12k/dp_mon.c (revision a34b0e4e21d6be3c3d620aa7f9dfbf0e9550c19e)
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
4  * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
5  */
6 
7 #include "dp_mon.h"
8 #include "debug.h"
9 #include "dp_tx.h"
10 #include "peer.h"
11 
12 static void
13 ath12k_dp_mon_fill_rx_stats_info(struct hal_rx_mon_ppdu_info *ppdu_info,
14 				 struct ieee80211_rx_status *rx_status)
15 {
16 	u32 center_freq = ppdu_info->freq;
17 
18 	rx_status->freq = center_freq;
19 	rx_status->bw = ath12k_mac_bw_to_mac80211_bw(ppdu_info->bw);
20 	rx_status->nss = ppdu_info->nss;
21 	rx_status->rate_idx = 0;
22 	rx_status->encoding = RX_ENC_LEGACY;
23 	rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
24 
25 	if (center_freq >= ATH12K_MIN_6GHZ_FREQ &&
26 	    center_freq <= ATH12K_MAX_6GHZ_FREQ) {
27 		rx_status->band = NL80211_BAND_6GHZ;
28 	} else if (center_freq >= ATH12K_MIN_2GHZ_FREQ &&
29 		   center_freq <= ATH12K_MAX_2GHZ_FREQ) {
30 		rx_status->band = NL80211_BAND_2GHZ;
31 	} else if (center_freq >= ATH12K_MIN_5GHZ_FREQ &&
32 		   center_freq <= ATH12K_MAX_5GHZ_FREQ) {
33 		rx_status->band = NL80211_BAND_5GHZ;
34 	} else {
35 		rx_status->band = NUM_NL80211_BANDS;
36 	}
37 }
38 
/* Allocate one DMA-mapped skb for the monitor status ring and register
 * it in the ring's IDR so completion processing can look it up again.
 *
 * On success the skb is returned with its DMA address stashed in the skb
 * control block and *buf_id set to the allocated IDR slot.  On any
 * failure all partially acquired resources are released via the goto
 * ladder and NULL is returned.
 */
struct sk_buff
*ath12k_dp_rx_alloc_mon_status_buf(struct ath12k_base *ab,
				   struct dp_rxdma_mon_ring *rx_ring,
				   int *buf_id)
{
	struct sk_buff *skb;
	dma_addr_t paddr;

	skb = dev_alloc_skb(RX_MON_STATUS_BUF_SIZE);

	if (!skb)
		goto fail_alloc_skb;

	/* Advance skb->data to the required alignment boundary before
	 * mapping; the pulled bytes are sacrificed as padding.
	 */
	if (!IS_ALIGNED((unsigned long)skb->data,
			RX_MON_STATUS_BUF_ALIGN)) {
		skb_pull(skb, PTR_ALIGN(skb->data, RX_MON_STATUS_BUF_ALIGN) -
			 skb->data);
	}

	/* Map the whole usable area (current data plus tailroom) for the
	 * device to write into.
	 */
	paddr = dma_map_single(ab->dev, skb->data,
			       skb->len + skb_tailroom(skb),
			       DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(ab->dev, paddr)))
		goto fail_free_skb;

	/* GFP_ATOMIC: this runs with bottom halves disabled under the
	 * ring's IDR lock.
	 */
	spin_lock_bh(&rx_ring->idr_lock);
	*buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
			    rx_ring->bufs_max, GFP_ATOMIC);
	spin_unlock_bh(&rx_ring->idr_lock);
	if (*buf_id < 0)
		goto fail_dma_unmap;

	ATH12K_SKB_RXCB(skb)->paddr = paddr;
	return skb;

fail_dma_unmap:
	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
			 DMA_FROM_DEVICE);
fail_free_skb:
	dev_kfree_skb_any(skb);
fail_alloc_skb:
	return NULL;
}
EXPORT_SYMBOL(ath12k_dp_rx_alloc_mon_status_buf);
83 
84 u32 ath12k_dp_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id)
85 {
86 	u32 ret = 0;
87 
88 	if ((*ppdu_id < msdu_ppdu_id) &&
89 	    ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) {
90 		/* Hold on mon dest ring, and reap mon status ring. */
91 		*ppdu_id = msdu_ppdu_id;
92 		ret = msdu_ppdu_id;
93 	} else if ((*ppdu_id > msdu_ppdu_id) &&
94 		((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) {
95 		/* PPDU ID has exceeded the maximum value and will
96 		 * restart from 0.
97 		 */
98 		*ppdu_id = msdu_ppdu_id;
99 		ret = msdu_ppdu_id;
100 	}
101 	return ret;
102 }
103 EXPORT_SYMBOL(ath12k_dp_mon_comp_ppduid);
104 
105 static void
106 ath12k_dp_mon_fill_rx_rate(struct ath12k_pdev_dp *dp_pdev,
107 			   struct hal_rx_mon_ppdu_info *ppdu_info,
108 			   struct ieee80211_rx_status *rx_status)
109 {
110 	struct ath12k_dp *dp = dp_pdev->dp;
111 	struct ath12k_base *ab = dp->ab;
112 	struct ieee80211_supported_band *sband;
113 	enum rx_msdu_start_pkt_type pkt_type;
114 	u8 rate_mcs, nss, sgi;
115 	bool is_cck;
116 
117 	pkt_type = ppdu_info->preamble_type;
118 	rate_mcs = ppdu_info->rate;
119 	nss = ppdu_info->nss;
120 	sgi = ppdu_info->gi;
121 
122 	switch (pkt_type) {
123 	case RX_MSDU_START_PKT_TYPE_11A:
124 	case RX_MSDU_START_PKT_TYPE_11B:
125 		is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
126 		if (rx_status->band < NUM_NL80211_BANDS) {
127 			struct ath12k *ar = ath12k_pdev_dp_to_ar(dp_pdev);
128 
129 			sband = &ar->mac.sbands[rx_status->band];
130 			rx_status->rate_idx = ath12k_mac_hw_rate_to_idx(sband, rate_mcs,
131 									is_cck);
132 		}
133 		break;
134 	case RX_MSDU_START_PKT_TYPE_11N:
135 		rx_status->encoding = RX_ENC_HT;
136 		if (rate_mcs > ATH12K_HT_MCS_MAX) {
137 			ath12k_warn(ab,
138 				    "Received with invalid mcs in HT mode %d\n",
139 				     rate_mcs);
140 			break;
141 		}
142 		rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
143 		if (sgi)
144 			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
145 		break;
146 	case RX_MSDU_START_PKT_TYPE_11AC:
147 		rx_status->encoding = RX_ENC_VHT;
148 		rx_status->rate_idx = rate_mcs;
149 		if (rate_mcs > ATH12K_VHT_MCS_MAX) {
150 			ath12k_warn(ab,
151 				    "Received with invalid mcs in VHT mode %d\n",
152 				     rate_mcs);
153 			break;
154 		}
155 		if (sgi)
156 			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
157 		break;
158 	case RX_MSDU_START_PKT_TYPE_11AX:
159 		rx_status->rate_idx = rate_mcs;
160 		if (rate_mcs > ATH12K_HE_MCS_MAX) {
161 			ath12k_warn(ab,
162 				    "Received with invalid mcs in HE mode %d\n",
163 				    rate_mcs);
164 			break;
165 		}
166 		rx_status->encoding = RX_ENC_HE;
167 		rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
168 		break;
169 	case RX_MSDU_START_PKT_TYPE_11BE:
170 		rx_status->rate_idx = rate_mcs;
171 		if (rate_mcs > ATH12K_EHT_MCS_MAX) {
172 			ath12k_warn(ab,
173 				    "Received with invalid mcs in EHT mode %d\n",
174 				    rate_mcs);
175 			break;
176 		}
177 		rx_status->encoding = RX_ENC_EHT;
178 		rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
179 		break;
180 	default:
181 		ath12k_dbg(ab, ATH12K_DBG_DATA,
182 			   "monitor receives invalid preamble type %d",
183 			    pkt_type);
184 		break;
185 	}
186 }
187 
188 static void ath12k_dp_mon_rx_msdus_set_payload(struct ath12k_base *ab,
189 					       struct sk_buff *head_msdu,
190 					       struct sk_buff *tail_msdu)
191 {
192 	u32 rx_pkt_offset, l2_hdr_offset, total_offset;
193 
194 	rx_pkt_offset = ab->hal.hal_desc_sz;
195 	l2_hdr_offset =
196 		ath12k_dp_rx_h_l3pad(ab, (struct hal_rx_desc *)tail_msdu->data);
197 
198 	if (ab->hw_params->rxdma1_enable)
199 		total_offset = ATH12K_MON_RX_PKT_OFFSET;
200 	else
201 		total_offset = rx_pkt_offset + l2_hdr_offset;
202 
203 	skb_pull(head_msdu, total_offset);
204 }
205 
206 struct sk_buff *
207 ath12k_dp_mon_rx_merg_msdus(struct ath12k_pdev_dp *dp_pdev,
208 			    struct dp_mon_mpdu *mon_mpdu,
209 			    struct hal_rx_mon_ppdu_info *ppdu_info,
210 			    struct ieee80211_rx_status *rxs)
211 {
212 	struct ath12k_dp *dp = dp_pdev->dp;
213 	struct ath12k_base *ab = dp->ab;
214 	struct sk_buff *msdu, *mpdu_buf, *prev_buf, *head_frag_list;
215 	struct sk_buff *head_msdu, *tail_msdu;
216 	struct hal_rx_desc *rx_desc;
217 	u8 *hdr_desc, *dest, decap_format = mon_mpdu->decap_format;
218 	struct ieee80211_hdr_3addr *wh;
219 	struct ieee80211_channel *channel;
220 	u32 frag_list_sum_len = 0;
221 	u8 channel_num = ppdu_info->chan_num;
222 
223 	mpdu_buf = NULL;
224 	head_msdu = mon_mpdu->head;
225 	tail_msdu = mon_mpdu->tail;
226 
227 	if (!head_msdu || !tail_msdu)
228 		goto err_merge_fail;
229 
230 	ath12k_dp_mon_fill_rx_stats_info(ppdu_info, rxs);
231 
232 	if (unlikely(rxs->band == NUM_NL80211_BANDS ||
233 		     !ath12k_pdev_dp_to_hw(dp_pdev)->wiphy->bands[rxs->band])) {
234 		struct ath12k *ar = ath12k_pdev_dp_to_ar(dp_pdev);
235 
236 		ath12k_dbg(ab, ATH12K_DBG_DATA,
237 			   "sband is NULL for status band %d channel_num %d center_freq %d pdev_id %d\n",
238 			   rxs->band, channel_num, ppdu_info->freq, ar->pdev_idx);
239 
240 		spin_lock_bh(&ar->data_lock);
241 		channel = ar->rx_channel;
242 		if (channel) {
243 			rxs->band = channel->band;
244 			channel_num =
245 				ieee80211_frequency_to_channel(channel->center_freq);
246 		}
247 		spin_unlock_bh(&ar->data_lock);
248 	}
249 
250 	if (rxs->band < NUM_NL80211_BANDS)
251 		rxs->freq = ieee80211_channel_to_frequency(channel_num,
252 							   rxs->band);
253 
254 	ath12k_dp_mon_fill_rx_rate(dp_pdev, ppdu_info, rxs);
255 
256 	if (decap_format == DP_RX_DECAP_TYPE_RAW) {
257 		ath12k_dp_mon_rx_msdus_set_payload(ab, head_msdu, tail_msdu);
258 
259 		prev_buf = head_msdu;
260 		msdu = head_msdu->next;
261 		head_frag_list = NULL;
262 
263 		while (msdu) {
264 			ath12k_dp_mon_rx_msdus_set_payload(ab, head_msdu, tail_msdu);
265 
266 			if (!head_frag_list)
267 				head_frag_list = msdu;
268 
269 			frag_list_sum_len += msdu->len;
270 			prev_buf = msdu;
271 			msdu = msdu->next;
272 		}
273 
274 		prev_buf->next = NULL;
275 
276 		skb_trim(prev_buf, prev_buf->len);
277 		if (head_frag_list) {
278 			skb_shinfo(head_msdu)->frag_list = head_frag_list;
279 			head_msdu->data_len = frag_list_sum_len;
280 			head_msdu->len += head_msdu->data_len;
281 			head_msdu->next = NULL;
282 		}
283 	} else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) {
284 		u8 qos_pkt = 0;
285 
286 		rx_desc = (struct hal_rx_desc *)head_msdu->data;
287 		hdr_desc =
288 			ab->hal.ops->rx_desc_get_msdu_payload(rx_desc);
289 
290 		/* Base size */
291 		wh = (struct ieee80211_hdr_3addr *)hdr_desc;
292 
293 		if (ieee80211_is_data_qos(wh->frame_control))
294 			qos_pkt = 1;
295 
296 		msdu = head_msdu;
297 
298 		while (msdu) {
299 			ath12k_dp_mon_rx_msdus_set_payload(ab, head_msdu, tail_msdu);
300 			if (qos_pkt) {
301 				dest = skb_push(msdu, sizeof(__le16));
302 				if (!dest)
303 					goto err_merge_fail;
304 				memcpy(dest, hdr_desc, sizeof(struct ieee80211_qos_hdr));
305 			}
306 			prev_buf = msdu;
307 			msdu = msdu->next;
308 		}
309 		dest = skb_put(prev_buf, HAL_RX_FCS_LEN);
310 		if (!dest)
311 			goto err_merge_fail;
312 
313 		ath12k_dbg(ab, ATH12K_DBG_DATA,
314 			   "mpdu_buf %p mpdu_buf->len %u",
315 			   prev_buf, prev_buf->len);
316 	} else {
317 		ath12k_dbg(ab, ATH12K_DBG_DATA,
318 			   "decap format %d is not supported!\n",
319 			   decap_format);
320 		goto err_merge_fail;
321 	}
322 
323 	return head_msdu;
324 
325 err_merge_fail:
326 	if (mpdu_buf && decap_format != DP_RX_DECAP_TYPE_RAW) {
327 		ath12k_dbg(ab, ATH12K_DBG_DATA,
328 			   "err_merge_fail mpdu_buf %p", mpdu_buf);
329 		/* Free the head buffer */
330 		dev_kfree_skb_any(mpdu_buf);
331 	}
332 	return NULL;
333 }
334 EXPORT_SYMBOL(ath12k_dp_mon_rx_merg_msdus);
335 
336 static void
337 ath12k_dp_mon_rx_update_radiotap_he(struct hal_rx_mon_ppdu_info *rx_status,
338 				    u8 *rtap_buf)
339 {
340 	u32 rtap_len = 0;
341 
342 	put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
343 	rtap_len += 2;
344 
345 	put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
346 	rtap_len += 2;
347 
348 	put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
349 	rtap_len += 2;
350 
351 	put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
352 	rtap_len += 2;
353 
354 	put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
355 	rtap_len += 2;
356 
357 	put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
358 }
359 
360 static void
361 ath12k_dp_mon_rx_update_radiotap_he_mu(struct hal_rx_mon_ppdu_info *rx_status,
362 				       u8 *rtap_buf)
363 {
364 	u32 rtap_len = 0;
365 
366 	put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
367 	rtap_len += 2;
368 
369 	put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
370 	rtap_len += 2;
371 
372 	rtap_buf[rtap_len] = rx_status->he_RU[0];
373 	rtap_len += 1;
374 
375 	rtap_buf[rtap_len] = rx_status->he_RU[1];
376 	rtap_len += 1;
377 
378 	rtap_buf[rtap_len] = rx_status->he_RU[2];
379 	rtap_len += 1;
380 
381 	rtap_buf[rtap_len] = rx_status->he_RU[3];
382 }
383 
384 void ath12k_dp_mon_update_radiotap(struct ath12k_pdev_dp *dp_pdev,
385 				   struct hal_rx_mon_ppdu_info *ppduinfo,
386 				   struct sk_buff *mon_skb,
387 				   struct ieee80211_rx_status *rxs)
388 {
389 	struct ath12k *ar = ath12k_pdev_dp_to_ar(dp_pdev);
390 	struct ieee80211_supported_band *sband;
391 	s32 noise_floor;
392 	u8 *ptr = NULL;
393 
394 	spin_lock_bh(&ar->data_lock);
395 	noise_floor = ath12k_pdev_get_noise_floor(ar);
396 	spin_unlock_bh(&ar->data_lock);
397 
398 	rxs->flag |= RX_FLAG_MACTIME_START;
399 	rxs->nss = ppduinfo->nss;
400 	if (test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
401 		     ar->ab->wmi_ab.svc_map))
402 		rxs->signal = ppduinfo->rssi_comb;
403 	else
404 		rxs->signal = ppduinfo->rssi_comb + noise_floor;
405 
406 	if (ppduinfo->userstats[ppduinfo->userid].ampdu_present) {
407 		rxs->flag |= RX_FLAG_AMPDU_DETAILS;
408 		rxs->ampdu_reference = ppduinfo->userstats[ppduinfo->userid].ampdu_id;
409 	}
410 
411 	if (ppduinfo->is_eht || ppduinfo->eht_usig) {
412 		struct ieee80211_radiotap_tlv *tlv;
413 		struct ieee80211_radiotap_eht *eht;
414 		struct ieee80211_radiotap_eht_usig *usig;
415 		u16 len = 0, i, eht_len, usig_len;
416 		u8 user;
417 
418 		if (ppduinfo->is_eht) {
419 			eht_len = struct_size(eht,
420 					      user_info,
421 					      ppduinfo->eht_info.num_user_info);
422 			len += sizeof(*tlv) + eht_len;
423 		}
424 
425 		if (ppduinfo->eht_usig) {
426 			usig_len = sizeof(*usig);
427 			len += sizeof(*tlv) + usig_len;
428 		}
429 
430 		rxs->flag |= RX_FLAG_RADIOTAP_TLV_AT_END;
431 		rxs->encoding = RX_ENC_EHT;
432 
433 		skb_reset_mac_header(mon_skb);
434 
435 		tlv = skb_push(mon_skb, len);
436 
437 		if (ppduinfo->is_eht) {
438 			tlv->type = cpu_to_le16(IEEE80211_RADIOTAP_EHT);
439 			tlv->len = cpu_to_le16(eht_len);
440 
441 			eht = (struct ieee80211_radiotap_eht *)tlv->data;
442 			eht->known = ppduinfo->eht_info.eht.known;
443 
444 			for (i = 0;
445 			     i < ARRAY_SIZE(eht->data) &&
446 			     i < ARRAY_SIZE(ppduinfo->eht_info.eht.data);
447 			     i++)
448 				eht->data[i] = ppduinfo->eht_info.eht.data[i];
449 
450 			for (user = 0; user < ppduinfo->eht_info.num_user_info; user++)
451 				put_unaligned_le32(ppduinfo->eht_info.user_info[user],
452 						   &eht->user_info[user]);
453 
454 			tlv = (struct ieee80211_radiotap_tlv *)&tlv->data[eht_len];
455 		}
456 
457 		if (ppduinfo->eht_usig) {
458 			tlv->type = cpu_to_le16(IEEE80211_RADIOTAP_EHT_USIG);
459 			tlv->len = cpu_to_le16(usig_len);
460 
461 			usig = (struct ieee80211_radiotap_eht_usig *)tlv->data;
462 			*usig = ppduinfo->u_sig_info.usig;
463 		}
464 	} else if (ppduinfo->he_mu_flags) {
465 		rxs->flag |= RX_FLAG_RADIOTAP_HE_MU;
466 		rxs->encoding = RX_ENC_HE;
467 		ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he_mu));
468 		ath12k_dp_mon_rx_update_radiotap_he_mu(ppduinfo, ptr);
469 	} else if (ppduinfo->he_flags) {
470 		rxs->flag |= RX_FLAG_RADIOTAP_HE;
471 		rxs->encoding = RX_ENC_HE;
472 		ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he));
473 		ath12k_dp_mon_rx_update_radiotap_he(ppduinfo, ptr);
474 		rxs->rate_idx = ppduinfo->rate;
475 	} else if (ppduinfo->vht_flags) {
476 		rxs->encoding = RX_ENC_VHT;
477 		rxs->rate_idx = ppduinfo->rate;
478 	} else if (ppduinfo->ht_flags) {
479 		rxs->encoding = RX_ENC_HT;
480 		rxs->rate_idx = ppduinfo->rate;
481 	} else {
482 		struct ath12k *ar;
483 
484 		ar = ath12k_pdev_dp_to_ar(dp_pdev);
485 		rxs->encoding = RX_ENC_LEGACY;
486 		sband = &ar->mac.sbands[rxs->band];
487 		rxs->rate_idx = ath12k_mac_hw_rate_to_idx(sband, ppduinfo->rate,
488 							  ppduinfo->cck_flag);
489 	}
490 
491 	rxs->mactime = ppduinfo->tsft;
492 }
493 EXPORT_SYMBOL(ath12k_dp_mon_update_radiotap);
494 
/* Hand one fully assembled monitor-path MSDU to mac80211 via
 * ieee80211_rx_napi(), after resolving the transmitting station and
 * copying @status into the skb's control block.
 *
 * For HE frames that don't already carry HE radiotap data, a minimal
 * radiotap HE header (MCS/BW/GI known) is pushed first.  Ethernet-decap
 * frames that are neither EAPOL nor encrypted multicast are flagged
 * RX_FLAG_8023 for the fast 802.3 RX path.
 */
void ath12k_dp_mon_rx_deliver_msdu(struct ath12k_pdev_dp *dp_pdev,
				   struct napi_struct *napi,
				   struct sk_buff *msdu,
				   const struct hal_rx_mon_ppdu_info *ppduinfo,
				   struct ieee80211_rx_status *status,
				   u8 decap)
{
	struct ath12k_dp *dp = dp_pdev->dp;
	struct ath12k_base *ab = dp->ab;
	/* Template advertising which HE radiotap fields are valid. */
	static const struct ieee80211_radiotap_he known = {
		.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
				     IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
		.data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
	};
	struct ieee80211_rx_status *rx_status;
	struct ieee80211_radiotap_he *he = NULL;
	struct ieee80211_sta *pubsta = NULL;
	struct ath12k_dp_link_peer *peer;
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	struct hal_rx_desc_data rx_info;
	bool is_mcbc = rxcb->is_mcbc;
	bool is_eapol_tkip = rxcb->is_eapol;
	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
	u8 addr[ETH_ALEN] = {};

	status->link_valid = 0;

	/* HE frame without radiotap HE data yet: prepend the template. */
	if ((status->encoding == RX_ENC_HE) && !(status->flag & RX_FLAG_RADIOTAP_HE) &&
	    !(status->flag & RX_FLAG_SKIP_MONITOR)) {
		he = skb_push(msdu, sizeof(known));
		memcpy(he, &known, sizeof(known));
		status->flag |= RX_FLAG_RADIOTAP_HE;
	}

	/* NOTE(review): rx_desc is passed for both descriptor arguments
	 * here — confirm this is the intended usage of
	 * ath12k_dp_extract_rx_desc_data().
	 */
	ath12k_dp_extract_rx_desc_data(dp->hal, &rx_info, rx_desc, rx_desc);

	/* Resolve the link peer under RCU + dp_lock; copy out everything
	 * needed before dropping the locks.
	 */
	rcu_read_lock();
	spin_lock_bh(&dp->dp_lock);
	peer = ath12k_dp_rx_h_find_link_peer(dp_pdev, msdu, &rx_info);
	if (peer && peer->sta) {
		pubsta = peer->sta;
		memcpy(addr, peer->addr, ETH_ALEN);
		if (pubsta->valid_links) {
			status->link_valid = 1;
			status->link_id = peer->link_id;
		}
	}

	spin_unlock_bh(&dp->dp_lock);
	rcu_read_unlock();

	ath12k_dbg(ab, ATH12K_DBG_DATA,
		   "rx skb %p len %u peer %pM %u %s %s%s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
		   msdu,
		   msdu->len,
		   addr,
		   rxcb->tid,
		   (is_mcbc) ? "mcast" : "ucast",
		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
		   (status->encoding == RX_ENC_HT) ? "ht" : "",
		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
		   (status->encoding == RX_ENC_HE) ? "he" : "",
		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
		   (status->bw == RATE_INFO_BW_320) ? "320" : "",
		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));

	ath12k_dbg_dump(ab, ATH12K_DBG_DP_RX, NULL, "dp rx msdu: ",
			msdu->data, msdu->len);
	rx_status = IEEE80211_SKB_RXCB(msdu);
	*rx_status = *status;

	/* TODO: trace rx packet */

	/* PN for multicast packets are not validate in HW,
	 * so skip 802.3 rx path
	 * Also, fast_rx expects the STA to be authorized, hence
	 * eapol packets are sent in slow path.
	 */
	if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol_tkip &&
	    !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
		rx_status->flag |= RX_FLAG_8023;

	ieee80211_rx_napi(ath12k_pdev_dp_to_hw(dp_pdev), pubsta, msdu, napi);
}
EXPORT_SYMBOL(ath12k_dp_mon_rx_deliver_msdu);
589 
590 int ath12k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len)
591 {
592 	if (skb->len > len) {
593 		skb_trim(skb, len);
594 	} else {
595 		if (skb_tailroom(skb) < len - skb->len) {
596 			if ((pskb_expand_head(skb, 0,
597 					      len - skb->len - skb_tailroom(skb),
598 					      GFP_ATOMIC))) {
599 				return -ENOMEM;
600 			}
601 		}
602 		skb_put(skb, (len - skb->len));
603 	}
604 
605 	return 0;
606 }
607 EXPORT_SYMBOL(ath12k_dp_pkt_set_pktlen);
608 
/* Consume one monitor destination packet descriptor: look up the skb by
 * the buffer id encoded in @packet_info->cookie, unmap it, size it to
 * the DMA length, and append it to the MPDU currently being assembled
 * in @pmon->mon_mpdu.  The consumed ring slot is replenished before
 * returning.
 *
 * Always returns 0; an unknown buffer id is only warned about.
 */
int
ath12k_dp_mon_parse_status_buf(struct ath12k_pdev_dp *dp_pdev,
			       struct ath12k_mon_data *pmon,
			       const struct dp_mon_packet_info *packet_info)
{
	struct ath12k_dp *dp = dp_pdev->dp;
	struct ath12k_base *ab = dp->ab;
	struct dp_rxdma_mon_ring *buf_ring = &dp->rxdma_mon_buf_ring;
	struct sk_buff *msdu;
	int buf_id;
	u32 offset;

	buf_id = u32_get_bits(packet_info->cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);

	/* Claim the skb from the IDR; removal makes this the sole owner. */
	spin_lock_bh(&buf_ring->idr_lock);
	msdu = idr_remove(&buf_ring->bufs_idr, buf_id);
	spin_unlock_bh(&buf_ring->idr_lock);

	if (unlikely(!msdu)) {
		ath12k_warn(ab, "mon dest desc with inval buf_id %d\n", buf_id);
		return 0;
	}

	dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(msdu)->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);

	/* Trim/grow the skb to the DMA'd payload plus the 802.11 header
	 * offset; drop it (but still replenish) on failure.
	 */
	offset = packet_info->dma_length + ATH12K_MON_RX_DOT11_OFFSET;
	if (ath12k_dp_pkt_set_pktlen(msdu, offset)) {
		dev_kfree_skb_any(msdu);
		goto dest_replenish;
	}

	/* Link the skb onto the in-progress MPDU chain. */
	if (!pmon->mon_mpdu->head)
		pmon->mon_mpdu->head = msdu;
	else
		pmon->mon_mpdu->tail->next = msdu;

	pmon->mon_mpdu->tail = msdu;

dest_replenish:
	ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);

	return 0;
}
EXPORT_SYMBOL(ath12k_dp_mon_parse_status_buf);
655 
/* Refill the monitor buffer ring with @req_entries freshly allocated,
 * aligned, DMA-mapped skbs.  Each skb is registered in the ring's IDR
 * and the buffer id is encoded into the hardware cookie so completions
 * can be matched back to it.
 *
 * Returns 0 when all requested entries were posted, or -ENOMEM on the
 * first allocation/mapping/ring-space failure; resources acquired for
 * the failing entry are unwound via the goto ladder and the ring access
 * is still properly ended.
 */
int ath12k_dp_mon_buf_replenish(struct ath12k_base *ab,
				struct dp_rxdma_mon_ring *buf_ring,
				int req_entries)
{
	struct hal_mon_buf_ring *mon_buf;
	struct sk_buff *skb;
	struct hal_srng *srng;
	dma_addr_t paddr;
	u32 cookie;
	int buf_id;

	srng = &ab->hal.srng_list[buf_ring->refill_buf_ring.ring_id];
	spin_lock_bh(&srng->lock);
	ath12k_hal_srng_access_begin(ab, srng);

	while (req_entries > 0) {
		skb = dev_alloc_skb(DP_RX_BUFFER_SIZE + DP_RX_BUFFER_ALIGN_SIZE);
		if (unlikely(!skb))
			goto fail_alloc_skb;

		/* Align skb->data as required by hardware; the extra
		 * DP_RX_BUFFER_ALIGN_SIZE allocated above covers the pull.
		 */
		if (!IS_ALIGNED((unsigned long)skb->data, DP_RX_BUFFER_ALIGN_SIZE)) {
			skb_pull(skb,
				 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
				 skb->data);
		}

		paddr = dma_map_single(ab->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(ab->dev, paddr)))
			goto fail_free_skb;

		/* GFP_ATOMIC: bottom halves are disabled here. */
		spin_lock_bh(&buf_ring->idr_lock);
		buf_id = idr_alloc(&buf_ring->bufs_idr, skb, 0,
				   buf_ring->bufs_max * 3, GFP_ATOMIC);
		spin_unlock_bh(&buf_ring->idr_lock);

		if (unlikely(buf_id < 0))
			goto fail_dma_unmap;

		mon_buf = ath12k_hal_srng_src_get_next_entry(ab, srng);
		if (unlikely(!mon_buf))
			goto fail_idr_remove;

		ATH12K_SKB_RXCB(skb)->paddr = paddr;

		cookie = u32_encode_bits(buf_id, DP_RXDMA_BUF_COOKIE_BUF_ID);

		/* Publish the buffer to hardware: split 64-bit DMA address
		 * plus the lookup cookie.
		 */
		mon_buf->paddr_lo = cpu_to_le32(lower_32_bits(paddr));
		mon_buf->paddr_hi = cpu_to_le32(upper_32_bits(paddr));
		mon_buf->cookie = cpu_to_le64(cookie);

		req_entries--;
	}

	ath12k_hal_srng_access_end(ab, srng);
	spin_unlock_bh(&srng->lock);
	return 0;

fail_idr_remove:
	spin_lock_bh(&buf_ring->idr_lock);
	idr_remove(&buf_ring->bufs_idr, buf_id);
	spin_unlock_bh(&buf_ring->idr_lock);
fail_dma_unmap:
	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
			 DMA_FROM_DEVICE);
fail_free_skb:
	dev_kfree_skb_any(skb);
fail_alloc_skb:
	ath12k_hal_srng_access_end(ab, srng);
	spin_unlock_bh(&srng->lock);
	return -ENOMEM;
}
EXPORT_SYMBOL(ath12k_dp_mon_buf_replenish);
731 
/* Replenish the monitor status refill ring with up to @req_entries
 * aligned, DMA-mapped skbs, bounded by the ring's free space.  Passing
 * req_entries == 0 triggers an opportunistic top-up when the ring is
 * more than 3/4 empty.
 *
 * Returns the number of entries actually posted (may be fewer than
 * requested on allocation/mapping/ring failure; partial progress is
 * kept and the failing entry's resources are unwound).
 */
int ath12k_dp_mon_status_bufs_replenish(struct ath12k_base *ab,
					struct dp_rxdma_mon_ring *rx_ring,
					int req_entries)
{
	enum hal_rx_buf_return_buf_manager mgr =
		ab->hal.hal_params->rx_buf_rbm;
	int num_free, num_remain, buf_id;
	struct ath12k_buffer_addr *desc;
	struct hal_srng *srng;
	struct sk_buff *skb;
	dma_addr_t paddr;
	u32 cookie;

	req_entries = min(req_entries, rx_ring->bufs_max);

	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	/* On an unbounded request (0), only refill when the ring is
	 * mostly drained; otherwise cap by the available slots.
	 */
	num_free = ath12k_hal_srng_src_num_free(ab, srng, true);
	if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
		req_entries = num_free;

	req_entries = min(num_free, req_entries);
	num_remain = req_entries;

	while (num_remain > 0) {
		skb = dev_alloc_skb(RX_MON_STATUS_BUF_SIZE);
		if (!skb)
			break;

		/* Align skb->data as the hardware requires. */
		if (!IS_ALIGNED((unsigned long)skb->data,
				RX_MON_STATUS_BUF_ALIGN)) {
			skb_pull(skb,
				 PTR_ALIGN(skb->data, RX_MON_STATUS_BUF_ALIGN) -
				 skb->data);
		}

		paddr = dma_map_single(ab->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(ab->dev, paddr))
			goto fail_free_skb;

		/* GFP_ATOMIC: bottom halves are disabled here. */
		spin_lock_bh(&rx_ring->idr_lock);
		buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
				   rx_ring->bufs_max * 3, GFP_ATOMIC);
		spin_unlock_bh(&rx_ring->idr_lock);
		if (buf_id < 0)
			goto fail_dma_unmap;
		cookie = u32_encode_bits(buf_id, DP_RXDMA_BUF_COOKIE_BUF_ID);

		desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
		if (!desc)
			goto fail_buf_unassign;

		ATH12K_SKB_RXCB(skb)->paddr = paddr;

		num_remain--;

		ath12k_hal_rx_buf_addr_info_set(&ab->hal, desc, paddr, cookie, mgr);
	}

	ath12k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;

fail_buf_unassign:
	spin_lock_bh(&rx_ring->idr_lock);
	idr_remove(&rx_ring->bufs_idr, buf_id);
	spin_unlock_bh(&rx_ring->idr_lock);
fail_dma_unmap:
	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
			 DMA_FROM_DEVICE);
fail_free_skb:
	dev_kfree_skb_any(skb);

	ath12k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;
}
819 
820 static void
821 ath12k_dp_mon_rx_update_peer_rate_table_stats(struct ath12k_rx_peer_stats *rx_stats,
822 					      struct hal_rx_mon_ppdu_info *ppdu_info,
823 					      struct hal_rx_user_status *user_stats,
824 					      u32 num_msdu)
825 {
826 	struct ath12k_rx_peer_rate_stats *stats;
827 	u32 mcs_idx = (user_stats) ? user_stats->mcs : ppdu_info->mcs;
828 	u32 nss_idx = (user_stats) ? user_stats->nss - 1 : ppdu_info->nss - 1;
829 	u32 bw_idx = ppdu_info->bw;
830 	u32 gi_idx = ppdu_info->gi;
831 	u32 len;
832 
833 	if (!rx_stats)
834 		return;
835 
836 	if (mcs_idx > HAL_RX_MAX_MCS_HT || nss_idx >= HAL_RX_MAX_NSS ||
837 	    bw_idx >= HAL_RX_BW_MAX || gi_idx >= HAL_RX_GI_MAX) {
838 		return;
839 	}
840 
841 	if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11AX ||
842 	    ppdu_info->preamble_type == HAL_RX_PREAMBLE_11BE)
843 		gi_idx = ath12k_he_gi_to_nl80211_he_gi(ppdu_info->gi);
844 
845 	rx_stats->pkt_stats.rx_rate[bw_idx][gi_idx][nss_idx][mcs_idx] += num_msdu;
846 	stats = &rx_stats->byte_stats;
847 
848 	if (user_stats)
849 		len = user_stats->mpdu_ok_byte_count;
850 	else
851 		len = ppdu_info->mpdu_len;
852 
853 	stats->rx_rate[bw_idx][gi_idx][nss_idx][mcs_idx] += len;
854 }
855 
/* Fold one single-user PPDU's counters from @ppdu_info into @peer's RX
 * statistics: MSDU counts by protocol, per-TID/preamble/reception-type
 * tallies, and per-NSS/MCS/GI/BW packet and byte counters.
 *
 * Note: @ppdu_info is normalized in place for legacy preambles (nss,
 * mcs, tid set to sentinels) and the HT MCS is reduced modulo 8 before
 * the final rate-table update — later code depends on these mutations.
 */
void ath12k_dp_mon_rx_update_peer_su_stats(struct ath12k_dp_link_peer *peer,
					   struct hal_rx_mon_ppdu_info *ppdu_info)
{
	struct ath12k_rx_peer_stats *rx_stats = peer->peer_stats.rx_stats;
	u32 num_msdu;

	/* RSSI is tracked on the peer even when detailed stats are off. */
	peer->rssi_comb = ppdu_info->rssi_comb;
	ewma_avg_rssi_add(&peer->avg_rssi, ppdu_info->rssi_comb);
	if (!rx_stats)
		return;

	num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
		   ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;

	rx_stats->num_msdu += num_msdu;
	rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count +
				    ppdu_info->tcp_ack_msdu_count;
	rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count;
	rx_stats->other_msdu_count += ppdu_info->other_msdu_count;

	/* Legacy preambles carry no NSS/MCS/TID; force sentinels so the
	 * indexed updates below land in the dedicated slots.
	 */
	if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A ||
	    ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) {
		ppdu_info->nss = 1;
		ppdu_info->mcs = HAL_RX_MAX_MCS;
		ppdu_info->tid = IEEE80211_NUM_TIDS;
	}

	if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX)
		rx_stats->coding_count[ppdu_info->ldpc] += num_msdu;

	/* <= : index IEEE80211_NUM_TIDS is the non-QoS/sentinel bucket. */
	if (ppdu_info->tid <= IEEE80211_NUM_TIDS)
		rx_stats->tid_count[ppdu_info->tid] += num_msdu;

	if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX)
		rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu;

	if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX)
		rx_stats->reception_type[ppdu_info->reception_type] += num_msdu;

	if (ppdu_info->is_stbc)
		rx_stats->stbc_count += num_msdu;

	if (ppdu_info->beamformed)
		rx_stats->beamformed_count += num_msdu;

	if (ppdu_info->num_mpdu_fcs_ok > 1)
		rx_stats->ampdu_msdu_count += num_msdu;
	else
		rx_stats->non_ampdu_msdu_count += num_msdu;

	rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok;
	rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err;
	rx_stats->dcm_count += ppdu_info->dcm;

	rx_stats->rx_duration += ppdu_info->rx_duration;
	peer->rx_duration = rx_stats->rx_duration;

	if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS) {
		rx_stats->pkt_stats.nss_count[ppdu_info->nss - 1] += num_msdu;
		rx_stats->byte_stats.nss_count[ppdu_info->nss - 1] += ppdu_info->mpdu_len;
	}

	if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11N &&
	    ppdu_info->mcs <= HAL_RX_MAX_MCS_HT) {
		rx_stats->pkt_stats.ht_mcs_count[ppdu_info->mcs] += num_msdu;
		rx_stats->byte_stats.ht_mcs_count[ppdu_info->mcs] += ppdu_info->mpdu_len;
		/* To fit into rate table for HT packets */
		ppdu_info->mcs = ppdu_info->mcs % 8;
	}

	if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11AC &&
	    ppdu_info->mcs <= HAL_RX_MAX_MCS_VHT) {
		rx_stats->pkt_stats.vht_mcs_count[ppdu_info->mcs] += num_msdu;
		rx_stats->byte_stats.vht_mcs_count[ppdu_info->mcs] += ppdu_info->mpdu_len;
	}

	if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11AX &&
	    ppdu_info->mcs <= HAL_RX_MAX_MCS_HE) {
		rx_stats->pkt_stats.he_mcs_count[ppdu_info->mcs] += num_msdu;
		rx_stats->byte_stats.he_mcs_count[ppdu_info->mcs] += ppdu_info->mpdu_len;
	}

	if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11BE &&
	    ppdu_info->mcs <= HAL_RX_MAX_MCS_BE) {
		rx_stats->pkt_stats.be_mcs_count[ppdu_info->mcs] += num_msdu;
		rx_stats->byte_stats.be_mcs_count[ppdu_info->mcs] += ppdu_info->mpdu_len;
	}

	if ((ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A ||
	     ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) &&
	     ppdu_info->rate < HAL_RX_LEGACY_RATE_INVALID) {
		rx_stats->pkt_stats.legacy_count[ppdu_info->rate] += num_msdu;
		rx_stats->byte_stats.legacy_count[ppdu_info->rate] += ppdu_info->mpdu_len;
	}

	if (ppdu_info->gi < HAL_RX_GI_MAX) {
		rx_stats->pkt_stats.gi_count[ppdu_info->gi] += num_msdu;
		rx_stats->byte_stats.gi_count[ppdu_info->gi] += ppdu_info->mpdu_len;
	}

	if (ppdu_info->bw < HAL_RX_BW_MAX) {
		rx_stats->pkt_stats.bw_count[ppdu_info->bw] += num_msdu;
		rx_stats->byte_stats.bw_count[ppdu_info->bw] += ppdu_info->mpdu_len;
	}

	/* Uses the (possibly HT-normalized) mcs/nss/gi values above. */
	ath12k_dp_mon_rx_update_peer_rate_table_stats(rx_stats, ppdu_info,
						      NULL, num_msdu);
}
EXPORT_SYMBOL(ath12k_dp_mon_rx_update_peer_su_stats);
965 
966 void ath12k_dp_mon_rx_process_ulofdma(struct hal_rx_mon_ppdu_info *ppdu_info)
967 {
968 	struct hal_rx_user_status *rx_user_status;
969 	u32 num_users, i, mu_ul_user_v0_word0, mu_ul_user_v0_word1, ru_size;
970 
971 	if (!(ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_MIMO ||
972 	      ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_OFDMA ||
973 	      ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_OFDMA_MIMO))
974 		return;
975 
976 	num_users = ppdu_info->num_users;
977 	if (num_users > HAL_MAX_UL_MU_USERS)
978 		num_users = HAL_MAX_UL_MU_USERS;
979 
980 	for (i = 0; i < num_users; i++) {
981 		rx_user_status = &ppdu_info->userstats[i];
982 		mu_ul_user_v0_word0 =
983 			rx_user_status->ul_ofdma_user_v0_word0;
984 		mu_ul_user_v0_word1 =
985 			rx_user_status->ul_ofdma_user_v0_word1;
986 
987 		if (u32_get_bits(mu_ul_user_v0_word0,
988 				 HAL_RX_UL_OFDMA_USER_INFO_V0_W0_VALID) &&
989 		    !u32_get_bits(mu_ul_user_v0_word0,
990 				  HAL_RX_UL_OFDMA_USER_INFO_V0_W0_VER)) {
991 			rx_user_status->mcs =
992 				u32_get_bits(mu_ul_user_v0_word1,
993 					     HAL_RX_UL_OFDMA_USER_INFO_V0_W1_MCS);
994 			rx_user_status->nss =
995 				u32_get_bits(mu_ul_user_v0_word1,
996 					     HAL_RX_UL_OFDMA_USER_INFO_V0_W1_NSS) + 1;
997 
998 			rx_user_status->ofdma_info_valid = 1;
999 			rx_user_status->ul_ofdma_ru_start_index =
1000 				u32_get_bits(mu_ul_user_v0_word1,
1001 					     HAL_RX_UL_OFDMA_USER_INFO_V0_W1_RU_START);
1002 
1003 			ru_size = u32_get_bits(mu_ul_user_v0_word1,
1004 					       HAL_RX_UL_OFDMA_USER_INFO_V0_W1_RU_SIZE);
1005 			rx_user_status->ul_ofdma_ru_width = ru_size;
1006 			rx_user_status->ul_ofdma_ru_size = ru_size;
1007 		}
1008 		rx_user_status->ldpc = u32_get_bits(mu_ul_user_v0_word1,
1009 						    HAL_RX_UL_OFDMA_USER_INFO_V0_W1_LDPC);
1010 	}
1011 	ppdu_info->ldpc = 1;
1012 }
1013 EXPORT_SYMBOL(ath12k_dp_mon_rx_process_ulofdma);
1014 
1015 static void
1016 ath12k_dp_mon_rx_update_user_stats(struct ath12k_base *ab,
1017 				   struct hal_rx_mon_ppdu_info *ppdu_info,
1018 				   u32 uid)
1019 {
1020 	struct ath12k_rx_peer_stats *rx_stats = NULL;
1021 	struct hal_rx_user_status *user_stats = &ppdu_info->userstats[uid];
1022 	struct ath12k_dp_link_peer *peer;
1023 	u32 num_msdu;
1024 	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
1025 
1026 	if (user_stats->ast_index == 0 || user_stats->ast_index == 0xFFFF)
1027 		return;
1028 
1029 	peer = ath12k_dp_link_peer_find_by_ast(dp, user_stats->ast_index);
1030 
1031 	if (!peer) {
1032 		ath12k_warn(ab, "peer ast idx %d can't be found\n",
1033 			    user_stats->ast_index);
1034 		return;
1035 	}
1036 
1037 	peer->rssi_comb = ppdu_info->rssi_comb;
1038 	ewma_avg_rssi_add(&peer->avg_rssi, ppdu_info->rssi_comb);
1039 	rx_stats = peer->peer_stats.rx_stats;
1040 	if (!rx_stats)
1041 		return;
1042 
1043 	num_msdu = user_stats->tcp_msdu_count + user_stats->tcp_ack_msdu_count +
1044 		   user_stats->udp_msdu_count + user_stats->other_msdu_count;
1045 
1046 	rx_stats->num_msdu += num_msdu;
1047 	rx_stats->tcp_msdu_count += user_stats->tcp_msdu_count +
1048 				    user_stats->tcp_ack_msdu_count;
1049 	rx_stats->udp_msdu_count += user_stats->udp_msdu_count;
1050 	rx_stats->other_msdu_count += user_stats->other_msdu_count;
1051 
1052 	if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX)
1053 		rx_stats->coding_count[ppdu_info->ldpc] += num_msdu;
1054 
1055 	if (user_stats->tid <= IEEE80211_NUM_TIDS)
1056 		rx_stats->tid_count[user_stats->tid] += num_msdu;
1057 
1058 	if (user_stats->preamble_type < HAL_RX_PREAMBLE_MAX)
1059 		rx_stats->pream_cnt[user_stats->preamble_type] += num_msdu;
1060 
1061 	if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX)
1062 		rx_stats->reception_type[ppdu_info->reception_type] += num_msdu;
1063 
1064 	if (ppdu_info->is_stbc)
1065 		rx_stats->stbc_count += num_msdu;
1066 
1067 	if (ppdu_info->beamformed)
1068 		rx_stats->beamformed_count += num_msdu;
1069 
1070 	if (user_stats->mpdu_cnt_fcs_ok > 1)
1071 		rx_stats->ampdu_msdu_count += num_msdu;
1072 	else
1073 		rx_stats->non_ampdu_msdu_count += num_msdu;
1074 
1075 	rx_stats->num_mpdu_fcs_ok += user_stats->mpdu_cnt_fcs_ok;
1076 	rx_stats->num_mpdu_fcs_err += user_stats->mpdu_cnt_fcs_err;
1077 	rx_stats->dcm_count += ppdu_info->dcm;
1078 	if (ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_OFDMA ||
1079 	    ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_OFDMA_MIMO)
1080 		rx_stats->ru_alloc_cnt[user_stats->ul_ofdma_ru_size] += num_msdu;
1081 
1082 	rx_stats->rx_duration += ppdu_info->rx_duration;
1083 	peer->rx_duration = rx_stats->rx_duration;
1084 
1085 	if (user_stats->nss > 0 && user_stats->nss <= HAL_RX_MAX_NSS) {
1086 		rx_stats->pkt_stats.nss_count[user_stats->nss - 1] += num_msdu;
1087 		rx_stats->byte_stats.nss_count[user_stats->nss - 1] +=
1088 						user_stats->mpdu_ok_byte_count;
1089 	}
1090 
1091 	if (user_stats->preamble_type == HAL_RX_PREAMBLE_11AX &&
1092 	    user_stats->mcs <= HAL_RX_MAX_MCS_HE) {
1093 		rx_stats->pkt_stats.he_mcs_count[user_stats->mcs] += num_msdu;
1094 		rx_stats->byte_stats.he_mcs_count[user_stats->mcs] +=
1095 						user_stats->mpdu_ok_byte_count;
1096 	}
1097 
1098 	if (ppdu_info->gi < HAL_RX_GI_MAX) {
1099 		rx_stats->pkt_stats.gi_count[ppdu_info->gi] += num_msdu;
1100 		rx_stats->byte_stats.gi_count[ppdu_info->gi] +=
1101 						user_stats->mpdu_ok_byte_count;
1102 	}
1103 
1104 	if (ppdu_info->bw < HAL_RX_BW_MAX) {
1105 		rx_stats->pkt_stats.bw_count[ppdu_info->bw] += num_msdu;
1106 		rx_stats->byte_stats.bw_count[ppdu_info->bw] +=
1107 						user_stats->mpdu_ok_byte_count;
1108 	}
1109 
1110 	ath12k_dp_mon_rx_update_peer_rate_table_stats(rx_stats, ppdu_info,
1111 						      user_stats, num_msdu);
1112 }
1113 
1114 void
1115 ath12k_dp_mon_rx_update_peer_mu_stats(struct ath12k_base *ab,
1116 				      struct hal_rx_mon_ppdu_info *ppdu_info)
1117 {
1118 	u32 num_users, i;
1119 
1120 	num_users = ppdu_info->num_users;
1121 	if (num_users > HAL_MAX_UL_MU_USERS)
1122 		num_users = HAL_MAX_UL_MU_USERS;
1123 
1124 	for (i = 0; i < num_users; i++)
1125 		ath12k_dp_mon_rx_update_user_stats(ab, ppdu_info, i);
1126 }
1127 EXPORT_SYMBOL(ath12k_dp_mon_rx_update_peer_mu_stats);
1128