1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
4 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
5 */
6
7 #include "core.h"
8 #include "peer.h"
9 #include "htc.h"
10 #include "dp_htt.h"
11 #include "debugfs_htt_stats.h"
12 #include "debugfs.h"
13
/* HTC tx-completion callback for the HTT endpoint: the command buffer
 * has been consumed by HTC, so simply release it.
 */
static void ath12k_dp_htt_htc_tx_complete(struct ath12k_base *ab,
					  struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}
19
ath12k_dp_htt_connect(struct ath12k_dp * dp)20 int ath12k_dp_htt_connect(struct ath12k_dp *dp)
21 {
22 struct ath12k_htc_svc_conn_req conn_req = {};
23 struct ath12k_htc_svc_conn_resp conn_resp = {};
24 int status;
25
26 conn_req.ep_ops.ep_tx_complete = ath12k_dp_htt_htc_tx_complete;
27 conn_req.ep_ops.ep_rx_complete = ath12k_dp_htt_htc_t2h_msg_handler;
28
29 /* connect to control service */
30 conn_req.service_id = ATH12K_HTC_SVC_ID_HTT_DATA_MSG;
31
32 status = ath12k_htc_connect_service(&dp->ab->htc, &conn_req,
33 &conn_resp);
34
35 if (status)
36 return status;
37
38 dp->eid = conn_resp.eid;
39
40 return 0;
41 }
42
/* Locate the user-stats slot for @peer_id, or the first unused slot if the
 * peer has not been seen in this PPDU yet.  Returns the slot index, or
 * -EINVAL when every usable slot is already taken by other peers.
 */
static int ath12k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
				      u16 peer_id)
{
	struct htt_ppdu_user_stats *usr;
	int idx;

	for (idx = 0; idx < HTT_PPDU_STATS_MAX_USERS - 1; idx++) {
		usr = &ppdu_stats->user_stats[idx];

		/* the first invalid slot terminates the search */
		if (!usr->is_valid_peer_id)
			return idx;

		if (usr->peer_id == peer_id)
			return idx;
	}

	return -EINVAL;
}
59
/* TLV iterator callback used while pulling PPDU stats: copy one recognized
 * TLV into the struct htt_ppdu_stats_info passed via @data.
 *
 * Each handled tag is length-checked against its target structure before
 * the memcpy; a short TLV yields -EINVAL.  Tags not listed here are
 * silently ignored (return 0).  For the per-user tags, the destination
 * slot is chosen by ath12k_get_ppdu_user_index() from the TLV's
 * sw_peer_id, and the tag bit is recorded in tlv_flags so later
 * consumers know which TLVs are populated.
 */
static int ath12k_htt_tlv_ppdu_stats_parse(struct ath12k_base *ab,
					   u16 tag, u16 len, const void *ptr,
					   void *data)
{
	const struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *ba_status;
	const struct htt_ppdu_stats_usr_cmpltn_cmn *cmplt_cmn;
	const struct htt_ppdu_stats_user_rate *user_rate;
	struct htt_ppdu_stats_info *ppdu_info;
	struct htt_ppdu_user_stats *user_stats;
	int cur_user;
	u16 peer_id;

	ppdu_info = data;

	switch (tag) {
	case HTT_PPDU_STATS_TAG_COMMON:
		/* PPDU-wide fields (duration, num_users, ...) */
		if (len < sizeof(struct htt_ppdu_stats_common)) {
			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}
		memcpy(&ppdu_info->ppdu_stats.common, ptr,
		       sizeof(struct htt_ppdu_stats_common));
		break;
	case HTT_PPDU_STATS_TAG_USR_RATE:
		/* per-user tx rate information */
		if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}
		user_rate = ptr;
		peer_id = le16_to_cpu(user_rate->sw_peer_id);
		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy(&user_stats->rate, ptr,
		       sizeof(struct htt_ppdu_stats_user_rate));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
		/* per-user completion counters (mpdu tried/success, retries) */
		if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		cmplt_cmn = ptr;
		peer_id = le16_to_cpu(cmplt_cmn->sw_peer_id);
		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy(&user_stats->cmpltn_cmn, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
		/* per-user ack/block-ack status (success bytes/msdus, tid) */
		if (len <
		    sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		ba_status = ptr;
		peer_id = le16_to_cpu(ba_status->sw_peer_id);
		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy(&user_stats->ack_ba, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
		user_stats->tlv_flags |= BIT(tag);
		break;
	}
	return 0;
}
147
/* Walk a buffer of HTT TLVs and invoke @iter on each TLV's payload.
 *
 * The buffer is validated as it is consumed: a remainder too small to
 * hold a TLV header, or a TLV whose declared length exceeds the bytes
 * left, aborts with -EINVAL.  Note that an error returned by @iter is
 * only propagated when it is -ENOMEM; any other iterator error is
 * deliberately ignored and parsing continues with the next TLV.
 *
 * Returns 0 when the whole buffer was walked.
 */
int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
			   int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
				       const void *ptr, void *data),
			   void *data)
{
	const struct htt_tlv *tlv;
	const void *begin = ptr;
	u16 tlv_tag, tlv_len;
	int ret = -EINVAL;

	while (len > 0) {
		if (len < sizeof(*tlv)) {
			ath12k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}
		tlv = (struct htt_tlv *)ptr;
		tlv_tag = le32_get_bits(tlv->header, HTT_TLV_TAG);
		tlv_len = le32_get_bits(tlv->header, HTT_TLV_LEN);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		/* tlv_len counts payload bytes only (header already consumed) */
		if (tlv_len > len) {
			ath12k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}
		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
		if (ret == -ENOMEM)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}
	return 0;
}
184
/* Fold one user's PPDU stats TLVs into the matching link peer's tx-rate
 * info and the pdev's per-peer tx stats.
 *
 * Requires the USR_RATE TLV for this @user slot; without it nothing is
 * reported.  The completion-common and ack/BA TLVs are optional and only
 * refine retry counters, success bytes/packets and the tid when present.
 *
 * Peer lookup happens under rcu_read_lock(); the peer's txrate fields
 * are updated under dp->dp_lock.
 */
static void
ath12k_update_per_peer_tx_stats(struct ath12k_pdev_dp *dp_pdev,
				struct htt_ppdu_stats *ppdu_stats, u8 user)
{
	struct ath12k_dp *dp = dp_pdev->dp;
	struct ath12k_base *ab = dp->ab;
	struct ath12k_dp_link_peer *peer;
	struct htt_ppdu_stats_user_rate *user_rate;
	struct ath12k_per_peer_tx_stats *peer_stats = &dp_pdev->peer_tx_stats;
	struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
	struct htt_ppdu_stats_common *common = &ppdu_stats->common;
	int ret;
	u8 flags, mcs, nss, bw, sgi, dcm, ppdu_type, rate_idx = 0;
	u32 v, succ_bytes = 0;
	u16 tones, rate = 0, succ_pkts = 0;
	u32 tx_duration = 0;
	u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
	u16 tx_retry_failed = 0, tx_retry_count = 0;
	bool is_ampdu = false, is_ofdma;

	/* no rate TLV -> nothing meaningful to report for this user */
	if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
		return;

	if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON)) {
		is_ampdu =
			HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);
		/* failed = tried - succeeded for this PPDU */
		tx_retry_failed =
			__le16_to_cpu(usr_stats->cmpltn_cmn.mpdu_tried) -
			__le16_to_cpu(usr_stats->cmpltn_cmn.mpdu_success);
		tx_retry_count =
			HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
			HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
	}

	if (usr_stats->tlv_flags &
	    BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
		succ_bytes = le32_to_cpu(usr_stats->ack_ba.success_bytes);
		succ_pkts = le32_get_bits(usr_stats->ack_ba.info,
					  HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M);
		tid = le32_get_bits(usr_stats->ack_ba.info,
				    HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM);
	}

	if (common->fes_duration_us)
		tx_duration = le32_to_cpu(common->fes_duration_us);

	user_rate = &usr_stats->rate;
	flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
	/* NOTE(review): firmware bw encoding appears offset by 2 from the
	 * value ath12k_mac_bw_to_mac80211_bw() expects — confirm against
	 * the HTT interface definition.
	 */
	bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
	nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
	mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
	sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
	dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);

	ppdu_type = HTT_USR_RATE_PPDU_TYPE(user_rate->info1);
	is_ofdma = (ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA) ||
		   (ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA);

	/* Note: If host configured fixed rates and in some other special
	 * cases, the broadcast/management frames are sent in different rates.
	 * Firmware rate's control to be skipped for this?
	 */

	/* sanity-check firmware-reported mcs/nss per preamble type */
	if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH12K_HE_MCS_MAX) {
		ath12k_warn(ab, "Invalid HE mcs %d peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH12K_VHT_MCS_MAX) {
		ath12k_warn(ab, "Invalid VHT mcs %d peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH12K_HT_MCS_MAX || nss < 1)) {
		ath12k_warn(ab, "Invalid HT mcs %d nss %d peer stats",
			    mcs, nss);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
		ret = ath12k_mac_hw_ratecode_to_legacy_rate(mcs,
							    flags,
							    &rate_idx,
							    &rate);
		if (ret < 0)
			return;
	}

	rcu_read_lock();
	peer = ath12k_dp_link_peer_find_by_peerid(dp_pdev, usr_stats->peer_id);

	if (!peer || !peer->sta) {
		rcu_read_unlock();
		return;
	}

	spin_lock_bh(&dp->dp_lock);

	memset(&peer->txrate, 0, sizeof(peer->txrate));

	peer->txrate.bw = ath12k_mac_bw_to_mac80211_bw(bw);

	/* translate the preamble-specific rate fields to struct rate_info */
	switch (flags) {
	case WMI_RATE_PREAMBLE_OFDM:
		peer->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_CCK:
		peer->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_HT:
		/* HT mcs encodes nss: 8 mcs values per spatial stream */
		peer->txrate.mcs = mcs + 8 * (nss - 1);
		peer->txrate.flags = RATE_INFO_FLAGS_MCS;
		if (sgi)
			peer->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_VHT:
		peer->txrate.mcs = mcs;
		peer->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
		if (sgi)
			peer->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_HE:
		peer->txrate.mcs = mcs;
		peer->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
		peer->txrate.he_dcm = dcm;
		peer->txrate.he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
		/* RU width in tones from the inclusive [ru_start, ru_end] */
		tones = le16_to_cpu(user_rate->ru_end) -
			le16_to_cpu(user_rate->ru_start) + 1;
		v = ath12k_he_ru_tones_to_nl80211_he_ru_alloc(tones);
		peer->txrate.he_ru_alloc = v;
		if (is_ofdma)
			peer->txrate.bw = RATE_INFO_BW_HE_RU;
		break;
	case WMI_RATE_PREAMBLE_EHT:
		peer->txrate.mcs = mcs;
		peer->txrate.flags = RATE_INFO_FLAGS_EHT_MCS;
		peer->txrate.he_dcm = dcm;
		peer->txrate.eht_gi = ath12k_mac_eht_gi_to_nl80211_eht_gi(sgi);
		tones = le16_to_cpu(user_rate->ru_end) -
			le16_to_cpu(user_rate->ru_start) + 1;
		v = ath12k_mac_eht_ru_tones_to_nl80211_eht_ru_alloc(tones);
		peer->txrate.eht_ru_alloc = v;
		if (is_ofdma)
			peer->txrate.bw = RATE_INFO_BW_EHT_RU;
		break;
	}

	peer->tx_retry_failed += tx_retry_failed;
	peer->tx_retry_count += tx_retry_count;
	peer->txrate.nss = nss;
	peer->tx_duration += tx_duration;
	memcpy(&peer->last_txrate, &peer->txrate, sizeof(struct rate_info));

	spin_unlock_bh(&dp->dp_lock);

	/* PPDU stats reported for mgmt packet doesn't have valid tx bytes.
	 * So skip peer stats update for mgmt packets.
	 */
	if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
		memset(peer_stats, 0, sizeof(*peer_stats));
		peer_stats->succ_pkts = succ_pkts;
		peer_stats->succ_bytes = succ_bytes;
		peer_stats->is_ampdu = is_ampdu;
		peer_stats->duration = tx_duration;
		peer_stats->ba_fails =
			HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
			HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
	}

	rcu_read_unlock();
}
356
ath12k_htt_update_ppdu_stats(struct ath12k_pdev_dp * dp_pdev,struct htt_ppdu_stats * ppdu_stats)357 static void ath12k_htt_update_ppdu_stats(struct ath12k_pdev_dp *dp_pdev,
358 struct htt_ppdu_stats *ppdu_stats)
359 {
360 u8 user;
361
362 for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
363 ath12k_update_per_peer_tx_stats(dp_pdev, ppdu_stats, user);
364 }
365
/* Return the PPDU stats descriptor tracking @ppdu_id on this pdev,
 * allocating a fresh one when no match exists.
 *
 * Caller must hold dp_pdev->ppdu_list_lock (asserted below); allocation
 * therefore uses GFP_ATOMIC.  When the list has grown past
 * HTT_PPDU_DESC_MAX_DEPTH, the oldest descriptor is flushed into the
 * per-peer tx stats and freed before the new one is added.
 *
 * Returns NULL on allocation failure.
 */
static
struct htt_ppdu_stats_info *ath12k_dp_htt_get_ppdu_desc(struct ath12k_pdev_dp *dp_pdev,
							u32 ppdu_id)
{
	struct htt_ppdu_stats_info *ppdu_info;

	lockdep_assert_held(&dp_pdev->ppdu_list_lock);
	if (!list_empty(&dp_pdev->ppdu_stats_info)) {
		list_for_each_entry(ppdu_info, &dp_pdev->ppdu_stats_info, list) {
			if (ppdu_info->ppdu_id == ppdu_id)
				return ppdu_info;
		}

		/* cap the list: evict and account the oldest entry */
		if (dp_pdev->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
			ppdu_info = list_first_entry(&dp_pdev->ppdu_stats_info,
						     typeof(*ppdu_info), list);
			list_del(&ppdu_info->list);
			dp_pdev->ppdu_stat_list_depth--;
			ath12k_htt_update_ppdu_stats(dp_pdev, &ppdu_info->ppdu_stats);
			kfree(ppdu_info);
		}
	}

	ppdu_info = kzalloc_obj(*ppdu_info, GFP_ATOMIC);
	if (!ppdu_info)
		return NULL;

	list_add_tail(&ppdu_info->list, &dp_pdev->ppdu_stats_info);
	dp_pdev->ppdu_stat_list_depth++;

	return ppdu_info;
}
398
ath12k_copy_to_delay_stats(struct ath12k_dp_link_peer * peer,struct htt_ppdu_user_stats * usr_stats)399 static void ath12k_copy_to_delay_stats(struct ath12k_dp_link_peer *peer,
400 struct htt_ppdu_user_stats *usr_stats)
401 {
402 peer->ppdu_stats_delayba.sw_peer_id = le16_to_cpu(usr_stats->rate.sw_peer_id);
403 peer->ppdu_stats_delayba.info0 = le32_to_cpu(usr_stats->rate.info0);
404 peer->ppdu_stats_delayba.ru_end = le16_to_cpu(usr_stats->rate.ru_end);
405 peer->ppdu_stats_delayba.ru_start = le16_to_cpu(usr_stats->rate.ru_start);
406 peer->ppdu_stats_delayba.info1 = le32_to_cpu(usr_stats->rate.info1);
407 peer->ppdu_stats_delayba.rate_flags = le32_to_cpu(usr_stats->rate.rate_flags);
408 peer->ppdu_stats_delayba.resp_rate_flags =
409 le32_to_cpu(usr_stats->rate.resp_rate_flags);
410
411 peer->delayba_flag = true;
412 }
413
ath12k_copy_to_bar(struct ath12k_dp_link_peer * peer,struct htt_ppdu_user_stats * usr_stats)414 static void ath12k_copy_to_bar(struct ath12k_dp_link_peer *peer,
415 struct htt_ppdu_user_stats *usr_stats)
416 {
417 usr_stats->rate.sw_peer_id = cpu_to_le16(peer->ppdu_stats_delayba.sw_peer_id);
418 usr_stats->rate.info0 = cpu_to_le32(peer->ppdu_stats_delayba.info0);
419 usr_stats->rate.ru_end = cpu_to_le16(peer->ppdu_stats_delayba.ru_end);
420 usr_stats->rate.ru_start = cpu_to_le16(peer->ppdu_stats_delayba.ru_start);
421 usr_stats->rate.info1 = cpu_to_le32(peer->ppdu_stats_delayba.info1);
422 usr_stats->rate.rate_flags = cpu_to_le32(peer->ppdu_stats_delayba.rate_flags);
423 usr_stats->rate.resp_rate_flags =
424 cpu_to_le32(peer->ppdu_stats_delayba.resp_rate_flags);
425
426 peer->delayba_flag = false;
427 }
428
/* Handle an HTT_T2H_MSG_TYPE_PPDU_STATS_IND message: validate the payload,
 * parse its TLVs into the per-pdev PPDU descriptor, and apply the
 * delayed-BA backup/restore logic for the involved peers.
 *
 * Validation: payload size from the message header must fit in the skb,
 * the pdev id must map to a valid radio, and the parsed num_users must
 * stay below HTT_PPDU_STATS_MAX_USERS.
 *
 * Locking: pdev lookup is done under rcu_read_lock(); the descriptor list
 * and TLV parsing are protected by dp_pdev->ppdu_list_lock.
 *
 * Returns 0 on success or a negative error code.
 */
static int ath12k_htt_pull_ppdu_stats(struct ath12k_base *ab,
				      struct sk_buff *skb)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	struct ath12k_htt_ppdu_stats_msg *msg;
	struct htt_ppdu_stats_info *ppdu_info;
	struct ath12k_dp_link_peer *peer = NULL;
	struct htt_ppdu_user_stats *usr_stats = NULL;
	u32 peer_id = 0;
	struct ath12k_pdev_dp *dp_pdev;
	int ret, i;
	u8 pdev_id, pdev_idx;
	u32 ppdu_id, len;

	msg = (struct ath12k_htt_ppdu_stats_msg *)skb->data;
	len = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE);
	if (len > (skb->len - struct_size(msg, data, 0))) {
		ath12k_warn(ab,
			    "HTT PPDU STATS event has unexpected payload size %u, should be smaller than %u\n",
			    len, skb->len);
		return -EINVAL;
	}

	pdev_id = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PDEV_ID);
	ppdu_id = le32_to_cpu(msg->ppdu_id);

	pdev_idx = DP_HW2SW_MACID(pdev_id);
	if (pdev_idx >= MAX_RADIOS) {
		ath12k_warn(ab, "HTT PPDU STATS invalid pdev id %u", pdev_id);
		return -EINVAL;
	}

	rcu_read_lock();

	dp_pdev = ath12k_dp_to_pdev_dp(dp, pdev_idx);
	if (!dp_pdev) {
		ret = -EINVAL;
		goto exit;
	}

	spin_lock_bh(&dp_pdev->ppdu_list_lock);
	ppdu_info = ath12k_dp_htt_get_ppdu_desc(dp_pdev, ppdu_id);
	if (!ppdu_info) {
		spin_unlock_bh(&dp_pdev->ppdu_list_lock);
		ret = -EINVAL;
		goto exit;
	}

	ppdu_info->ppdu_id = ppdu_id;
	ret = ath12k_dp_htt_tlv_iter(ab, msg->data, len,
				     ath12k_htt_tlv_ppdu_stats_parse,
				     (void *)ppdu_info);
	if (ret) {
		spin_unlock_bh(&dp_pdev->ppdu_list_lock);
		ath12k_warn(ab, "Failed to parse tlv %d\n", ret);
		goto exit;
	}

	if (ppdu_info->ppdu_stats.common.num_users >= HTT_PPDU_STATS_MAX_USERS) {
		spin_unlock_bh(&dp_pdev->ppdu_list_lock);
		ath12k_warn(ab,
			    "HTT PPDU STATS event has unexpected num_users %u, should be smaller than %u\n",
			    ppdu_info->ppdu_stats.common.num_users,
			    HTT_PPDU_STATS_MAX_USERS);
		ret = -EINVAL;
		goto exit;
	}

	/* back up data rate tlv for all peers */
	if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_DATA &&
	    (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON)) &&
	    ppdu_info->delay_ba) {
		for (i = 0; i < ppdu_info->ppdu_stats.common.num_users; i++) {
			peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id;
			peer = ath12k_dp_link_peer_find_by_peerid(dp_pdev, peer_id);
			if (!peer)
				continue;

			usr_stats = &ppdu_info->ppdu_stats.user_stats[i];
			if (usr_stats->delay_ba)
				ath12k_copy_to_delay_stats(peer, usr_stats);
		}
	}

	/* restore all peers' data rate tlv to mu-bar tlv */
	if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_BAR &&
	    (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON))) {
		for (i = 0; i < ppdu_info->bar_num_users; i++) {
			peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id;
			peer = ath12k_dp_link_peer_find_by_peerid(dp_pdev, peer_id);
			if (!peer)
				continue;

			usr_stats = &ppdu_info->ppdu_stats.user_stats[i];
			if (peer->delayba_flag)
				ath12k_copy_to_bar(peer, usr_stats);
		}
	}

	spin_unlock_bh(&dp_pdev->ppdu_list_lock);

exit:
	rcu_read_unlock();

	return ret;
}
535
/* Handle an MLO timestamp-offset indication: copy the firmware-provided
 * synchronization timestamps and offsets into the pdev's timestamp state.
 *
 * The radio lookup may return NULL for a pdev that is not yet started;
 * the message is silently discarded in that case.  The timestamp fields
 * are updated under ar->data_lock.
 */
static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab,
						struct sk_buff *skb)
{
	struct ath12k_htt_mlo_offset_msg *msg;
	struct ath12k_pdev *pdev;
	struct ath12k *ar;
	u8 pdev_id;

	msg = (struct ath12k_htt_mlo_offset_msg *)skb->data;
	pdev_id = u32_get_bits(__le32_to_cpu(msg->info),
			       HTT_T2H_MLO_OFFSET_INFO_PDEV_ID);

	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
	if (!ar) {
		/* It is possible that the ar is not yet active (started).
		 * The above function will only look for the active pdev
		 * and hence %NULL return is possible. Just silently
		 * discard this message
		 */
		goto exit;
	}

	spin_lock_bh(&ar->data_lock);
	pdev = ar->pdev;

	pdev->timestamp.info = __le32_to_cpu(msg->info);
	pdev->timestamp.sync_timestamp_lo_us = __le32_to_cpu(msg->sync_timestamp_lo_us);
	pdev->timestamp.sync_timestamp_hi_us = __le32_to_cpu(msg->sync_timestamp_hi_us);
	pdev->timestamp.mlo_offset_lo = __le32_to_cpu(msg->mlo_offset_lo);
	pdev->timestamp.mlo_offset_hi = __le32_to_cpu(msg->mlo_offset_hi);
	pdev->timestamp.mlo_offset_clks = __le32_to_cpu(msg->mlo_offset_clks);
	pdev->timestamp.mlo_comp_clks = __le32_to_cpu(msg->mlo_comp_clks);
	pdev->timestamp.mlo_comp_timer = __le32_to_cpu(msg->mlo_comp_timer);

	spin_unlock_bh(&ar->data_lock);
exit:
	rcu_read_unlock();
}
575
/* HTC rx-completion callback for the HTT endpoint: dispatch a target-to-host
 * HTT message by type.
 *
 * The message type is read from the first word of the payload; unhandled
 * types are only logged at debug level.  The skb is always consumed here,
 * regardless of message type or handler outcome.
 */
void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab,
				       struct sk_buff *skb)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
	enum htt_t2h_msg_type type;
	u16 peer_id;
	u8 vdev_id;
	u8 mac_addr[ETH_ALEN];
	u16 peer_mac_h16;
	u16 ast_hash = 0;
	u16 hw_peer_id;

	type = le32_get_bits(resp->version_msg.version, HTT_T2H_MSG_TYPE);

	ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type);

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF:
		/* target HTT version reply; wakes the waiter in
		 * ath12k_dp_tx_htt_h2t_ver_req_msg()
		 */
		dp->htt_tgt_ver_major = le32_get_bits(resp->version_msg.version,
						      HTT_T2H_VERSION_CONF_MAJOR);
		dp->htt_tgt_ver_minor = le32_get_bits(resp->version_msg.version,
						      HTT_T2H_VERSION_CONF_MINOR);
		complete(&dp->htt_tgt_version_received);
		break;
	/* TODO: remove unused peer map versions after testing */
	case HTT_T2H_MSG_TYPE_PEER_MAP:
		/* v1 map: no ast hash / hw peer id available */
		vdev_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_VDEV_ID);
		peer_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_PEER_ID);
		peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
					     HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
		ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
				       peer_mac_h16, mac_addr);
		ath12k_dp_link_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP2:
		/* v2 map: ast hash in info2, hw peer id in info1 */
		vdev_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_VDEV_ID);
		peer_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_PEER_ID);
		peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
					     HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
		ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
				       peer_mac_h16, mac_addr);
		ast_hash = le32_get_bits(resp->peer_map_ev.info2,
					 HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL);
		hw_peer_id = le32_get_bits(resp->peer_map_ev.info1,
					   HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID);
		ath12k_dp_link_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
					      hw_peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP3:
		/* v3 map: both ast hash and hw peer id carried in info2 */
		vdev_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_VDEV_ID);
		peer_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_PEER_ID);
		peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
					     HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
		ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
				       peer_mac_h16, mac_addr);
		ast_hash = le32_get_bits(resp->peer_map_ev.info2,
					 HTT_T2H_PEER_MAP3_INFO2_AST_HASH_VAL);
		hw_peer_id = le32_get_bits(resp->peer_map_ev.info2,
					   HTT_T2H_PEER_MAP3_INFO2_HW_PEER_ID);
		ath12k_dp_link_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
					      hw_peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
	case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
		peer_id = le32_get_bits(resp->peer_unmap_ev.info,
					HTT_T2H_PEER_UNMAP_INFO_PEER_ID);
		ath12k_dp_link_peer_unmap_event(ab, peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
		ath12k_htt_pull_ppdu_stats(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
		ath12k_debugfs_htt_ext_stats_handler(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND:
		ath12k_htt_mlo_offset_event_handler(ab, skb);
		break;
	default:
		ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt event %d not handled\n",
			   type);
		break;
	}

	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath12k_dp_htt_htc_t2h_msg_handler);
669
/* Map a HAL ring type (plus the SW ring id for rxdma buf rings) to the
 * HTT ring id and ring direction expected by the SRNG setup / rx filter
 * HTT commands.
 *
 * Returns 0 on success, -EINVAL for an unsupported ring type or an
 * unexpected SW2RXDMA ring id.  Note that in the HAL_RXDMA_BUF error
 * case the output ids are still assigned before -EINVAL is returned;
 * callers must check the return value before using them.
 */
static int
ath12k_dp_tx_get_ring_id_type(struct ath12k_base *ab,
			      int mac_id, u32 ring_id,
			      enum hal_ring_type ring_type,
			      enum htt_srng_ring_type *htt_ring_type,
			      enum htt_srng_ring_id *htt_ring_id)
{
	int ret = 0;

	switch (ring_type) {
	case HAL_RXDMA_BUF:
		/* for some targets, host fills rx buffer to fw and fw fills to
		 * rxbuf ring for each rxdma
		 */
		if (!ab->hw_params->rx_mac_buf_ring) {
			if (!(ring_id == HAL_SRNG_SW2RXDMA_BUF0 ||
			      ring_id == HAL_SRNG_SW2RXDMA_BUF1)) {
				ret = -EINVAL;
			}
			*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
			*htt_ring_type = HTT_SW_TO_HW_RING;
		} else {
			if (ring_id == HAL_SRNG_SW2RXDMA_BUF0) {
				*htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
				*htt_ring_type = HTT_SW_TO_SW_RING;
			} else {
				*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
				*htt_ring_type = HTT_SW_TO_HW_RING;
			}
		}
		break;
	case HAL_RXDMA_DST:
		*htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
		*htt_ring_type = HTT_HW_TO_SW_RING;
		break;
	case HAL_RXDMA_MONITOR_BUF:
		*htt_ring_id = HTT_RX_MON_HOST2MON_BUF_RING;
		*htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case HAL_RXDMA_MONITOR_STATUS:
		*htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
		*htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case HAL_RXDMA_MONITOR_DST:
		*htt_ring_id = HTT_RX_MON_MON2HOST_DEST_RING;
		*htt_ring_type = HTT_HW_TO_SW_RING;
		break;
	case HAL_RXDMA_MONITOR_DESC:
		*htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
		*htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	default:
		ath12k_warn(ab, "Unsupported ring type in DP :%d\n", ring_type);
		ret = -EINVAL;
	}
	return ret;
}
727
/* Send an HTT SRING_SETUP command describing a host SRNG (base address,
 * entry size, head/tail pointer addresses, MSI target and interrupt
 * thresholds) so firmware can attach to the ring.
 *
 * Returns 0 on success or a negative error; the skb is freed locally on
 * every error path, while a successful ath12k_htc_send() transfers
 * ownership to HTC.
 */
int ath12k_dp_tx_htt_srng_setup(struct ath12k_base *ab, u32 ring_id,
				int mac_id, enum hal_ring_type ring_type)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	struct htt_srng_setup_cmd *cmd;
	struct hal_srng *srng = &ab->hal.srng_list[ring_id];
	struct hal_srng_params params;
	struct sk_buff *skb;
	u32 ring_entry_sz;
	int len = sizeof(*cmd);
	dma_addr_t hp_addr, tp_addr;
	enum htt_srng_ring_type htt_ring_type;
	enum htt_srng_ring_id htt_ring_id;
	int ret;

	skb = ath12k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	memset(&params, 0, sizeof(params));
	ath12k_hal_srng_get_params(ab, srng, &params);

	hp_addr = ath12k_hal_srng_get_hp_addr(ab, srng);
	tp_addr = ath12k_hal_srng_get_tp_addr(ab, srng);

	ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
					    ring_type, &htt_ring_type,
					    &htt_ring_id);
	if (ret)
		goto err_free;

	skb_put(skb, len);
	cmd = (struct htt_srng_setup_cmd *)skb->data;
	cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_SRING_SETUP,
				      HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE);
	/* rings firmware touches directly are addressed by hw mac id;
	 * SW-to-SW rings keep the raw mac_id
	 */
	if (htt_ring_type == HTT_SW_TO_HW_RING ||
	    htt_ring_type == HTT_HW_TO_SW_RING)
		cmd->info0 |= le32_encode_bits(DP_SW2HW_MACID(mac_id),
					       HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID);
	else
		cmd->info0 |= le32_encode_bits(mac_id,
					       HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID);
	cmd->info0 |= le32_encode_bits(htt_ring_type,
				       HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE);
	cmd->info0 |= le32_encode_bits(htt_ring_id,
				       HTT_SRNG_SETUP_CMD_INFO0_RING_ID);

	cmd->ring_base_addr_lo = cpu_to_le32(params.ring_base_paddr &
					     HAL_ADDR_LSB_REG_MASK);

	cmd->ring_base_addr_hi = cpu_to_le32((u64)params.ring_base_paddr >>
					     HAL_ADDR_MSB_REG_SHIFT);

	ret = ath12k_hal_srng_get_entrysize(ab, ring_type);
	if (ret < 0)
		goto err_free;

	ring_entry_sz = ret;

	/* HTT expects the entry size in 32-bit words, not bytes */
	ring_entry_sz >>= 2;
	cmd->info1 = le32_encode_bits(ring_entry_sz,
				      HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE);
	cmd->info1 |= le32_encode_bits(params.num_entries * ring_entry_sz,
				       HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE);
	cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP),
				       HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP);
	cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
				       HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP);
	cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_RING_PTR_SWAP),
				       HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP);
	if (htt_ring_type == HTT_SW_TO_HW_RING)
		cmd->info1 |= cpu_to_le32(HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS);

	cmd->ring_head_off32_remote_addr_lo = cpu_to_le32(lower_32_bits(hp_addr));
	cmd->ring_head_off32_remote_addr_hi = cpu_to_le32(upper_32_bits(hp_addr));

	cmd->ring_tail_off32_remote_addr_lo = cpu_to_le32(lower_32_bits(tp_addr));
	cmd->ring_tail_off32_remote_addr_hi = cpu_to_le32(upper_32_bits(tp_addr));

	cmd->ring_msi_addr_lo = cpu_to_le32(lower_32_bits(params.msi_addr));
	cmd->ring_msi_addr_hi = cpu_to_le32(upper_32_bits(params.msi_addr));
	cmd->msi_data = cpu_to_le32(params.msi_data);

	cmd->intr_info =
		le32_encode_bits(params.intr_batch_cntr_thres_entries * ring_entry_sz,
				 HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH);
	cmd->intr_info |=
		le32_encode_bits(params.intr_timer_thres_us >> 3,
				 HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH);

	cmd->info2 = 0;
	if (params.flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
		cmd->info2 = le32_encode_bits(params.low_threshold,
					      HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH);
	}

	/* NOTE(review): these debug prints pass __le32 fields straight to
	 * %x without le32_to_cpu() — output is byte-swapped on big-endian
	 * hosts (debug-only cosmetic issue).
	 */
	ath12k_dbg(ab, ATH12K_DBG_HAL,
		   "%s msi_addr_lo:0x%x, msi_addr_hi:0x%x, msi_data:0x%x\n",
		   __func__, cmd->ring_msi_addr_lo, cmd->ring_msi_addr_hi,
		   cmd->msi_data);

	ath12k_dbg(ab, ATH12K_DBG_HAL,
		   "ring_id:%d, ring_type:%d, intr_info:0x%x, flags:0x%x\n",
		   ring_id, ring_type, cmd->intr_info, cmd->info2);

	ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
	if (ret)
		goto err_free;

	return 0;

err_free:
	dev_kfree_skb_any(skb);

	return ret;
}
844
/* Request the target's HTT version and wait for the reply.
 *
 * The version request also negotiates the TCL metadata version (V1 in
 * factory-test mode, V2 otherwise).  The completion is signalled from
 * the HTT_T2H_MSG_TYPE_VERSION_CONF branch of the t2h handler.
 *
 * Returns 0 on success, -ENOMEM / send errors, -ETIMEDOUT when no reply
 * arrives within HTT_TARGET_VERSION_TIMEOUT_HZ, or -EOPNOTSUPP on a
 * major-version mismatch.
 */
int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	struct sk_buff *skb;
	struct htt_ver_req_cmd *cmd;
	int len = sizeof(*cmd);
	u32 metadata_version;
	int ret;

	init_completion(&dp->htt_tgt_version_received);

	skb = ath12k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_ver_req_cmd *)skb->data;
	cmd->ver_reg_info = le32_encode_bits(HTT_H2T_MSG_TYPE_VERSION_REQ,
					     HTT_OPTION_TAG);
	/* factory-test mode uses the older TCL metadata layout */
	metadata_version = ath12k_ftm_mode ? HTT_OPTION_TCL_METADATA_VER_V1 :
			   HTT_OPTION_TCL_METADATA_VER_V2;

	cmd->tcl_metadata_version = le32_encode_bits(HTT_TAG_TCL_METADATA_VERSION,
						     HTT_OPTION_TAG) |
				    le32_encode_bits(HTT_TCL_METADATA_VER_SZ,
						     HTT_OPTION_LEN) |
				    le32_encode_bits(metadata_version,
						     HTT_OPTION_VALUE);

	ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	ret = wait_for_completion_timeout(&dp->htt_tgt_version_received,
					  HTT_TARGET_VERSION_TIMEOUT_HZ);
	if (ret == 0) {
		ath12k_warn(ab, "htt target version request timed out\n");
		return -ETIMEDOUT;
	}

	if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
		ath12k_err(ab, "unsupported htt major version %d supported version is %d\n",
			   dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
		return -EOPNOTSUPP;
	}

	return 0;
}
895
/* Configure the PPDU stats TLV subscription: send one PPDU_STATS_CFG
 * command per rxdma instance belonging to this radio, with @mask
 * selecting the TLV types firmware should report.
 *
 * Returns 0 on success or a negative error from allocation/send; the skb
 * is freed locally only on send failure (HTC owns it otherwise).
 */
int ath12k_dp_tx_htt_h2t_ppdu_stats_req(struct ath12k *ar, u32 mask)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	struct htt_ppdu_stats_cfg_cmd *cmd;
	struct sk_buff *skb;
	int len = sizeof(*cmd);
	u8 pdev_mask;
	int i, ret;

	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
		skb = ath12k_htc_alloc_skb(ab, len);
		if (!skb)
			return -ENOMEM;

		skb_put(skb, len);
		cmd = (struct htt_ppdu_stats_cfg_cmd *)skb->data;

		/* one-hot pdev mask for the i-th rxdma of this pdev */
		pdev_mask = 1 << (i + ar->pdev_idx);

		cmd->msg = le32_encode_bits(HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
					    HTT_PPDU_STATS_CFG_MSG_TYPE) |
			   le32_encode_bits(pdev_mask,
					    HTT_PPDU_STATS_CFG_PDEV_ID) |
			   le32_encode_bits(mask,
					    HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK);

		ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
		if (ret) {
			dev_kfree_skb_any(skb);
			return ret;
		}
	}

	return 0;
}
930
/* ath12k_dp_tx_htt_rx_filter_setup() - program an RX ring TLV/packet filter
 * @ab: ath12k base struct
 * @ring_id: SRNG id of the ring to configure
 * @mac_id: MAC/pdev index the ring belongs to
 * @ring_type: HAL ring type, used to resolve the HTT ring id/type
 * @rx_buf_size: RX buffer size to advertise to the target
 * @tlv_filter: TLV-, packet-type- and offset-filter configuration to apply
 *
 * Builds an HTT_H2T RX_RING_SELECTION_CFG command from @tlv_filter and the
 * SRNG parameters of @ring_id, then sends it on the HTT endpoint.
 *
 * Fix: "&params" had been mangled to "&para;ms" (HTML-entity mis-decoding)
 * in the two hal_srng_params lines; restored so the block compiles.
 *
 * Return: 0 on success, negative errno on failure (skb freed on all
 * error paths).
 */
int ath12k_dp_tx_htt_rx_filter_setup(struct ath12k_base *ab, u32 ring_id,
				     int mac_id, enum hal_ring_type ring_type,
				     int rx_buf_size,
				     struct htt_rx_ring_tlv_filter *tlv_filter)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	struct htt_rx_ring_selection_cfg_cmd *cmd;
	struct hal_srng *srng = &ab->hal.srng_list[ring_id];
	struct hal_srng_params params;
	struct sk_buff *skb;
	int len = sizeof(*cmd);
	enum htt_srng_ring_type htt_ring_type;
	enum htt_srng_ring_id htt_ring_id;
	int ret;

	skb = ath12k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	memset(&params, 0, sizeof(params));
	ath12k_hal_srng_get_params(ab, srng, &params);

	ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
					    ring_type, &htt_ring_type,
					    &htt_ring_id);
	if (ret)
		goto err_free;

	skb_put(skb, len);
	cmd = (struct htt_rx_ring_selection_cfg_cmd *)skb->data;
	cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG,
				      HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE);
	/* SW<->HW rings address the pdev via the converted target mac id;
	 * all other ring types use the raw mac_id.
	 */
	if (htt_ring_type == HTT_SW_TO_HW_RING ||
	    htt_ring_type == HTT_HW_TO_SW_RING)
		cmd->info0 |=
			le32_encode_bits(DP_SW2HW_MACID(mac_id),
					 HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
	else
		cmd->info0 |=
			le32_encode_bits(mac_id,
					 HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
	cmd->info0 |= le32_encode_bits(htt_ring_id,
				       HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID);
	/* mirror the SRNG byte-swap flags so the target parses ring
	 * entries with the same endianness the host set up
	 */
	cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP),
				       HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS);
	cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
				       HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS);
	cmd->info0 |= le32_encode_bits(tlv_filter->offset_valid,
				       HTT_RX_RING_SELECTION_CFG_CMD_INFO0_OFFSET_VALID);
	cmd->info0 |=
		le32_encode_bits(tlv_filter->drop_threshold_valid,
				 HTT_RX_RING_SELECTION_CFG_CMD_INFO0_DROP_THRES_VAL);
	cmd->info0 |= le32_encode_bits(!tlv_filter->rxmon_disable,
				       HTT_RX_RING_SELECTION_CFG_CMD_INFO0_EN_RXMON);

	cmd->info1 = le32_encode_bits(rx_buf_size,
				      HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE);
	cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_mgmt,
				       HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT);
	cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_ctrl,
				       HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL);
	cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_data,
				       HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA);
	cmd->pkt_type_en_flags0 = cpu_to_le32(tlv_filter->pkt_filter_flags0);
	cmd->pkt_type_en_flags1 = cpu_to_le32(tlv_filter->pkt_filter_flags1);
	cmd->pkt_type_en_flags2 = cpu_to_le32(tlv_filter->pkt_filter_flags2);
	cmd->pkt_type_en_flags3 = cpu_to_le32(tlv_filter->pkt_filter_flags3);
	cmd->rx_filter_tlv = cpu_to_le32(tlv_filter->rx_filter);

	cmd->info2 = le32_encode_bits(tlv_filter->rx_drop_threshold,
				      HTT_RX_RING_SELECTION_CFG_CMD_INFO2_DROP_THRESHOLD);
	cmd->info2 |=
		le32_encode_bits(tlv_filter->enable_log_mgmt_type,
				 HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_MGMT_TYPE);
	cmd->info2 |=
		le32_encode_bits(tlv_filter->enable_log_ctrl_type,
				 HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_CTRL_TYPE);
	cmd->info2 |=
		le32_encode_bits(tlv_filter->enable_log_data_type,
				 HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_DATA_TYPE);

	cmd->info3 =
		le32_encode_bits(tlv_filter->enable_rx_tlv_offset,
				 HTT_RX_RING_SELECTION_CFG_CMD_INFO3_EN_TLV_PKT_OFFSET);
	cmd->info3 |=
		le32_encode_bits(tlv_filter->rx_tlv_offset,
				 HTT_RX_RING_SELECTION_CFG_CMD_INFO3_PKT_TLV_OFFSET);

	/* per-TLV byte offsets within the RX buffer, only meaningful when
	 * the caller marked them valid
	 */
	if (tlv_filter->offset_valid) {
		cmd->rx_packet_offset =
			le32_encode_bits(tlv_filter->rx_packet_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET);

		cmd->rx_packet_offset |=
			le32_encode_bits(tlv_filter->rx_header_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET);

		cmd->rx_mpdu_offset =
			le32_encode_bits(tlv_filter->rx_mpdu_end_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET);

		cmd->rx_mpdu_offset |=
			le32_encode_bits(tlv_filter->rx_mpdu_start_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET);

		cmd->rx_msdu_offset =
			le32_encode_bits(tlv_filter->rx_msdu_end_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET);

		cmd->rx_msdu_offset |=
			le32_encode_bits(tlv_filter->rx_msdu_start_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET);

		cmd->rx_attn_offset =
			le32_encode_bits(tlv_filter->rx_attn_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET);
	}

	/* compact word-mask mode: only forward the selected 32-bit words of
	 * the mpdu_start/msdu_end TLVs
	 */
	if (tlv_filter->rx_mpdu_start_wmask > 0 &&
	    tlv_filter->rx_msdu_end_wmask > 0) {
		cmd->info2 |=
			le32_encode_bits(true,
					 HTT_RX_RING_SELECTION_CFG_WORD_MASK_COMPACT_SET);
		cmd->rx_mpdu_start_end_mask =
			le32_encode_bits(tlv_filter->rx_mpdu_start_wmask,
					 HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_MASK);
		/* mpdu_end is not used for any hardwares so far
		 * please assign it in future if any chip is
		 * using through hal ops
		 */
		cmd->rx_mpdu_start_end_mask |=
			le32_encode_bits(tlv_filter->rx_mpdu_end_wmask,
					 HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_MASK);
		cmd->rx_msdu_end_word_mask =
			le32_encode_bits(tlv_filter->rx_msdu_end_wmask,
					 HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_MASK);
	}

	ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
	if (ret)
		goto err_free;

	return 0;

err_free:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(ath12k_dp_tx_htt_rx_filter_setup);
1081
1082 int
ath12k_dp_tx_htt_h2t_ext_stats_req(struct ath12k * ar,u8 type,struct htt_ext_stats_cfg_params * cfg_params,u64 cookie)1083 ath12k_dp_tx_htt_h2t_ext_stats_req(struct ath12k *ar, u8 type,
1084 struct htt_ext_stats_cfg_params *cfg_params,
1085 u64 cookie)
1086 {
1087 struct ath12k_base *ab = ar->ab;
1088 struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
1089 struct sk_buff *skb;
1090 struct htt_ext_stats_cfg_cmd *cmd;
1091 int len = sizeof(*cmd);
1092 int ret;
1093 u32 pdev_id;
1094
1095 skb = ath12k_htc_alloc_skb(ab, len);
1096 if (!skb)
1097 return -ENOMEM;
1098
1099 skb_put(skb, len);
1100
1101 cmd = (struct htt_ext_stats_cfg_cmd *)skb->data;
1102 memset(cmd, 0, sizeof(*cmd));
1103 cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG;
1104
1105 pdev_id = ath12k_mac_get_target_pdev_id(ar);
1106 cmd->hdr.pdev_mask = 1 << pdev_id;
1107
1108 cmd->hdr.stats_type = type;
1109 cmd->cfg_param0 = cpu_to_le32(cfg_params->cfg0);
1110 cmd->cfg_param1 = cpu_to_le32(cfg_params->cfg1);
1111 cmd->cfg_param2 = cpu_to_le32(cfg_params->cfg2);
1112 cmd->cfg_param3 = cpu_to_le32(cfg_params->cfg3);
1113 cmd->cookie_lsb = cpu_to_le32(lower_32_bits(cookie));
1114 cmd->cookie_msb = cpu_to_le32(upper_32_bits(cookie));
1115
1116 ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
1117 if (ret) {
1118 ath12k_warn(ab, "failed to send htt type stats request: %d",
1119 ret);
1120 dev_kfree_skb_any(skb);
1121 return ret;
1122 }
1123
1124 return 0;
1125 }
1126
/* Enable (or reset, when @reset is true) the RX monitor-mode ring filters
 * for @ar. Thin wrapper that logs on failure and forwards the result.
 *
 * Return: 0 on success, negative errno from the RX configuration otherwise.
 */
int ath12k_dp_tx_htt_monitor_mode_ring_config(struct ath12k *ar, bool reset)
{
	int ret;

	ret = ath12k_dp_tx_htt_rx_monitor_mode_ring_config(ar, reset);
	if (ret)
		ath12k_err(ar->ab, "failed to setup rx monitor filter %d\n", ret);

	return ret;
}
1140
/* Configure (or reset) the RX monitor-mode TLV/packet filters for @ar.
 *
 * @ar: ath12k device (radio)
 * @reset: false to enable monitor filtering, true to restore the default
 *         status filter (ath12k_mac_mon_status_filter_default)
 *
 * Three ring families may be programmed, depending on hw_params:
 *  - rxdma1_enable: only the per-pdev monitor destination rings, then return;
 *  - otherwise: the rx_mac_buf rings (enable path only) and the monitor
 *    status refill rings.
 *
 * Return: 0 on success, negative errno from the first failing filter setup.
 */
int ath12k_dp_tx_htt_rx_monitor_mode_ring_config(struct ath12k *ar, bool reset)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	struct htt_rx_ring_tlv_filter tlv_filter = {};
	int ret, ring_id, i;

	tlv_filter.offset_valid = false;

	if (!reset) {
		/* enable path: select the monitor destination ring TLVs and
		 * pass management/control/data frames from both the filter-
		 * pass (FP) and monitor-other (MO) classes
		 */
		tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_DEST_RING;

		tlv_filter.drop_threshold_valid = true;
		tlv_filter.rx_drop_threshold = HTT_RX_RING_TLV_DROP_THRESHOLD_VALUE;

		tlv_filter.enable_log_mgmt_type = true;
		tlv_filter.enable_log_ctrl_type = true;
		tlv_filter.enable_log_data_type = true;

		/* DMA length caps per frame class */
		tlv_filter.conf_len_ctrl = HTT_RX_RING_DEFAULT_DMA_LENGTH;
		tlv_filter.conf_len_mgmt = HTT_RX_RING_DEFAULT_DMA_LENGTH;
		tlv_filter.conf_len_data = HTT_RX_RING_DEFAULT_DMA_LENGTH;

		tlv_filter.enable_rx_tlv_offset = true;
		tlv_filter.rx_tlv_offset = HTT_RX_RING_PKT_TLV_OFFSET;

		tlv_filter.pkt_filter_flags0 =
					HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 |
					HTT_RX_MON_MO_MGMT_FILTER_FLAGS0;
		tlv_filter.pkt_filter_flags1 =
					HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 |
					HTT_RX_MON_MO_MGMT_FILTER_FLAGS1;
		tlv_filter.pkt_filter_flags2 =
					HTT_RX_MON_FP_CTRL_FILTER_FLASG2 |
					HTT_RX_MON_MO_CTRL_FILTER_FLASG2;
		tlv_filter.pkt_filter_flags3 =
					HTT_RX_MON_FP_CTRL_FILTER_FLASG3 |
					HTT_RX_MON_MO_CTRL_FILTER_FLASG3 |
					HTT_RX_MON_FP_DATA_FILTER_FLASG3 |
					HTT_RX_MON_MO_DATA_FILTER_FLASG3;
	} else {
		/* reset path: fall back to the default status filter, widened
		 * when extended RX stats collection is active in debugfs
		 */
		tlv_filter = ath12k_mac_mon_status_filter_default;

		if (ath12k_debugfs_is_extd_rx_stats_enabled(ar))
			tlv_filter.rx_filter = ath12k_debugfs_rx_filter(ar);
	}

	if (ab->hw_params->rxdma1_enable) {
		/* dedicated monitor rxdma: program only the per-pdev monitor
		 * destination rings and we are done
		 */
		for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
			ring_id = ar->dp.rxdma_mon_dst_ring[i].ring_id;
			ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
							       ar->dp.mac_id + i,
							       HAL_RXDMA_MONITOR_DST,
							       DP_RXDMA_REFILL_RING_SIZE,
							       &tlv_filter);
			if (ret) {
				ath12k_err(ab,
					   "failed to setup filter for monitor buf %d\n",
					   ret);
				return ret;
			}
		}
		return 0;
	}

	if (!reset) {
		/* no monitor rxdma: route monitor traffic via the per-MAC
		 * RX buffer rings (enable path only)
		 */
		for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
			ring_id = dp->rx_mac_buf_ring[i].ring_id;
			ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
							       i,
							       HAL_RXDMA_BUF,
							       DP_RXDMA_REFILL_RING_SIZE,
							       &tlv_filter);
			if (ret) {
				ath12k_err(ab,
					   "failed to setup filter for mon rx buf %d\n",
					   ret);
				return ret;
			}
		}
	}

	/* monitor status refill rings are configured on both enable and
	 * reset; on enable, narrow the filter to status-ring TLVs first
	 */
	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
		ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
		if (!reset) {
			tlv_filter.rx_filter =
					HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING;
		}

		ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id,
						       i,
						       HAL_RXDMA_MONITOR_STATUS,
						       RX_MON_STATUS_BUF_SIZE,
						       &tlv_filter);
		if (ret) {
			ath12k_err(ab,
				   "failed to setup filter for mon status buf %d\n",
				   ret);
			return ret;
		}
	}

	return 0;
}
1245
/* ath12k_dp_tx_htt_tx_filter_setup() - program a TX monitor ring TLV filter
 * @ab: ath12k base struct
 * @ring_id: SRNG id of the ring to configure
 * @mac_id: MAC/pdev index the ring belongs to
 * @ring_type: HAL ring type, used to resolve the HTT ring id/type
 * @tx_buf_size: ring buffer size to advertise to the target
 * @htt_tlv_filter: TX monitor filter configuration to apply
 *
 * Builds an HTT_H2T TX_MONITOR_CFG command from @htt_tlv_filter and the
 * SRNG parameters of @ring_id, then sends it on the HTT endpoint.
 *
 * Fix: "&params" had been mangled to "&para;ms" (HTML-entity mis-decoding)
 * in the two hal_srng_params lines; restored so the block compiles.
 *
 * Return: 0 on success, negative errno on failure (skb freed on all
 * error paths).
 */
int ath12k_dp_tx_htt_tx_filter_setup(struct ath12k_base *ab, u32 ring_id,
				     int mac_id, enum hal_ring_type ring_type,
				     int tx_buf_size,
				     struct htt_tx_ring_tlv_filter *htt_tlv_filter)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	struct htt_tx_ring_selection_cfg_cmd *cmd;
	struct hal_srng *srng = &ab->hal.srng_list[ring_id];
	struct hal_srng_params params;
	struct sk_buff *skb;
	int len = sizeof(*cmd);
	enum htt_srng_ring_type htt_ring_type;
	enum htt_srng_ring_id htt_ring_id;
	int ret;

	skb = ath12k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	memset(&params, 0, sizeof(params));
	ath12k_hal_srng_get_params(ab, srng, &params);

	ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
					    ring_type, &htt_ring_type,
					    &htt_ring_id);

	if (ret)
		goto err_free;

	skb_put(skb, len);
	cmd = (struct htt_tx_ring_selection_cfg_cmd *)skb->data;
	cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_TX_MONITOR_CFG,
				      HTT_TX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE);
	/* SW<->HW rings address the pdev via the converted target mac id;
	 * all other ring types use the raw mac_id.
	 */
	if (htt_ring_type == HTT_SW_TO_HW_RING ||
	    htt_ring_type == HTT_HW_TO_SW_RING)
		cmd->info0 |=
			le32_encode_bits(DP_SW2HW_MACID(mac_id),
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
	else
		cmd->info0 |=
			le32_encode_bits(mac_id,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
	cmd->info0 |= le32_encode_bits(htt_ring_id,
				       HTT_TX_RING_SELECTION_CFG_CMD_INFO0_RING_ID);
	cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP),
				       HTT_TX_RING_SELECTION_CFG_CMD_INFO0_SS);
	cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
				       HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PS);

	cmd->info1 |=
		le32_encode_bits(tx_buf_size,
				 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_RING_BUFF_SIZE);

	/* NOTE(review): the three filter branches below pair
	 * tx_mon_data_filter with FRAME_CTRL_TYPE_CTRL/CONF_LEN_CTRL and
	 * tx_mon_ctrl_filter with FRAME_CTRL_TYPE_DATA/CONF_LEN_DATA, which
	 * looks cross-wired relative to the field names. Behavior is kept
	 * as-is here — confirm against the HTT TX_MONITOR_CFG spec before
	 * changing.
	 */
	if (htt_tlv_filter->tx_mon_mgmt_filter) {
		cmd->info1 |=
			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_MGMT,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE);
		cmd->info1 |=
			le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT);
		cmd->info2 |=
			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_MGMT,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG);
	}

	if (htt_tlv_filter->tx_mon_data_filter) {
		cmd->info1 |=
			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_CTRL,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE);
		cmd->info1 |=
			le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL);
		cmd->info2 |=
			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_CTRL,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG);
	}

	if (htt_tlv_filter->tx_mon_ctrl_filter) {
		cmd->info1 |=
			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_DATA,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE);
		cmd->info1 |=
			le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA);
		cmd->info2 |=
			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_DATA,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG);
	}

	cmd->tlv_filter_mask_in0 =
		cpu_to_le32(htt_tlv_filter->tx_mon_downstream_tlv_flags);
	cmd->tlv_filter_mask_in1 =
		cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags0);
	cmd->tlv_filter_mask_in2 =
		cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags1);
	cmd->tlv_filter_mask_in3 =
		cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags2);

	ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
	if (ret)
		goto err_free;

	return 0;

err_free:
	dev_kfree_skb_any(skb);
	return ret;
}
1354