1*60bac4d6SBjoern A. Zeeb // SPDX-License-Identifier: BSD-3-Clause-Clear
2*60bac4d6SBjoern A. Zeeb /*
3*60bac4d6SBjoern A. Zeeb * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
4*60bac4d6SBjoern A. Zeeb * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
5*60bac4d6SBjoern A. Zeeb */
6*60bac4d6SBjoern A. Zeeb
7*60bac4d6SBjoern A. Zeeb #include "core.h"
8*60bac4d6SBjoern A. Zeeb #include "peer.h"
9*60bac4d6SBjoern A. Zeeb #include "htc.h"
10*60bac4d6SBjoern A. Zeeb #include "dp_htt.h"
11*60bac4d6SBjoern A. Zeeb #include "debugfs_htt_stats.h"
12*60bac4d6SBjoern A. Zeeb #include "debugfs.h"
13*60bac4d6SBjoern A. Zeeb
/* HTC endpoint TX-completion callback for the HTT service.
 * There is no per-message status to process; just release the skb
 * (dev_kfree_skb_any() is safe from any context).
 */
static void ath12k_dp_htt_htc_tx_complete(struct ath12k_base *ab,
					  struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}
19*60bac4d6SBjoern A. Zeeb
ath12k_dp_htt_connect(struct ath12k_dp * dp)20*60bac4d6SBjoern A. Zeeb int ath12k_dp_htt_connect(struct ath12k_dp *dp)
21*60bac4d6SBjoern A. Zeeb {
22*60bac4d6SBjoern A. Zeeb struct ath12k_htc_svc_conn_req conn_req = {};
23*60bac4d6SBjoern A. Zeeb struct ath12k_htc_svc_conn_resp conn_resp = {};
24*60bac4d6SBjoern A. Zeeb int status;
25*60bac4d6SBjoern A. Zeeb
26*60bac4d6SBjoern A. Zeeb conn_req.ep_ops.ep_tx_complete = ath12k_dp_htt_htc_tx_complete;
27*60bac4d6SBjoern A. Zeeb conn_req.ep_ops.ep_rx_complete = ath12k_dp_htt_htc_t2h_msg_handler;
28*60bac4d6SBjoern A. Zeeb
29*60bac4d6SBjoern A. Zeeb /* connect to control service */
30*60bac4d6SBjoern A. Zeeb conn_req.service_id = ATH12K_HTC_SVC_ID_HTT_DATA_MSG;
31*60bac4d6SBjoern A. Zeeb
32*60bac4d6SBjoern A. Zeeb status = ath12k_htc_connect_service(&dp->ab->htc, &conn_req,
33*60bac4d6SBjoern A. Zeeb &conn_resp);
34*60bac4d6SBjoern A. Zeeb
35*60bac4d6SBjoern A. Zeeb if (status)
36*60bac4d6SBjoern A. Zeeb return status;
37*60bac4d6SBjoern A. Zeeb
38*60bac4d6SBjoern A. Zeeb dp->eid = conn_resp.eid;
39*60bac4d6SBjoern A. Zeeb
40*60bac4d6SBjoern A. Zeeb return 0;
41*60bac4d6SBjoern A. Zeeb }
42*60bac4d6SBjoern A. Zeeb
/* Find (or claim) the user_stats[] slot for @peer_id.
 *
 * Slots are filled in order, so the first slot that is not yet holding
 * a valid peer id is free and is handed out for this peer. A slot that
 * already tracks @peer_id is returned directly.
 *
 * Returns the slot index, or -EINVAL when all usable slots are taken
 * by other peers.
 */
static int ath12k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
				      u16 peer_id)
{
	struct htt_ppdu_user_stats *user;
	int idx;

	for (idx = 0; idx < HTT_PPDU_STATS_MAX_USERS - 1; idx++) {
		user = &ppdu_stats->user_stats[idx];

		/* Unused slot: claim it for this peer */
		if (!user->is_valid_peer_id)
			return idx;

		/* Slot already belongs to this peer */
		if (user->peer_id == peer_id)
			return idx;
	}

	return -EINVAL;
}
59*60bac4d6SBjoern A. Zeeb
/* TLV iterator callback: accumulate one PPDU-stats TLV into the
 * struct htt_ppdu_stats_info passed via @data.
 *
 * Each handled tag validates @len against the expected struct size,
 * locates (or claims) the per-user slot keyed by the firmware's
 * sw_peer_id, copies the payload into that slot and records the tag
 * in tlv_flags so later consumers know which TLVs arrived.
 * Unknown tags fall through the switch and are silently ignored.
 *
 * Returns 0 on success or -EINVAL on a short TLV / exhausted user slots.
 */
static int ath12k_htt_tlv_ppdu_stats_parse(struct ath12k_base *ab,
					   u16 tag, u16 len, const void *ptr,
					   void *data)
{
	const struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *ba_status;
	const struct htt_ppdu_stats_usr_cmpltn_cmn *cmplt_cmn;
	const struct htt_ppdu_stats_user_rate *user_rate;
	struct htt_ppdu_stats_info *ppdu_info;
	struct htt_ppdu_user_stats *user_stats;
	int cur_user;
	u16 peer_id;

	ppdu_info = data;

	switch (tag) {
	case HTT_PPDU_STATS_TAG_COMMON:
		/* PPDU-wide common stats; not per-user, stored once */
		if (len < sizeof(struct htt_ppdu_stats_common)) {
			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}
		memcpy(&ppdu_info->ppdu_stats.common, ptr,
		       sizeof(struct htt_ppdu_stats_common));
		break;
	case HTT_PPDU_STATS_TAG_USR_RATE:
		/* Per-user TX rate info, keyed by sw_peer_id */
		if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}
		user_rate = ptr;
		peer_id = le16_to_cpu(user_rate->sw_peer_id);
		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy(&user_stats->rate, ptr,
		       sizeof(struct htt_ppdu_stats_user_rate));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
		/* Per-user completion stats (MPDU tried/success, retries) */
		if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		cmplt_cmn = ptr;
		peer_id = le16_to_cpu(cmplt_cmn->sw_peer_id);
		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy(&user_stats->cmpltn_cmn, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
		/* Per-user ACK/Block-Ack status (success bytes/pkts, TID) */
		if (len <
		    sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		ba_status = ptr;
		peer_id = le16_to_cpu(ba_status->sw_peer_id);
		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy(&user_stats->ack_ba, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
		user_stats->tlv_flags |= BIT(tag);
		break;
	}
	return 0;
}
147*60bac4d6SBjoern A. Zeeb
/* Walk a buffer of HTT TLVs and invoke @iter on each payload.
 *
 * Every TLV begins with a 32-bit little-endian header encoding the tag
 * and payload length. A truncated header or a payload that overruns the
 * remaining buffer aborts the walk with -EINVAL.
 *
 * NOTE: only -ENOMEM from @iter aborts the walk; other iterator errors
 * are deliberately ignored so the remaining TLVs still get parsed.
 *
 * Returns 0 when the whole buffer was consumed, negative errno otherwise.
 */
int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
			   int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
				       const void *ptr, void *data),
			   void *data)
{
	const struct htt_tlv *tlv;
	const void *begin = ptr;
	u16 tlv_tag, tlv_len;
	int ret = -EINVAL;

	while (len > 0) {
		if (len < sizeof(*tlv)) {
			ath12k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}
		tlv = (struct htt_tlv *)ptr;
		tlv_tag = le32_get_bits(tlv->header, HTT_TLV_TAG);
		tlv_len = le32_get_bits(tlv->header, HTT_TLV_LEN);
		/* void * arithmetic is a GNU extension used kernel-wide */
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		if (tlv_len > len) {
			ath12k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}
		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
		if (ret == -ENOMEM)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}
	return 0;
}
184*60bac4d6SBjoern A. Zeeb
/* Fold one user's worth of a completed PPDU's stats into the per-peer
 * TX statistics (peer->txrate, retry/duration counters) and, for QoS
 * data frames, the pdev-wide peer_tx_stats snapshot.
 *
 * Requires at least the USR_RATE TLV for this user; bails out silently
 * otherwise. Peer lookup is done under rcu_read_lock(); the rate/counter
 * update is additionally serialized with dp->dp_lock.
 */
static void
ath12k_update_per_peer_tx_stats(struct ath12k_pdev_dp *dp_pdev,
				struct htt_ppdu_stats *ppdu_stats, u8 user)
{
	struct ath12k_dp *dp = dp_pdev->dp;
	struct ath12k_base *ab = dp->ab;
	struct ath12k_dp_link_peer *peer;
	struct htt_ppdu_stats_user_rate *user_rate;
	struct ath12k_per_peer_tx_stats *peer_stats = &dp_pdev->peer_tx_stats;
	struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
	struct htt_ppdu_stats_common *common = &ppdu_stats->common;
	int ret;
	u8 flags, mcs, nss, bw, sgi, dcm, ppdu_type, rate_idx = 0;
	u32 v, succ_bytes = 0;
	u16 tones, rate = 0, succ_pkts = 0;
	u32 tx_duration = 0;
	u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
	u16 tx_retry_failed = 0, tx_retry_count = 0;
	bool is_ampdu = false, is_ofdma;

	/* Rate TLV is mandatory for any meaningful update */
	if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
		return;

	if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON)) {
		is_ampdu =
			HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);
		/* failed = tried - succeeded MPDUs for this PPDU */
		tx_retry_failed =
			__le16_to_cpu(usr_stats->cmpltn_cmn.mpdu_tried) -
			__le16_to_cpu(usr_stats->cmpltn_cmn.mpdu_success);
		tx_retry_count =
			HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
			HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
	}

	if (usr_stats->tlv_flags &
	    BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
		succ_bytes = le32_to_cpu(usr_stats->ack_ba.success_bytes);
		succ_pkts = le32_get_bits(usr_stats->ack_ba.info,
					  HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M);
		tid = le32_get_bits(usr_stats->ack_ba.info,
				    HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM);
	}

	if (common->fes_duration_us)
		tx_duration = le32_to_cpu(common->fes_duration_us);

	user_rate = &usr_stats->rate;
	flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
	/* NOTE(review): firmware BW encoding appears offset by 2 from the
	 * value ath12k_mac_bw_to_mac80211_bw() expects - confirm against
	 * the HTT spec if touching this.
	 */
	bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
	nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
	mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
	sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
	dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);

	ppdu_type = HTT_USR_RATE_PPDU_TYPE(user_rate->info1);
	is_ofdma = (ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA) ||
		   (ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA);

	/* Note: If host configured fixed rates and in some other special
	 * cases, the broadcast/management frames are sent in different rates.
	 * Firmware rate's control to be skipped for this?
	 */

	if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH12K_HE_MCS_MAX) {
		ath12k_warn(ab, "Invalid HE mcs %d peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH12K_VHT_MCS_MAX) {
		ath12k_warn(ab, "Invalid VHT mcs %d peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH12K_HT_MCS_MAX || nss < 1)) {
		ath12k_warn(ab, "Invalid HT mcs %d nss %d peer stats",
			    mcs, nss);
		return;
	}

	/* Legacy rates carry a hw ratecode that must be translated */
	if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
		ret = ath12k_mac_hw_ratecode_to_legacy_rate(mcs,
							    flags,
							    &rate_idx,
							    &rate);
		if (ret < 0)
			return;
	}

	rcu_read_lock();
	peer = ath12k_dp_link_peer_find_by_peerid(dp_pdev, usr_stats->peer_id);

	if (!peer || !peer->sta) {
		rcu_read_unlock();
		return;
	}

	spin_lock_bh(&dp->dp_lock);

	/* Rebuild txrate from scratch for this PPDU */
	memset(&peer->txrate, 0, sizeof(peer->txrate));

	peer->txrate.bw = ath12k_mac_bw_to_mac80211_bw(bw);

	switch (flags) {
	case WMI_RATE_PREAMBLE_OFDM:
		peer->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_CCK:
		peer->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_HT:
		/* HT MCS index encodes nss: 8 MCS values per stream */
		peer->txrate.mcs = mcs + 8 * (nss - 1);
		peer->txrate.flags = RATE_INFO_FLAGS_MCS;
		if (sgi)
			peer->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_VHT:
		peer->txrate.mcs = mcs;
		peer->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
		if (sgi)
			peer->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_HE:
		peer->txrate.mcs = mcs;
		peer->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
		peer->txrate.he_dcm = dcm;
		peer->txrate.he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
		/* RU width in tones from the inclusive start/end indices */
		tones = le16_to_cpu(user_rate->ru_end) -
			le16_to_cpu(user_rate->ru_start) + 1;
		v = ath12k_he_ru_tones_to_nl80211_he_ru_alloc(tones);
		peer->txrate.he_ru_alloc = v;
		if (is_ofdma)
			peer->txrate.bw = RATE_INFO_BW_HE_RU;
		break;
	case WMI_RATE_PREAMBLE_EHT:
		peer->txrate.mcs = mcs;
		peer->txrate.flags = RATE_INFO_FLAGS_EHT_MCS;
		peer->txrate.he_dcm = dcm;
		peer->txrate.eht_gi = ath12k_mac_eht_gi_to_nl80211_eht_gi(sgi);
		tones = le16_to_cpu(user_rate->ru_end) -
			le16_to_cpu(user_rate->ru_start) + 1;
		v = ath12k_mac_eht_ru_tones_to_nl80211_eht_ru_alloc(tones);
		peer->txrate.eht_ru_alloc = v;
		if (is_ofdma)
			peer->txrate.bw = RATE_INFO_BW_EHT_RU;
		break;
	}

	peer->tx_retry_failed += tx_retry_failed;
	peer->tx_retry_count += tx_retry_count;
	peer->txrate.nss = nss;
	peer->tx_duration += tx_duration;
	memcpy(&peer->last_txrate, &peer->txrate, sizeof(struct rate_info));

	spin_unlock_bh(&dp->dp_lock);

	/* PPDU stats reported for mgmt packet doesn't have valid tx bytes.
	 * So skip peer stats update for mgmt packets.
	 */
	if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
		memset(peer_stats, 0, sizeof(*peer_stats));
		peer_stats->succ_pkts = succ_pkts;
		peer_stats->succ_bytes = succ_bytes;
		peer_stats->is_ampdu = is_ampdu;
		peer_stats->duration = tx_duration;
		peer_stats->ba_fails =
			HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
			HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
	}

	rcu_read_unlock();
}
356*60bac4d6SBjoern A. Zeeb
ath12k_htt_update_ppdu_stats(struct ath12k_pdev_dp * dp_pdev,struct htt_ppdu_stats * ppdu_stats)357*60bac4d6SBjoern A. Zeeb static void ath12k_htt_update_ppdu_stats(struct ath12k_pdev_dp *dp_pdev,
358*60bac4d6SBjoern A. Zeeb struct htt_ppdu_stats *ppdu_stats)
359*60bac4d6SBjoern A. Zeeb {
360*60bac4d6SBjoern A. Zeeb u8 user;
361*60bac4d6SBjoern A. Zeeb
362*60bac4d6SBjoern A. Zeeb for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
363*60bac4d6SBjoern A. Zeeb ath12k_update_per_peer_tx_stats(dp_pdev, ppdu_stats, user);
364*60bac4d6SBjoern A. Zeeb }
365*60bac4d6SBjoern A. Zeeb
366*60bac4d6SBjoern A. Zeeb static
ath12k_dp_htt_get_ppdu_desc(struct ath12k_pdev_dp * dp_pdev,u32 ppdu_id)367*60bac4d6SBjoern A. Zeeb struct htt_ppdu_stats_info *ath12k_dp_htt_get_ppdu_desc(struct ath12k_pdev_dp *dp_pdev,
368*60bac4d6SBjoern A. Zeeb u32 ppdu_id)
369*60bac4d6SBjoern A. Zeeb {
370*60bac4d6SBjoern A. Zeeb struct htt_ppdu_stats_info *ppdu_info;
371*60bac4d6SBjoern A. Zeeb
372*60bac4d6SBjoern A. Zeeb lockdep_assert_held(&dp_pdev->ppdu_list_lock);
373*60bac4d6SBjoern A. Zeeb if (!list_empty(&dp_pdev->ppdu_stats_info)) {
374*60bac4d6SBjoern A. Zeeb list_for_each_entry(ppdu_info, &dp_pdev->ppdu_stats_info, list) {
375*60bac4d6SBjoern A. Zeeb if (ppdu_info->ppdu_id == ppdu_id)
376*60bac4d6SBjoern A. Zeeb return ppdu_info;
377*60bac4d6SBjoern A. Zeeb }
378*60bac4d6SBjoern A. Zeeb
379*60bac4d6SBjoern A. Zeeb if (dp_pdev->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
380*60bac4d6SBjoern A. Zeeb ppdu_info = list_first_entry(&dp_pdev->ppdu_stats_info,
381*60bac4d6SBjoern A. Zeeb typeof(*ppdu_info), list);
382*60bac4d6SBjoern A. Zeeb list_del(&ppdu_info->list);
383*60bac4d6SBjoern A. Zeeb dp_pdev->ppdu_stat_list_depth--;
384*60bac4d6SBjoern A. Zeeb ath12k_htt_update_ppdu_stats(dp_pdev, &ppdu_info->ppdu_stats);
385*60bac4d6SBjoern A. Zeeb kfree(ppdu_info);
386*60bac4d6SBjoern A. Zeeb }
387*60bac4d6SBjoern A. Zeeb }
388*60bac4d6SBjoern A. Zeeb
389*60bac4d6SBjoern A. Zeeb ppdu_info = kzalloc_obj(*ppdu_info, GFP_ATOMIC);
390*60bac4d6SBjoern A. Zeeb if (!ppdu_info)
391*60bac4d6SBjoern A. Zeeb return NULL;
392*60bac4d6SBjoern A. Zeeb
393*60bac4d6SBjoern A. Zeeb list_add_tail(&ppdu_info->list, &dp_pdev->ppdu_stats_info);
394*60bac4d6SBjoern A. Zeeb dp_pdev->ppdu_stat_list_depth++;
395*60bac4d6SBjoern A. Zeeb
396*60bac4d6SBjoern A. Zeeb return ppdu_info;
397*60bac4d6SBjoern A. Zeeb }
398*60bac4d6SBjoern A. Zeeb
ath12k_copy_to_delay_stats(struct ath12k_dp_link_peer * peer,struct htt_ppdu_user_stats * usr_stats)399*60bac4d6SBjoern A. Zeeb static void ath12k_copy_to_delay_stats(struct ath12k_dp_link_peer *peer,
400*60bac4d6SBjoern A. Zeeb struct htt_ppdu_user_stats *usr_stats)
401*60bac4d6SBjoern A. Zeeb {
402*60bac4d6SBjoern A. Zeeb peer->ppdu_stats_delayba.sw_peer_id = le16_to_cpu(usr_stats->rate.sw_peer_id);
403*60bac4d6SBjoern A. Zeeb peer->ppdu_stats_delayba.info0 = le32_to_cpu(usr_stats->rate.info0);
404*60bac4d6SBjoern A. Zeeb peer->ppdu_stats_delayba.ru_end = le16_to_cpu(usr_stats->rate.ru_end);
405*60bac4d6SBjoern A. Zeeb peer->ppdu_stats_delayba.ru_start = le16_to_cpu(usr_stats->rate.ru_start);
406*60bac4d6SBjoern A. Zeeb peer->ppdu_stats_delayba.info1 = le32_to_cpu(usr_stats->rate.info1);
407*60bac4d6SBjoern A. Zeeb peer->ppdu_stats_delayba.rate_flags = le32_to_cpu(usr_stats->rate.rate_flags);
408*60bac4d6SBjoern A. Zeeb peer->ppdu_stats_delayba.resp_rate_flags =
409*60bac4d6SBjoern A. Zeeb le32_to_cpu(usr_stats->rate.resp_rate_flags);
410*60bac4d6SBjoern A. Zeeb
411*60bac4d6SBjoern A. Zeeb peer->delayba_flag = true;
412*60bac4d6SBjoern A. Zeeb }
413*60bac4d6SBjoern A. Zeeb
ath12k_copy_to_bar(struct ath12k_dp_link_peer * peer,struct htt_ppdu_user_stats * usr_stats)414*60bac4d6SBjoern A. Zeeb static void ath12k_copy_to_bar(struct ath12k_dp_link_peer *peer,
415*60bac4d6SBjoern A. Zeeb struct htt_ppdu_user_stats *usr_stats)
416*60bac4d6SBjoern A. Zeeb {
417*60bac4d6SBjoern A. Zeeb usr_stats->rate.sw_peer_id = cpu_to_le16(peer->ppdu_stats_delayba.sw_peer_id);
418*60bac4d6SBjoern A. Zeeb usr_stats->rate.info0 = cpu_to_le32(peer->ppdu_stats_delayba.info0);
419*60bac4d6SBjoern A. Zeeb usr_stats->rate.ru_end = cpu_to_le16(peer->ppdu_stats_delayba.ru_end);
420*60bac4d6SBjoern A. Zeeb usr_stats->rate.ru_start = cpu_to_le16(peer->ppdu_stats_delayba.ru_start);
421*60bac4d6SBjoern A. Zeeb usr_stats->rate.info1 = cpu_to_le32(peer->ppdu_stats_delayba.info1);
422*60bac4d6SBjoern A. Zeeb usr_stats->rate.rate_flags = cpu_to_le32(peer->ppdu_stats_delayba.rate_flags);
423*60bac4d6SBjoern A. Zeeb usr_stats->rate.resp_rate_flags =
424*60bac4d6SBjoern A. Zeeb cpu_to_le32(peer->ppdu_stats_delayba.resp_rate_flags);
425*60bac4d6SBjoern A. Zeeb
426*60bac4d6SBjoern A. Zeeb peer->delayba_flag = false;
427*60bac4d6SBjoern A. Zeeb }
428*60bac4d6SBjoern A. Zeeb
/* Handle an HTT_T2H PPDU-stats indication.
 *
 * Validates the payload length and pdev id, locates (or allocates) the
 * accumulating descriptor for this ppdu_id and parses all TLVs into it.
 * Additionally handles the delayed-Block-Ack dance: data-PPDU rate TLVs
 * are backed up per peer, and restored into subsequent MU-BAR PPDUs.
 *
 * Peer lookups run under rcu_read_lock(); the descriptor list is
 * protected by dp_pdev->ppdu_list_lock for the whole parse.
 *
 * Returns 0 on success, negative errno on malformed input.
 */
static int ath12k_htt_pull_ppdu_stats(struct ath12k_base *ab,
				      struct sk_buff *skb)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	struct ath12k_htt_ppdu_stats_msg *msg;
	struct htt_ppdu_stats_info *ppdu_info;
	struct ath12k_dp_link_peer *peer = NULL;
	struct htt_ppdu_user_stats *usr_stats = NULL;
	u32 peer_id = 0;
	struct ath12k_pdev_dp *dp_pdev;
	int ret, i;
	u8 pdev_id, pdev_idx;
	u32 ppdu_id, len;

	msg = (struct ath12k_htt_ppdu_stats_msg *)skb->data;
	len = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE);
	/* Firmware-reported payload size must fit inside the skb */
	if (len > (skb->len - struct_size(msg, data, 0))) {
		ath12k_warn(ab,
			    "HTT PPDU STATS event has unexpected payload size %u, should be smaller than %u\n",
			    len, skb->len);
		return -EINVAL;
	}

	pdev_id = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PDEV_ID);
	ppdu_id = le32_to_cpu(msg->ppdu_id);

	pdev_idx = DP_HW2SW_MACID(pdev_id);
	if (pdev_idx >= MAX_RADIOS) {
		ath12k_warn(ab, "HTT PPDU STATS invalid pdev id %u", pdev_id);
		return -EINVAL;
	}

	rcu_read_lock();

	dp_pdev = ath12k_dp_to_pdev_dp(dp, pdev_idx);
	if (!dp_pdev) {
		ret = -EINVAL;
		goto exit;
	}

	spin_lock_bh(&dp_pdev->ppdu_list_lock);
	ppdu_info = ath12k_dp_htt_get_ppdu_desc(dp_pdev, ppdu_id);
	if (!ppdu_info) {
		spin_unlock_bh(&dp_pdev->ppdu_list_lock);
		ret = -EINVAL;
		goto exit;
	}

	ppdu_info->ppdu_id = ppdu_id;
	ret = ath12k_dp_htt_tlv_iter(ab, msg->data, len,
				     ath12k_htt_tlv_ppdu_stats_parse,
				     (void *)ppdu_info);
	if (ret) {
		spin_unlock_bh(&dp_pdev->ppdu_list_lock);
		ath12k_warn(ab, "Failed to parse tlv %d\n", ret);
		goto exit;
	}

	/* num_users later indexes user_stats[]; bound it before use */
	if (ppdu_info->ppdu_stats.common.num_users >= HTT_PPDU_STATS_MAX_USERS) {
		spin_unlock_bh(&dp_pdev->ppdu_list_lock);
		ath12k_warn(ab,
			    "HTT PPDU STATS event has unexpected num_users %u, should be smaller than %u\n",
			    ppdu_info->ppdu_stats.common.num_users,
			    HTT_PPDU_STATS_MAX_USERS);
		ret = -EINVAL;
		goto exit;
	}

	/* back up data rate tlv for all peers */
	if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_DATA &&
	    (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON)) &&
	    ppdu_info->delay_ba) {
		for (i = 0; i < ppdu_info->ppdu_stats.common.num_users; i++) {
			peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id;
			peer = ath12k_dp_link_peer_find_by_peerid(dp_pdev, peer_id);
			if (!peer)
				continue;

			usr_stats = &ppdu_info->ppdu_stats.user_stats[i];
			if (usr_stats->delay_ba)
				ath12k_copy_to_delay_stats(peer, usr_stats);
		}
	}

	/* restore all peers' data rate tlv to mu-bar tlv */
	if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_BAR &&
	    (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON))) {
		for (i = 0; i < ppdu_info->bar_num_users; i++) {
			peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id;
			peer = ath12k_dp_link_peer_find_by_peerid(dp_pdev, peer_id);
			if (!peer)
				continue;

			usr_stats = &ppdu_info->ppdu_stats.user_stats[i];
			if (peer->delayba_flag)
				ath12k_copy_to_bar(peer, usr_stats);
		}
	}

	spin_unlock_bh(&dp_pdev->ppdu_list_lock);

exit:
	rcu_read_unlock();

	return ret;
}
535*60bac4d6SBjoern A. Zeeb
/* Handle an HTT_T2H MLO timestamp-offset event: cache the firmware's
 * MLO timing/offset snapshot in the target pdev, under ar->data_lock.
 * Events for pdevs that are not (yet) active are silently dropped.
 */
static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab,
						struct sk_buff *skb)
{
	struct ath12k_htt_mlo_offset_msg *msg;
	struct ath12k_pdev *pdev;
	struct ath12k *ar;
	u8 pdev_id;

	msg = (struct ath12k_htt_mlo_offset_msg *)skb->data;
	pdev_id = u32_get_bits(__le32_to_cpu(msg->info),
			       HTT_T2H_MLO_OFFSET_INFO_PDEV_ID);

	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
	if (!ar) {
		/* It is possible that the ar is not yet active (started).
		 * The above function will only look for the active pdev
		 * and hence %NULL return is possible. Just silently
		 * discard this message
		 */
		goto exit;
	}

	spin_lock_bh(&ar->data_lock);
	pdev = ar->pdev;

	/* Straight field-by-field copy of the event into the pdev cache */
	pdev->timestamp.info = __le32_to_cpu(msg->info);
	pdev->timestamp.sync_timestamp_lo_us = __le32_to_cpu(msg->sync_timestamp_lo_us);
	pdev->timestamp.sync_timestamp_hi_us = __le32_to_cpu(msg->sync_timestamp_hi_us);
	pdev->timestamp.mlo_offset_lo = __le32_to_cpu(msg->mlo_offset_lo);
	pdev->timestamp.mlo_offset_hi = __le32_to_cpu(msg->mlo_offset_hi);
	pdev->timestamp.mlo_offset_clks = __le32_to_cpu(msg->mlo_offset_clks);
	pdev->timestamp.mlo_comp_clks = __le32_to_cpu(msg->mlo_comp_clks);
	pdev->timestamp.mlo_comp_timer = __le32_to_cpu(msg->mlo_comp_timer);

	spin_unlock_bh(&ar->data_lock);
exit:
	rcu_read_unlock();
}
575*60bac4d6SBjoern A. Zeeb
ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base * ab,struct sk_buff * skb)576*60bac4d6SBjoern A. Zeeb void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab,
577*60bac4d6SBjoern A. Zeeb struct sk_buff *skb)
578*60bac4d6SBjoern A. Zeeb {
579*60bac4d6SBjoern A. Zeeb struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
580*60bac4d6SBjoern A. Zeeb struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
581*60bac4d6SBjoern A. Zeeb enum htt_t2h_msg_type type;
582*60bac4d6SBjoern A. Zeeb u16 peer_id;
583*60bac4d6SBjoern A. Zeeb u8 vdev_id;
584*60bac4d6SBjoern A. Zeeb u8 mac_addr[ETH_ALEN];
585*60bac4d6SBjoern A. Zeeb u16 peer_mac_h16;
586*60bac4d6SBjoern A. Zeeb u16 ast_hash = 0;
587*60bac4d6SBjoern A. Zeeb u16 hw_peer_id;
588*60bac4d6SBjoern A. Zeeb
589*60bac4d6SBjoern A. Zeeb type = le32_get_bits(resp->version_msg.version, HTT_T2H_MSG_TYPE);
590*60bac4d6SBjoern A. Zeeb
591*60bac4d6SBjoern A. Zeeb ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type);
592*60bac4d6SBjoern A. Zeeb
593*60bac4d6SBjoern A. Zeeb switch (type) {
594*60bac4d6SBjoern A. Zeeb case HTT_T2H_MSG_TYPE_VERSION_CONF:
595*60bac4d6SBjoern A. Zeeb dp->htt_tgt_ver_major = le32_get_bits(resp->version_msg.version,
596*60bac4d6SBjoern A. Zeeb HTT_T2H_VERSION_CONF_MAJOR);
597*60bac4d6SBjoern A. Zeeb dp->htt_tgt_ver_minor = le32_get_bits(resp->version_msg.version,
598*60bac4d6SBjoern A. Zeeb HTT_T2H_VERSION_CONF_MINOR);
599*60bac4d6SBjoern A. Zeeb complete(&dp->htt_tgt_version_received);
600*60bac4d6SBjoern A. Zeeb break;
601*60bac4d6SBjoern A. Zeeb /* TODO: remove unused peer map versions after testing */
602*60bac4d6SBjoern A. Zeeb case HTT_T2H_MSG_TYPE_PEER_MAP:
603*60bac4d6SBjoern A. Zeeb vdev_id = le32_get_bits(resp->peer_map_ev.info,
604*60bac4d6SBjoern A. Zeeb HTT_T2H_PEER_MAP_INFO_VDEV_ID);
605*60bac4d6SBjoern A. Zeeb peer_id = le32_get_bits(resp->peer_map_ev.info,
606*60bac4d6SBjoern A. Zeeb HTT_T2H_PEER_MAP_INFO_PEER_ID);
607*60bac4d6SBjoern A. Zeeb peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
608*60bac4d6SBjoern A. Zeeb HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
609*60bac4d6SBjoern A. Zeeb ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
610*60bac4d6SBjoern A. Zeeb peer_mac_h16, mac_addr);
611*60bac4d6SBjoern A. Zeeb ath12k_dp_link_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
612*60bac4d6SBjoern A. Zeeb break;
613*60bac4d6SBjoern A. Zeeb case HTT_T2H_MSG_TYPE_PEER_MAP2:
614*60bac4d6SBjoern A. Zeeb vdev_id = le32_get_bits(resp->peer_map_ev.info,
615*60bac4d6SBjoern A. Zeeb HTT_T2H_PEER_MAP_INFO_VDEV_ID);
616*60bac4d6SBjoern A. Zeeb peer_id = le32_get_bits(resp->peer_map_ev.info,
617*60bac4d6SBjoern A. Zeeb HTT_T2H_PEER_MAP_INFO_PEER_ID);
618*60bac4d6SBjoern A. Zeeb peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
619*60bac4d6SBjoern A. Zeeb HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
620*60bac4d6SBjoern A. Zeeb ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
621*60bac4d6SBjoern A. Zeeb peer_mac_h16, mac_addr);
622*60bac4d6SBjoern A. Zeeb ast_hash = le32_get_bits(resp->peer_map_ev.info2,
623*60bac4d6SBjoern A. Zeeb HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL);
624*60bac4d6SBjoern A. Zeeb hw_peer_id = le32_get_bits(resp->peer_map_ev.info1,
625*60bac4d6SBjoern A. Zeeb HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID);
626*60bac4d6SBjoern A. Zeeb ath12k_dp_link_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
627*60bac4d6SBjoern A. Zeeb hw_peer_id);
628*60bac4d6SBjoern A. Zeeb break;
629*60bac4d6SBjoern A. Zeeb case HTT_T2H_MSG_TYPE_PEER_MAP3:
630*60bac4d6SBjoern A. Zeeb vdev_id = le32_get_bits(resp->peer_map_ev.info,
631*60bac4d6SBjoern A. Zeeb HTT_T2H_PEER_MAP_INFO_VDEV_ID);
632*60bac4d6SBjoern A. Zeeb peer_id = le32_get_bits(resp->peer_map_ev.info,
633*60bac4d6SBjoern A. Zeeb HTT_T2H_PEER_MAP_INFO_PEER_ID);
634*60bac4d6SBjoern A. Zeeb peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
635*60bac4d6SBjoern A. Zeeb HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
636*60bac4d6SBjoern A. Zeeb ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
637*60bac4d6SBjoern A. Zeeb peer_mac_h16, mac_addr);
638*60bac4d6SBjoern A. Zeeb ast_hash = le32_get_bits(resp->peer_map_ev.info2,
639*60bac4d6SBjoern A. Zeeb HTT_T2H_PEER_MAP3_INFO2_AST_HASH_VAL);
640*60bac4d6SBjoern A. Zeeb hw_peer_id = le32_get_bits(resp->peer_map_ev.info2,
641*60bac4d6SBjoern A. Zeeb HTT_T2H_PEER_MAP3_INFO2_HW_PEER_ID);
642*60bac4d6SBjoern A. Zeeb ath12k_dp_link_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
643*60bac4d6SBjoern A. Zeeb hw_peer_id);
644*60bac4d6SBjoern A. Zeeb break;
645*60bac4d6SBjoern A. Zeeb case HTT_T2H_MSG_TYPE_PEER_UNMAP:
646*60bac4d6SBjoern A. Zeeb case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
647*60bac4d6SBjoern A. Zeeb peer_id = le32_get_bits(resp->peer_unmap_ev.info,
648*60bac4d6SBjoern A. Zeeb HTT_T2H_PEER_UNMAP_INFO_PEER_ID);
649*60bac4d6SBjoern A. Zeeb ath12k_dp_link_peer_unmap_event(ab, peer_id);
650*60bac4d6SBjoern A. Zeeb break;
651*60bac4d6SBjoern A. Zeeb case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
652*60bac4d6SBjoern A. Zeeb ath12k_htt_pull_ppdu_stats(ab, skb);
653*60bac4d6SBjoern A. Zeeb break;
654*60bac4d6SBjoern A. Zeeb case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
655*60bac4d6SBjoern A. Zeeb ath12k_debugfs_htt_ext_stats_handler(ab, skb);
656*60bac4d6SBjoern A. Zeeb break;
657*60bac4d6SBjoern A. Zeeb case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND:
658*60bac4d6SBjoern A. Zeeb ath12k_htt_mlo_offset_event_handler(ab, skb);
659*60bac4d6SBjoern A. Zeeb break;
660*60bac4d6SBjoern A. Zeeb default:
661*60bac4d6SBjoern A. Zeeb ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt event %d not handled\n",
662*60bac4d6SBjoern A. Zeeb type);
663*60bac4d6SBjoern A. Zeeb break;
664*60bac4d6SBjoern A. Zeeb }
665*60bac4d6SBjoern A. Zeeb
666*60bac4d6SBjoern A. Zeeb dev_kfree_skb_any(skb);
667*60bac4d6SBjoern A. Zeeb }
668*60bac4d6SBjoern A. Zeeb EXPORT_SYMBOL(ath12k_dp_htt_htc_t2h_msg_handler);
669*60bac4d6SBjoern A. Zeeb
670*60bac4d6SBjoern A. Zeeb static int
ath12k_dp_tx_get_ring_id_type(struct ath12k_base * ab,int mac_id,u32 ring_id,enum hal_ring_type ring_type,enum htt_srng_ring_type * htt_ring_type,enum htt_srng_ring_id * htt_ring_id)671*60bac4d6SBjoern A. Zeeb ath12k_dp_tx_get_ring_id_type(struct ath12k_base *ab,
672*60bac4d6SBjoern A. Zeeb int mac_id, u32 ring_id,
673*60bac4d6SBjoern A. Zeeb enum hal_ring_type ring_type,
674*60bac4d6SBjoern A. Zeeb enum htt_srng_ring_type *htt_ring_type,
675*60bac4d6SBjoern A. Zeeb enum htt_srng_ring_id *htt_ring_id)
676*60bac4d6SBjoern A. Zeeb {
677*60bac4d6SBjoern A. Zeeb int ret = 0;
678*60bac4d6SBjoern A. Zeeb
679*60bac4d6SBjoern A. Zeeb switch (ring_type) {
680*60bac4d6SBjoern A. Zeeb case HAL_RXDMA_BUF:
681*60bac4d6SBjoern A. Zeeb /* for some targets, host fills rx buffer to fw and fw fills to
682*60bac4d6SBjoern A. Zeeb * rxbuf ring for each rxdma
683*60bac4d6SBjoern A. Zeeb */
684*60bac4d6SBjoern A. Zeeb if (!ab->hw_params->rx_mac_buf_ring) {
685*60bac4d6SBjoern A. Zeeb if (!(ring_id == HAL_SRNG_SW2RXDMA_BUF0 ||
686*60bac4d6SBjoern A. Zeeb ring_id == HAL_SRNG_SW2RXDMA_BUF1)) {
687*60bac4d6SBjoern A. Zeeb ret = -EINVAL;
688*60bac4d6SBjoern A. Zeeb }
689*60bac4d6SBjoern A. Zeeb *htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
690*60bac4d6SBjoern A. Zeeb *htt_ring_type = HTT_SW_TO_HW_RING;
691*60bac4d6SBjoern A. Zeeb } else {
692*60bac4d6SBjoern A. Zeeb if (ring_id == HAL_SRNG_SW2RXDMA_BUF0) {
693*60bac4d6SBjoern A. Zeeb *htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
694*60bac4d6SBjoern A. Zeeb *htt_ring_type = HTT_SW_TO_SW_RING;
695*60bac4d6SBjoern A. Zeeb } else {
696*60bac4d6SBjoern A. Zeeb *htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
697*60bac4d6SBjoern A. Zeeb *htt_ring_type = HTT_SW_TO_HW_RING;
698*60bac4d6SBjoern A. Zeeb }
699*60bac4d6SBjoern A. Zeeb }
700*60bac4d6SBjoern A. Zeeb break;
701*60bac4d6SBjoern A. Zeeb case HAL_RXDMA_DST:
702*60bac4d6SBjoern A. Zeeb *htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
703*60bac4d6SBjoern A. Zeeb *htt_ring_type = HTT_HW_TO_SW_RING;
704*60bac4d6SBjoern A. Zeeb break;
705*60bac4d6SBjoern A. Zeeb case HAL_RXDMA_MONITOR_BUF:
706*60bac4d6SBjoern A. Zeeb *htt_ring_id = HTT_RX_MON_HOST2MON_BUF_RING;
707*60bac4d6SBjoern A. Zeeb *htt_ring_type = HTT_SW_TO_HW_RING;
708*60bac4d6SBjoern A. Zeeb break;
709*60bac4d6SBjoern A. Zeeb case HAL_RXDMA_MONITOR_STATUS:
710*60bac4d6SBjoern A. Zeeb *htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
711*60bac4d6SBjoern A. Zeeb *htt_ring_type = HTT_SW_TO_HW_RING;
712*60bac4d6SBjoern A. Zeeb break;
713*60bac4d6SBjoern A. Zeeb case HAL_RXDMA_MONITOR_DST:
714*60bac4d6SBjoern A. Zeeb *htt_ring_id = HTT_RX_MON_MON2HOST_DEST_RING;
715*60bac4d6SBjoern A. Zeeb *htt_ring_type = HTT_HW_TO_SW_RING;
716*60bac4d6SBjoern A. Zeeb break;
717*60bac4d6SBjoern A. Zeeb case HAL_RXDMA_MONITOR_DESC:
718*60bac4d6SBjoern A. Zeeb *htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
719*60bac4d6SBjoern A. Zeeb *htt_ring_type = HTT_SW_TO_HW_RING;
720*60bac4d6SBjoern A. Zeeb break;
721*60bac4d6SBjoern A. Zeeb default:
722*60bac4d6SBjoern A. Zeeb ath12k_warn(ab, "Unsupported ring type in DP :%d\n", ring_type);
723*60bac4d6SBjoern A. Zeeb ret = -EINVAL;
724*60bac4d6SBjoern A. Zeeb }
725*60bac4d6SBjoern A. Zeeb return ret;
726*60bac4d6SBjoern A. Zeeb }
727*60bac4d6SBjoern A. Zeeb
ath12k_dp_tx_htt_srng_setup(struct ath12k_base * ab,u32 ring_id,int mac_id,enum hal_ring_type ring_type)728*60bac4d6SBjoern A. Zeeb int ath12k_dp_tx_htt_srng_setup(struct ath12k_base *ab, u32 ring_id,
729*60bac4d6SBjoern A. Zeeb int mac_id, enum hal_ring_type ring_type)
730*60bac4d6SBjoern A. Zeeb {
731*60bac4d6SBjoern A. Zeeb struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
732*60bac4d6SBjoern A. Zeeb struct htt_srng_setup_cmd *cmd;
733*60bac4d6SBjoern A. Zeeb struct hal_srng *srng = &ab->hal.srng_list[ring_id];
734*60bac4d6SBjoern A. Zeeb struct hal_srng_params params;
735*60bac4d6SBjoern A. Zeeb struct sk_buff *skb;
736*60bac4d6SBjoern A. Zeeb u32 ring_entry_sz;
737*60bac4d6SBjoern A. Zeeb int len = sizeof(*cmd);
738*60bac4d6SBjoern A. Zeeb dma_addr_t hp_addr, tp_addr;
739*60bac4d6SBjoern A. Zeeb enum htt_srng_ring_type htt_ring_type;
740*60bac4d6SBjoern A. Zeeb enum htt_srng_ring_id htt_ring_id;
741*60bac4d6SBjoern A. Zeeb int ret;
742*60bac4d6SBjoern A. Zeeb
743*60bac4d6SBjoern A. Zeeb skb = ath12k_htc_alloc_skb(ab, len);
744*60bac4d6SBjoern A. Zeeb if (!skb)
745*60bac4d6SBjoern A. Zeeb return -ENOMEM;
746*60bac4d6SBjoern A. Zeeb
747*60bac4d6SBjoern A. Zeeb memset(¶ms, 0, sizeof(params));
748*60bac4d6SBjoern A. Zeeb ath12k_hal_srng_get_params(ab, srng, ¶ms);
749*60bac4d6SBjoern A. Zeeb
750*60bac4d6SBjoern A. Zeeb hp_addr = ath12k_hal_srng_get_hp_addr(ab, srng);
751*60bac4d6SBjoern A. Zeeb tp_addr = ath12k_hal_srng_get_tp_addr(ab, srng);
752*60bac4d6SBjoern A. Zeeb
753*60bac4d6SBjoern A. Zeeb ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
754*60bac4d6SBjoern A. Zeeb ring_type, &htt_ring_type,
755*60bac4d6SBjoern A. Zeeb &htt_ring_id);
756*60bac4d6SBjoern A. Zeeb if (ret)
757*60bac4d6SBjoern A. Zeeb goto err_free;
758*60bac4d6SBjoern A. Zeeb
759*60bac4d6SBjoern A. Zeeb skb_put(skb, len);
760*60bac4d6SBjoern A. Zeeb cmd = (struct htt_srng_setup_cmd *)skb->data;
761*60bac4d6SBjoern A. Zeeb cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_SRING_SETUP,
762*60bac4d6SBjoern A. Zeeb HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE);
763*60bac4d6SBjoern A. Zeeb if (htt_ring_type == HTT_SW_TO_HW_RING ||
764*60bac4d6SBjoern A. Zeeb htt_ring_type == HTT_HW_TO_SW_RING)
765*60bac4d6SBjoern A. Zeeb cmd->info0 |= le32_encode_bits(DP_SW2HW_MACID(mac_id),
766*60bac4d6SBjoern A. Zeeb HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID);
767*60bac4d6SBjoern A. Zeeb else
768*60bac4d6SBjoern A. Zeeb cmd->info0 |= le32_encode_bits(mac_id,
769*60bac4d6SBjoern A. Zeeb HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID);
770*60bac4d6SBjoern A. Zeeb cmd->info0 |= le32_encode_bits(htt_ring_type,
771*60bac4d6SBjoern A. Zeeb HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE);
772*60bac4d6SBjoern A. Zeeb cmd->info0 |= le32_encode_bits(htt_ring_id,
773*60bac4d6SBjoern A. Zeeb HTT_SRNG_SETUP_CMD_INFO0_RING_ID);
774*60bac4d6SBjoern A. Zeeb
775*60bac4d6SBjoern A. Zeeb cmd->ring_base_addr_lo = cpu_to_le32(params.ring_base_paddr &
776*60bac4d6SBjoern A. Zeeb HAL_ADDR_LSB_REG_MASK);
777*60bac4d6SBjoern A. Zeeb
778*60bac4d6SBjoern A. Zeeb cmd->ring_base_addr_hi = cpu_to_le32((u64)params.ring_base_paddr >>
779*60bac4d6SBjoern A. Zeeb HAL_ADDR_MSB_REG_SHIFT);
780*60bac4d6SBjoern A. Zeeb
781*60bac4d6SBjoern A. Zeeb ret = ath12k_hal_srng_get_entrysize(ab, ring_type);
782*60bac4d6SBjoern A. Zeeb if (ret < 0)
783*60bac4d6SBjoern A. Zeeb goto err_free;
784*60bac4d6SBjoern A. Zeeb
785*60bac4d6SBjoern A. Zeeb ring_entry_sz = ret;
786*60bac4d6SBjoern A. Zeeb
787*60bac4d6SBjoern A. Zeeb ring_entry_sz >>= 2;
788*60bac4d6SBjoern A. Zeeb cmd->info1 = le32_encode_bits(ring_entry_sz,
789*60bac4d6SBjoern A. Zeeb HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE);
790*60bac4d6SBjoern A. Zeeb cmd->info1 |= le32_encode_bits(params.num_entries * ring_entry_sz,
791*60bac4d6SBjoern A. Zeeb HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE);
792*60bac4d6SBjoern A. Zeeb cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP),
793*60bac4d6SBjoern A. Zeeb HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP);
794*60bac4d6SBjoern A. Zeeb cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
795*60bac4d6SBjoern A. Zeeb HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP);
796*60bac4d6SBjoern A. Zeeb cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_RING_PTR_SWAP),
797*60bac4d6SBjoern A. Zeeb HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP);
798*60bac4d6SBjoern A. Zeeb if (htt_ring_type == HTT_SW_TO_HW_RING)
799*60bac4d6SBjoern A. Zeeb cmd->info1 |= cpu_to_le32(HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS);
800*60bac4d6SBjoern A. Zeeb
801*60bac4d6SBjoern A. Zeeb cmd->ring_head_off32_remote_addr_lo = cpu_to_le32(lower_32_bits(hp_addr));
802*60bac4d6SBjoern A. Zeeb cmd->ring_head_off32_remote_addr_hi = cpu_to_le32(upper_32_bits(hp_addr));
803*60bac4d6SBjoern A. Zeeb
804*60bac4d6SBjoern A. Zeeb cmd->ring_tail_off32_remote_addr_lo = cpu_to_le32(lower_32_bits(tp_addr));
805*60bac4d6SBjoern A. Zeeb cmd->ring_tail_off32_remote_addr_hi = cpu_to_le32(upper_32_bits(tp_addr));
806*60bac4d6SBjoern A. Zeeb
807*60bac4d6SBjoern A. Zeeb cmd->ring_msi_addr_lo = cpu_to_le32(lower_32_bits(params.msi_addr));
808*60bac4d6SBjoern A. Zeeb cmd->ring_msi_addr_hi = cpu_to_le32(upper_32_bits(params.msi_addr));
809*60bac4d6SBjoern A. Zeeb cmd->msi_data = cpu_to_le32(params.msi_data);
810*60bac4d6SBjoern A. Zeeb
811*60bac4d6SBjoern A. Zeeb cmd->intr_info =
812*60bac4d6SBjoern A. Zeeb le32_encode_bits(params.intr_batch_cntr_thres_entries * ring_entry_sz,
813*60bac4d6SBjoern A. Zeeb HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH);
814*60bac4d6SBjoern A. Zeeb cmd->intr_info |=
815*60bac4d6SBjoern A. Zeeb le32_encode_bits(params.intr_timer_thres_us >> 3,
816*60bac4d6SBjoern A. Zeeb HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH);
817*60bac4d6SBjoern A. Zeeb
818*60bac4d6SBjoern A. Zeeb cmd->info2 = 0;
819*60bac4d6SBjoern A. Zeeb if (params.flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
820*60bac4d6SBjoern A. Zeeb cmd->info2 = le32_encode_bits(params.low_threshold,
821*60bac4d6SBjoern A. Zeeb HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH);
822*60bac4d6SBjoern A. Zeeb }
823*60bac4d6SBjoern A. Zeeb
824*60bac4d6SBjoern A. Zeeb ath12k_dbg(ab, ATH12K_DBG_HAL,
825*60bac4d6SBjoern A. Zeeb "%s msi_addr_lo:0x%x, msi_addr_hi:0x%x, msi_data:0x%x\n",
826*60bac4d6SBjoern A. Zeeb __func__, cmd->ring_msi_addr_lo, cmd->ring_msi_addr_hi,
827*60bac4d6SBjoern A. Zeeb cmd->msi_data);
828*60bac4d6SBjoern A. Zeeb
829*60bac4d6SBjoern A. Zeeb ath12k_dbg(ab, ATH12K_DBG_HAL,
830*60bac4d6SBjoern A. Zeeb "ring_id:%d, ring_type:%d, intr_info:0x%x, flags:0x%x\n",
831*60bac4d6SBjoern A. Zeeb ring_id, ring_type, cmd->intr_info, cmd->info2);
832*60bac4d6SBjoern A. Zeeb
833*60bac4d6SBjoern A. Zeeb ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
834*60bac4d6SBjoern A. Zeeb if (ret)
835*60bac4d6SBjoern A. Zeeb goto err_free;
836*60bac4d6SBjoern A. Zeeb
837*60bac4d6SBjoern A. Zeeb return 0;
838*60bac4d6SBjoern A. Zeeb
839*60bac4d6SBjoern A. Zeeb err_free:
840*60bac4d6SBjoern A. Zeeb dev_kfree_skb_any(skb);
841*60bac4d6SBjoern A. Zeeb
842*60bac4d6SBjoern A. Zeeb return ret;
843*60bac4d6SBjoern A. Zeeb }
844*60bac4d6SBjoern A. Zeeb
ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base * ab)845*60bac4d6SBjoern A. Zeeb int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab)
846*60bac4d6SBjoern A. Zeeb {
847*60bac4d6SBjoern A. Zeeb struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
848*60bac4d6SBjoern A. Zeeb struct sk_buff *skb;
849*60bac4d6SBjoern A. Zeeb struct htt_ver_req_cmd *cmd;
850*60bac4d6SBjoern A. Zeeb int len = sizeof(*cmd);
851*60bac4d6SBjoern A. Zeeb u32 metadata_version;
852*60bac4d6SBjoern A. Zeeb int ret;
853*60bac4d6SBjoern A. Zeeb
854*60bac4d6SBjoern A. Zeeb init_completion(&dp->htt_tgt_version_received);
855*60bac4d6SBjoern A. Zeeb
856*60bac4d6SBjoern A. Zeeb skb = ath12k_htc_alloc_skb(ab, len);
857*60bac4d6SBjoern A. Zeeb if (!skb)
858*60bac4d6SBjoern A. Zeeb return -ENOMEM;
859*60bac4d6SBjoern A. Zeeb
860*60bac4d6SBjoern A. Zeeb skb_put(skb, len);
861*60bac4d6SBjoern A. Zeeb cmd = (struct htt_ver_req_cmd *)skb->data;
862*60bac4d6SBjoern A. Zeeb cmd->ver_reg_info = le32_encode_bits(HTT_H2T_MSG_TYPE_VERSION_REQ,
863*60bac4d6SBjoern A. Zeeb HTT_OPTION_TAG);
864*60bac4d6SBjoern A. Zeeb metadata_version = ath12k_ftm_mode ? HTT_OPTION_TCL_METADATA_VER_V1 :
865*60bac4d6SBjoern A. Zeeb HTT_OPTION_TCL_METADATA_VER_V2;
866*60bac4d6SBjoern A. Zeeb
867*60bac4d6SBjoern A. Zeeb cmd->tcl_metadata_version = le32_encode_bits(HTT_TAG_TCL_METADATA_VERSION,
868*60bac4d6SBjoern A. Zeeb HTT_OPTION_TAG) |
869*60bac4d6SBjoern A. Zeeb le32_encode_bits(HTT_TCL_METADATA_VER_SZ,
870*60bac4d6SBjoern A. Zeeb HTT_OPTION_LEN) |
871*60bac4d6SBjoern A. Zeeb le32_encode_bits(metadata_version,
872*60bac4d6SBjoern A. Zeeb HTT_OPTION_VALUE);
873*60bac4d6SBjoern A. Zeeb
874*60bac4d6SBjoern A. Zeeb ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
875*60bac4d6SBjoern A. Zeeb if (ret) {
876*60bac4d6SBjoern A. Zeeb dev_kfree_skb_any(skb);
877*60bac4d6SBjoern A. Zeeb return ret;
878*60bac4d6SBjoern A. Zeeb }
879*60bac4d6SBjoern A. Zeeb
880*60bac4d6SBjoern A. Zeeb ret = wait_for_completion_timeout(&dp->htt_tgt_version_received,
881*60bac4d6SBjoern A. Zeeb HTT_TARGET_VERSION_TIMEOUT_HZ);
882*60bac4d6SBjoern A. Zeeb if (ret == 0) {
883*60bac4d6SBjoern A. Zeeb ath12k_warn(ab, "htt target version request timed out\n");
884*60bac4d6SBjoern A. Zeeb return -ETIMEDOUT;
885*60bac4d6SBjoern A. Zeeb }
886*60bac4d6SBjoern A. Zeeb
887*60bac4d6SBjoern A. Zeeb if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
888*60bac4d6SBjoern A. Zeeb ath12k_err(ab, "unsupported htt major version %d supported version is %d\n",
889*60bac4d6SBjoern A. Zeeb dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
890*60bac4d6SBjoern A. Zeeb return -EOPNOTSUPP;
891*60bac4d6SBjoern A. Zeeb }
892*60bac4d6SBjoern A. Zeeb
893*60bac4d6SBjoern A. Zeeb return 0;
894*60bac4d6SBjoern A. Zeeb }
895*60bac4d6SBjoern A. Zeeb
ath12k_dp_tx_htt_h2t_ppdu_stats_req(struct ath12k * ar,u32 mask)896*60bac4d6SBjoern A. Zeeb int ath12k_dp_tx_htt_h2t_ppdu_stats_req(struct ath12k *ar, u32 mask)
897*60bac4d6SBjoern A. Zeeb {
898*60bac4d6SBjoern A. Zeeb struct ath12k_base *ab = ar->ab;
899*60bac4d6SBjoern A. Zeeb struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
900*60bac4d6SBjoern A. Zeeb struct sk_buff *skb;
901*60bac4d6SBjoern A. Zeeb struct htt_ppdu_stats_cfg_cmd *cmd;
902*60bac4d6SBjoern A. Zeeb int len = sizeof(*cmd);
903*60bac4d6SBjoern A. Zeeb u8 pdev_mask;
904*60bac4d6SBjoern A. Zeeb int ret;
905*60bac4d6SBjoern A. Zeeb int i;
906*60bac4d6SBjoern A. Zeeb
907*60bac4d6SBjoern A. Zeeb for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
908*60bac4d6SBjoern A. Zeeb skb = ath12k_htc_alloc_skb(ab, len);
909*60bac4d6SBjoern A. Zeeb if (!skb)
910*60bac4d6SBjoern A. Zeeb return -ENOMEM;
911*60bac4d6SBjoern A. Zeeb
912*60bac4d6SBjoern A. Zeeb skb_put(skb, len);
913*60bac4d6SBjoern A. Zeeb cmd = (struct htt_ppdu_stats_cfg_cmd *)skb->data;
914*60bac4d6SBjoern A. Zeeb cmd->msg = le32_encode_bits(HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
915*60bac4d6SBjoern A. Zeeb HTT_PPDU_STATS_CFG_MSG_TYPE);
916*60bac4d6SBjoern A. Zeeb
917*60bac4d6SBjoern A. Zeeb pdev_mask = 1 << (i + ar->pdev_idx);
918*60bac4d6SBjoern A. Zeeb cmd->msg |= le32_encode_bits(pdev_mask, HTT_PPDU_STATS_CFG_PDEV_ID);
919*60bac4d6SBjoern A. Zeeb cmd->msg |= le32_encode_bits(mask, HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK);
920*60bac4d6SBjoern A. Zeeb
921*60bac4d6SBjoern A. Zeeb ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
922*60bac4d6SBjoern A. Zeeb if (ret) {
923*60bac4d6SBjoern A. Zeeb dev_kfree_skb_any(skb);
924*60bac4d6SBjoern A. Zeeb return ret;
925*60bac4d6SBjoern A. Zeeb }
926*60bac4d6SBjoern A. Zeeb }
927*60bac4d6SBjoern A. Zeeb
928*60bac4d6SBjoern A. Zeeb return 0;
929*60bac4d6SBjoern A. Zeeb }
930*60bac4d6SBjoern A. Zeeb
ath12k_dp_tx_htt_rx_filter_setup(struct ath12k_base * ab,u32 ring_id,int mac_id,enum hal_ring_type ring_type,int rx_buf_size,struct htt_rx_ring_tlv_filter * tlv_filter)931*60bac4d6SBjoern A. Zeeb int ath12k_dp_tx_htt_rx_filter_setup(struct ath12k_base *ab, u32 ring_id,
932*60bac4d6SBjoern A. Zeeb int mac_id, enum hal_ring_type ring_type,
933*60bac4d6SBjoern A. Zeeb int rx_buf_size,
934*60bac4d6SBjoern A. Zeeb struct htt_rx_ring_tlv_filter *tlv_filter)
935*60bac4d6SBjoern A. Zeeb {
936*60bac4d6SBjoern A. Zeeb struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
937*60bac4d6SBjoern A. Zeeb struct htt_rx_ring_selection_cfg_cmd *cmd;
938*60bac4d6SBjoern A. Zeeb struct hal_srng *srng = &ab->hal.srng_list[ring_id];
939*60bac4d6SBjoern A. Zeeb struct hal_srng_params params;
940*60bac4d6SBjoern A. Zeeb struct sk_buff *skb;
941*60bac4d6SBjoern A. Zeeb int len = sizeof(*cmd);
942*60bac4d6SBjoern A. Zeeb enum htt_srng_ring_type htt_ring_type;
943*60bac4d6SBjoern A. Zeeb enum htt_srng_ring_id htt_ring_id;
944*60bac4d6SBjoern A. Zeeb int ret;
945*60bac4d6SBjoern A. Zeeb
946*60bac4d6SBjoern A. Zeeb skb = ath12k_htc_alloc_skb(ab, len);
947*60bac4d6SBjoern A. Zeeb if (!skb)
948*60bac4d6SBjoern A. Zeeb return -ENOMEM;
949*60bac4d6SBjoern A. Zeeb
950*60bac4d6SBjoern A. Zeeb memset(¶ms, 0, sizeof(params));
951*60bac4d6SBjoern A. Zeeb ath12k_hal_srng_get_params(ab, srng, ¶ms);
952*60bac4d6SBjoern A. Zeeb
953*60bac4d6SBjoern A. Zeeb ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
954*60bac4d6SBjoern A. Zeeb ring_type, &htt_ring_type,
955*60bac4d6SBjoern A. Zeeb &htt_ring_id);
956*60bac4d6SBjoern A. Zeeb if (ret)
957*60bac4d6SBjoern A. Zeeb goto err_free;
958*60bac4d6SBjoern A. Zeeb
959*60bac4d6SBjoern A. Zeeb skb_put(skb, len);
960*60bac4d6SBjoern A. Zeeb cmd = (struct htt_rx_ring_selection_cfg_cmd *)skb->data;
961*60bac4d6SBjoern A. Zeeb cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG,
962*60bac4d6SBjoern A. Zeeb HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE);
963*60bac4d6SBjoern A. Zeeb if (htt_ring_type == HTT_SW_TO_HW_RING ||
964*60bac4d6SBjoern A. Zeeb htt_ring_type == HTT_HW_TO_SW_RING)
965*60bac4d6SBjoern A. Zeeb cmd->info0 |=
966*60bac4d6SBjoern A. Zeeb le32_encode_bits(DP_SW2HW_MACID(mac_id),
967*60bac4d6SBjoern A. Zeeb HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
968*60bac4d6SBjoern A. Zeeb else
969*60bac4d6SBjoern A. Zeeb cmd->info0 |=
970*60bac4d6SBjoern A. Zeeb le32_encode_bits(mac_id,
971*60bac4d6SBjoern A. Zeeb HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
972*60bac4d6SBjoern A. Zeeb cmd->info0 |= le32_encode_bits(htt_ring_id,
973*60bac4d6SBjoern A. Zeeb HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID);
974*60bac4d6SBjoern A. Zeeb cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP),
975*60bac4d6SBjoern A. Zeeb HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS);
976*60bac4d6SBjoern A. Zeeb cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
977*60bac4d6SBjoern A. Zeeb HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS);
978*60bac4d6SBjoern A. Zeeb cmd->info0 |= le32_encode_bits(tlv_filter->offset_valid,
979*60bac4d6SBjoern A. Zeeb HTT_RX_RING_SELECTION_CFG_CMD_INFO0_OFFSET_VALID);
980*60bac4d6SBjoern A. Zeeb cmd->info0 |=
981*60bac4d6SBjoern A. Zeeb le32_encode_bits(tlv_filter->drop_threshold_valid,
982*60bac4d6SBjoern A. Zeeb HTT_RX_RING_SELECTION_CFG_CMD_INFO0_DROP_THRES_VAL);
983*60bac4d6SBjoern A. Zeeb cmd->info0 |= le32_encode_bits(!tlv_filter->rxmon_disable,
984*60bac4d6SBjoern A. Zeeb HTT_RX_RING_SELECTION_CFG_CMD_INFO0_EN_RXMON);
985*60bac4d6SBjoern A. Zeeb
986*60bac4d6SBjoern A. Zeeb cmd->info1 = le32_encode_bits(rx_buf_size,
987*60bac4d6SBjoern A. Zeeb HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE);
988*60bac4d6SBjoern A. Zeeb cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_mgmt,
989*60bac4d6SBjoern A. Zeeb HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT);
990*60bac4d6SBjoern A. Zeeb cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_ctrl,
991*60bac4d6SBjoern A. Zeeb HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL);
992*60bac4d6SBjoern A. Zeeb cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_data,
993*60bac4d6SBjoern A. Zeeb HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA);
994*60bac4d6SBjoern A. Zeeb cmd->pkt_type_en_flags0 = cpu_to_le32(tlv_filter->pkt_filter_flags0);
995*60bac4d6SBjoern A. Zeeb cmd->pkt_type_en_flags1 = cpu_to_le32(tlv_filter->pkt_filter_flags1);
996*60bac4d6SBjoern A. Zeeb cmd->pkt_type_en_flags2 = cpu_to_le32(tlv_filter->pkt_filter_flags2);
997*60bac4d6SBjoern A. Zeeb cmd->pkt_type_en_flags3 = cpu_to_le32(tlv_filter->pkt_filter_flags3);
998*60bac4d6SBjoern A. Zeeb cmd->rx_filter_tlv = cpu_to_le32(tlv_filter->rx_filter);
999*60bac4d6SBjoern A. Zeeb
1000*60bac4d6SBjoern A. Zeeb cmd->info2 = le32_encode_bits(tlv_filter->rx_drop_threshold,
1001*60bac4d6SBjoern A. Zeeb HTT_RX_RING_SELECTION_CFG_CMD_INFO2_DROP_THRESHOLD);
1002*60bac4d6SBjoern A. Zeeb cmd->info2 |=
1003*60bac4d6SBjoern A. Zeeb le32_encode_bits(tlv_filter->enable_log_mgmt_type,
1004*60bac4d6SBjoern A. Zeeb HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_MGMT_TYPE);
1005*60bac4d6SBjoern A. Zeeb cmd->info2 |=
1006*60bac4d6SBjoern A. Zeeb le32_encode_bits(tlv_filter->enable_log_ctrl_type,
1007*60bac4d6SBjoern A. Zeeb HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_CTRL_TYPE);
1008*60bac4d6SBjoern A. Zeeb cmd->info2 |=
1009*60bac4d6SBjoern A. Zeeb le32_encode_bits(tlv_filter->enable_log_data_type,
1010*60bac4d6SBjoern A. Zeeb HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_DATA_TYPE);
1011*60bac4d6SBjoern A. Zeeb
1012*60bac4d6SBjoern A. Zeeb cmd->info3 =
1013*60bac4d6SBjoern A. Zeeb le32_encode_bits(tlv_filter->enable_rx_tlv_offset,
1014*60bac4d6SBjoern A. Zeeb HTT_RX_RING_SELECTION_CFG_CMD_INFO3_EN_TLV_PKT_OFFSET);
1015*60bac4d6SBjoern A. Zeeb cmd->info3 |=
1016*60bac4d6SBjoern A. Zeeb le32_encode_bits(tlv_filter->rx_tlv_offset,
1017*60bac4d6SBjoern A. Zeeb HTT_RX_RING_SELECTION_CFG_CMD_INFO3_PKT_TLV_OFFSET);
1018*60bac4d6SBjoern A. Zeeb
1019*60bac4d6SBjoern A. Zeeb if (tlv_filter->offset_valid) {
1020*60bac4d6SBjoern A. Zeeb cmd->rx_packet_offset =
1021*60bac4d6SBjoern A. Zeeb le32_encode_bits(tlv_filter->rx_packet_offset,
1022*60bac4d6SBjoern A. Zeeb HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET);
1023*60bac4d6SBjoern A. Zeeb
1024*60bac4d6SBjoern A. Zeeb cmd->rx_packet_offset |=
1025*60bac4d6SBjoern A. Zeeb le32_encode_bits(tlv_filter->rx_header_offset,
1026*60bac4d6SBjoern A. Zeeb HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET);
1027*60bac4d6SBjoern A. Zeeb
1028*60bac4d6SBjoern A. Zeeb cmd->rx_mpdu_offset =
1029*60bac4d6SBjoern A. Zeeb le32_encode_bits(tlv_filter->rx_mpdu_end_offset,
1030*60bac4d6SBjoern A. Zeeb HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET);
1031*60bac4d6SBjoern A. Zeeb
1032*60bac4d6SBjoern A. Zeeb cmd->rx_mpdu_offset |=
1033*60bac4d6SBjoern A. Zeeb le32_encode_bits(tlv_filter->rx_mpdu_start_offset,
1034*60bac4d6SBjoern A. Zeeb HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET);
1035*60bac4d6SBjoern A. Zeeb
1036*60bac4d6SBjoern A. Zeeb cmd->rx_msdu_offset =
1037*60bac4d6SBjoern A. Zeeb le32_encode_bits(tlv_filter->rx_msdu_end_offset,
1038*60bac4d6SBjoern A. Zeeb HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET);
1039*60bac4d6SBjoern A. Zeeb
1040*60bac4d6SBjoern A. Zeeb cmd->rx_msdu_offset |=
1041*60bac4d6SBjoern A. Zeeb le32_encode_bits(tlv_filter->rx_msdu_start_offset,
1042*60bac4d6SBjoern A. Zeeb HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET);
1043*60bac4d6SBjoern A. Zeeb
1044*60bac4d6SBjoern A. Zeeb cmd->rx_attn_offset =
1045*60bac4d6SBjoern A. Zeeb le32_encode_bits(tlv_filter->rx_attn_offset,
1046*60bac4d6SBjoern A. Zeeb HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET);
1047*60bac4d6SBjoern A. Zeeb }
1048*60bac4d6SBjoern A. Zeeb
1049*60bac4d6SBjoern A. Zeeb if (tlv_filter->rx_mpdu_start_wmask > 0 &&
1050*60bac4d6SBjoern A. Zeeb tlv_filter->rx_msdu_end_wmask > 0) {
1051*60bac4d6SBjoern A. Zeeb cmd->info2 |=
1052*60bac4d6SBjoern A. Zeeb le32_encode_bits(true,
1053*60bac4d6SBjoern A. Zeeb HTT_RX_RING_SELECTION_CFG_WORD_MASK_COMPACT_SET);
1054*60bac4d6SBjoern A. Zeeb cmd->rx_mpdu_start_end_mask =
1055*60bac4d6SBjoern A. Zeeb le32_encode_bits(tlv_filter->rx_mpdu_start_wmask,
1056*60bac4d6SBjoern A. Zeeb HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_MASK);
1057*60bac4d6SBjoern A. Zeeb /* mpdu_end is not used for any hardwares so far
1058*60bac4d6SBjoern A. Zeeb * please assign it in future if any chip is
1059*60bac4d6SBjoern A. Zeeb * using through hal ops
1060*60bac4d6SBjoern A. Zeeb */
1061*60bac4d6SBjoern A. Zeeb cmd->rx_mpdu_start_end_mask |=
1062*60bac4d6SBjoern A. Zeeb le32_encode_bits(tlv_filter->rx_mpdu_end_wmask,
1063*60bac4d6SBjoern A. Zeeb HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_MASK);
1064*60bac4d6SBjoern A. Zeeb cmd->rx_msdu_end_word_mask =
1065*60bac4d6SBjoern A. Zeeb le32_encode_bits(tlv_filter->rx_msdu_end_wmask,
1066*60bac4d6SBjoern A. Zeeb HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_MASK);
1067*60bac4d6SBjoern A. Zeeb }
1068*60bac4d6SBjoern A. Zeeb
1069*60bac4d6SBjoern A. Zeeb ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
1070*60bac4d6SBjoern A. Zeeb if (ret)
1071*60bac4d6SBjoern A. Zeeb goto err_free;
1072*60bac4d6SBjoern A. Zeeb
1073*60bac4d6SBjoern A. Zeeb return 0;
1074*60bac4d6SBjoern A. Zeeb
1075*60bac4d6SBjoern A. Zeeb err_free:
1076*60bac4d6SBjoern A. Zeeb dev_kfree_skb_any(skb);
1077*60bac4d6SBjoern A. Zeeb
1078*60bac4d6SBjoern A. Zeeb return ret;
1079*60bac4d6SBjoern A. Zeeb }
1080*60bac4d6SBjoern A. Zeeb EXPORT_SYMBOL(ath12k_dp_tx_htt_rx_filter_setup);
1081*60bac4d6SBjoern A. Zeeb
1082*60bac4d6SBjoern A. Zeeb int
ath12k_dp_tx_htt_h2t_ext_stats_req(struct ath12k * ar,u8 type,struct htt_ext_stats_cfg_params * cfg_params,u64 cookie)1083*60bac4d6SBjoern A. Zeeb ath12k_dp_tx_htt_h2t_ext_stats_req(struct ath12k *ar, u8 type,
1084*60bac4d6SBjoern A. Zeeb struct htt_ext_stats_cfg_params *cfg_params,
1085*60bac4d6SBjoern A. Zeeb u64 cookie)
1086*60bac4d6SBjoern A. Zeeb {
1087*60bac4d6SBjoern A. Zeeb struct ath12k_base *ab = ar->ab;
1088*60bac4d6SBjoern A. Zeeb struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
1089*60bac4d6SBjoern A. Zeeb struct sk_buff *skb;
1090*60bac4d6SBjoern A. Zeeb struct htt_ext_stats_cfg_cmd *cmd;
1091*60bac4d6SBjoern A. Zeeb int len = sizeof(*cmd);
1092*60bac4d6SBjoern A. Zeeb int ret;
1093*60bac4d6SBjoern A. Zeeb u32 pdev_id;
1094*60bac4d6SBjoern A. Zeeb
1095*60bac4d6SBjoern A. Zeeb skb = ath12k_htc_alloc_skb(ab, len);
1096*60bac4d6SBjoern A. Zeeb if (!skb)
1097*60bac4d6SBjoern A. Zeeb return -ENOMEM;
1098*60bac4d6SBjoern A. Zeeb
1099*60bac4d6SBjoern A. Zeeb skb_put(skb, len);
1100*60bac4d6SBjoern A. Zeeb
1101*60bac4d6SBjoern A. Zeeb cmd = (struct htt_ext_stats_cfg_cmd *)skb->data;
1102*60bac4d6SBjoern A. Zeeb memset(cmd, 0, sizeof(*cmd));
1103*60bac4d6SBjoern A. Zeeb cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG;
1104*60bac4d6SBjoern A. Zeeb
1105*60bac4d6SBjoern A. Zeeb pdev_id = ath12k_mac_get_target_pdev_id(ar);
1106*60bac4d6SBjoern A. Zeeb cmd->hdr.pdev_mask = 1 << pdev_id;
1107*60bac4d6SBjoern A. Zeeb
1108*60bac4d6SBjoern A. Zeeb cmd->hdr.stats_type = type;
1109*60bac4d6SBjoern A. Zeeb cmd->cfg_param0 = cpu_to_le32(cfg_params->cfg0);
1110*60bac4d6SBjoern A. Zeeb cmd->cfg_param1 = cpu_to_le32(cfg_params->cfg1);
1111*60bac4d6SBjoern A. Zeeb cmd->cfg_param2 = cpu_to_le32(cfg_params->cfg2);
1112*60bac4d6SBjoern A. Zeeb cmd->cfg_param3 = cpu_to_le32(cfg_params->cfg3);
1113*60bac4d6SBjoern A. Zeeb cmd->cookie_lsb = cpu_to_le32(lower_32_bits(cookie));
1114*60bac4d6SBjoern A. Zeeb cmd->cookie_msb = cpu_to_le32(upper_32_bits(cookie));
1115*60bac4d6SBjoern A. Zeeb
1116*60bac4d6SBjoern A. Zeeb ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
1117*60bac4d6SBjoern A. Zeeb if (ret) {
1118*60bac4d6SBjoern A. Zeeb ath12k_warn(ab, "failed to send htt type stats request: %d",
1119*60bac4d6SBjoern A. Zeeb ret);
1120*60bac4d6SBjoern A. Zeeb dev_kfree_skb_any(skb);
1121*60bac4d6SBjoern A. Zeeb return ret;
1122*60bac4d6SBjoern A. Zeeb }
1123*60bac4d6SBjoern A. Zeeb
1124*60bac4d6SBjoern A. Zeeb return 0;
1125*60bac4d6SBjoern A. Zeeb }
1126*60bac4d6SBjoern A. Zeeb
/* Configure (or, with @reset, restore to default) the rx monitor-mode
 * ring filters for the given radio.  Thin wrapper that forwards to
 * ath12k_dp_tx_htt_rx_monitor_mode_ring_config() and logs on failure.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ath12k_dp_tx_htt_monitor_mode_ring_config(struct ath12k *ar, bool reset)
{
	int ret;

	ret = ath12k_dp_tx_htt_rx_monitor_mode_ring_config(ar, reset);
	if (!ret)
		return 0;

	ath12k_err(ar->ab, "failed to setup rx monitor filter %d\n", ret);
	return ret;
}
1140*60bac4d6SBjoern A. Zeeb
/* Program the HTT rx-ring TLV filters used in monitor mode.
 *
 * When @reset is false, a full monitor-destination filter set is built
 * (all mgmt/ctrl/data packet types, default DMA lengths, drop threshold);
 * when @reset is true, the default status-ring filter is applied instead
 * (optionally widened by the debugfs extended-rx-stats filter).
 *
 * Ring selection depends on the hardware: with rxdma1_enable the filter
 * goes to the per-pdev monitor-destination rings and the function returns
 * early; otherwise it is applied to the shared rx mac buffer rings (only
 * when enabling) and to the monitor-status refill rings.
 *
 * Return: 0 on success, or the first ath12k_dp_tx_htt_rx_filter_setup()
 * error encountered.
 */
int ath12k_dp_tx_htt_rx_monitor_mode_ring_config(struct ath12k *ar, bool reset)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	struct htt_rx_ring_tlv_filter tlv_filter = {};
	int ret, ring_id, i;

	tlv_filter.offset_valid = false;

	if (!reset) {
		/* Enable path: capture everything destined for the monitor
		 * destination ring.
		 */
		tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_DEST_RING;

		tlv_filter.drop_threshold_valid = true;
		tlv_filter.rx_drop_threshold = HTT_RX_RING_TLV_DROP_THRESHOLD_VALUE;

		tlv_filter.enable_log_mgmt_type = true;
		tlv_filter.enable_log_ctrl_type = true;
		tlv_filter.enable_log_data_type = true;

		/* same default DMA capture length for all three frame types */
		tlv_filter.conf_len_ctrl = HTT_RX_RING_DEFAULT_DMA_LENGTH;
		tlv_filter.conf_len_mgmt = HTT_RX_RING_DEFAULT_DMA_LENGTH;
		tlv_filter.conf_len_data = HTT_RX_RING_DEFAULT_DMA_LENGTH;

		tlv_filter.enable_rx_tlv_offset = true;
		tlv_filter.rx_tlv_offset = HTT_RX_RING_PKT_TLV_OFFSET;

		/* accept both filter-pass (FP) and monitor-other (MO)
		 * mgmt/ctrl/data frames
		 */
		tlv_filter.pkt_filter_flags0 =
			HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 |
			HTT_RX_MON_MO_MGMT_FILTER_FLAGS0;
		tlv_filter.pkt_filter_flags1 =
			HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 |
			HTT_RX_MON_MO_MGMT_FILTER_FLAGS1;
		tlv_filter.pkt_filter_flags2 =
			HTT_RX_MON_FP_CTRL_FILTER_FLASG2 |
			HTT_RX_MON_MO_CTRL_FILTER_FLASG2;
		tlv_filter.pkt_filter_flags3 =
			HTT_RX_MON_FP_CTRL_FILTER_FLASG3 |
			HTT_RX_MON_MO_CTRL_FILTER_FLASG3 |
			HTT_RX_MON_FP_DATA_FILTER_FLASG3 |
			HTT_RX_MON_MO_DATA_FILTER_FLASG3;
	} else {
		/* Disable path: fall back to the default status filter */
		tlv_filter = ath12k_mac_mon_status_filter_default;

		if (ath12k_debugfs_is_extd_rx_stats_enabled(ar))
			tlv_filter.rx_filter = ath12k_debugfs_rx_filter(ar);
	}

	if (ab->hw_params->rxdma1_enable) {
		/* Hardware with dedicated rxdma1: program each per-pdev
		 * monitor destination ring and we are done.
		 */
		for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
			ring_id = ar->dp.rxdma_mon_dst_ring[i].ring_id;
			ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
							       ar->dp.mac_id + i,
							       HAL_RXDMA_MONITOR_DST,
							       DP_RXDMA_REFILL_RING_SIZE,
							       &tlv_filter);
			if (ret) {
				ath12k_err(ab,
					   "failed to setup filter for monitor buf %d\n",
					   ret);
				return ret;
			}
		}
		return 0;
	}

	if (!reset) {
		/* shared rx mac buffer rings are only (re)filtered when
		 * enabling monitor mode
		 */
		for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
			ring_id = dp->rx_mac_buf_ring[i].ring_id;
			ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
							       i,
							       HAL_RXDMA_BUF,
							       DP_RXDMA_REFILL_RING_SIZE,
							       &tlv_filter);
			if (ret) {
				ath12k_err(ab,
					   "failed to setup filter for mon rx buf %d\n",
					   ret);
				return ret;
			}
		}
	}

	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
		ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
		if (!reset) {
			/* status ring uses its own TLV filter selection */
			tlv_filter.rx_filter =
				HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING;
		}

		ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id,
						       i,
						       HAL_RXDMA_MONITOR_STATUS,
						       RX_MON_STATUS_BUF_SIZE,
						       &tlv_filter);
		if (ret) {
			ath12k_err(ab,
				   "failed to setup filter for mon status buf %d\n",
				   ret);
			return ret;
		}
	}

	return 0;
}
1245*60bac4d6SBjoern A. Zeeb
/* Send an HTT TX_MONITOR_CFG command programming the TLV filter for a
 * tx monitor ring.
 *
 * Resolves the HTT ring id/type for (@mac_id, @ring_id, @ring_type),
 * fills in the SRNG swap flags and @tx_buf_size, applies the per-frame-
 * type enables and DMA lengths from @htt_tlv_filter, and sends the
 * command on the DP HTC endpoint.
 *
 * @ab:             ath12k base structure
 * @ring_id:        HAL SRNG id of the ring being configured
 * @mac_id:         mac/pdev index (translated via DP_SW2HW_MACID for
 *                  SW<->HW rings)
 * @ring_type:      HAL ring type used to derive the HTT ring id/type
 * @tx_buf_size:    ring buffer size programmed into the command
 * @htt_tlv_filter: caller-built tx monitor filter configuration
 *
 * Return: 0 on success, -ENOMEM on allocation failure, or an error from
 * ring-id resolution / HTC send (skb is freed on those error paths).
 */
int ath12k_dp_tx_htt_tx_filter_setup(struct ath12k_base *ab, u32 ring_id,
				     int mac_id, enum hal_ring_type ring_type,
				     int tx_buf_size,
				     struct htt_tx_ring_tlv_filter *htt_tlv_filter)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	struct htt_tx_ring_selection_cfg_cmd *cmd;
	struct hal_srng *srng = &ab->hal.srng_list[ring_id];
	struct hal_srng_params params;
	struct sk_buff *skb;
	int len = sizeof(*cmd);
	enum htt_srng_ring_type htt_ring_type;
	enum htt_srng_ring_id htt_ring_id;
	int ret;

	skb = ath12k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	/* fixed mojibake: "&params" had been corrupted to an invalid token */
	memset(&params, 0, sizeof(params));
	ath12k_hal_srng_get_params(ab, srng, &params);

	ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
					    ring_type, &htt_ring_type,
					    &htt_ring_id);

	if (ret)
		goto err_free;

	skb_put(skb, len);
	cmd = (struct htt_tx_ring_selection_cfg_cmd *)skb->data;
	cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_TX_MONITOR_CFG,
				      HTT_TX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE);
	/* SW<->HW rings address the target by hardware pdev id */
	if (htt_ring_type == HTT_SW_TO_HW_RING ||
	    htt_ring_type == HTT_HW_TO_SW_RING)
		cmd->info0 |=
			le32_encode_bits(DP_SW2HW_MACID(mac_id),
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
	else
		cmd->info0 |=
			le32_encode_bits(mac_id,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
	cmd->info0 |= le32_encode_bits(htt_ring_id,
				       HTT_TX_RING_SELECTION_CFG_CMD_INFO0_RING_ID);
	/* propagate the SRNG MSI/TLV byte-swap flags to firmware */
	cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP),
				       HTT_TX_RING_SELECTION_CFG_CMD_INFO0_SS);
	cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
				       HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PS);

	cmd->info1 |=
		le32_encode_bits(tx_buf_size,
				 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_RING_BUFF_SIZE);

	if (htt_tlv_filter->tx_mon_mgmt_filter) {
		cmd->info1 |=
			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_MGMT,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE);
		cmd->info1 |=
			le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT);
		cmd->info2 |=
			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_MGMT,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG);
	}

	/* NOTE(review): the data_filter branch programs the CTRL frame type
	 * and the ctrl_filter branch programs the DATA frame type.  This
	 * cross-wiring is preserved as-is since it may match firmware
	 * expectations — confirm against the HTT interface definition before
	 * "fixing" it.
	 */
	if (htt_tlv_filter->tx_mon_data_filter) {
		cmd->info1 |=
			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_CTRL,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE);
		cmd->info1 |=
			le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL);
		cmd->info2 |=
			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_CTRL,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG);
	}

	if (htt_tlv_filter->tx_mon_ctrl_filter) {
		cmd->info1 |=
			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_DATA,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE);
		cmd->info1 |=
			le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA);
		cmd->info2 |=
			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_DATA,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG);
	}

	cmd->tlv_filter_mask_in0 =
		cpu_to_le32(htt_tlv_filter->tx_mon_downstream_tlv_flags);
	cmd->tlv_filter_mask_in1 =
		cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags0);
	cmd->tlv_filter_mask_in2 =
		cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags1);
	cmd->tlv_filter_mask_in3 =
		cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags2);

	ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
	if (ret)
		goto err_free;

	return 0;

err_free:
	dev_kfree_skb_any(skb);
	return ret;
}
1354