// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
 */

#include "core.h"
#include "peer.h"
#include "htc.h"
#include "dp_htt.h"
#include "debugfs_htt_stats.h"
#include "debugfs.h"

static void ath12k_dp_htt_htc_tx_complete(struct ath12k_base *ab,
					  struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

int ath12k_dp_htt_connect(struct ath12k_dp *dp)
{
	struct ath12k_htc_svc_conn_req conn_req = {};
	struct ath12k_htc_svc_conn_resp conn_resp = {};
	int status;

	conn_req.ep_ops.ep_tx_complete = ath12k_dp_htt_htc_tx_complete;
	conn_req.ep_ops.ep_rx_complete = ath12k_dp_htt_htc_t2h_msg_handler;

	/* connect to control service */
	conn_req.service_id = ATH12K_HTC_SVC_ID_HTT_DATA_MSG;

	status = ath12k_htc_connect_service(&dp->ab->htc, &conn_req,
					    &conn_resp);

	if (status)
		return status;

	dp->eid = conn_resp.eid;

	return 0;
}

static int ath12k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
				      u16 peer_id)
{
	int i;

	for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
		if (ppdu_stats->user_stats[i].is_valid_peer_id) {
			if (peer_id == ppdu_stats->user_stats[i].peer_id)
				return i;
		} else {
			return i;
		}
	}

	return -EINVAL;
}

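/* TLV callback for ath12k_dp_htt_tlv_iter(): validate each PPDU stats TLV
 * against the expected structure size, copy the common TLV and the per-user
 * rate/completion TLVs into ppdu_info and record which per-user TLVs were
 * seen in tlv_flags.
 */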
static int ath12k_htt_tlv_ppdu_stats_parse(struct ath12k_base *ab,
					   u16 tag, u16 len, const void *ptr,
					   void *data)
{
	const struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *ba_status;
	const struct htt_ppdu_stats_usr_cmpltn_cmn *cmplt_cmn;
	const struct htt_ppdu_stats_user_rate *user_rate;
	struct htt_ppdu_stats_info *ppdu_info;
	struct htt_ppdu_user_stats *user_stats;
	int cur_user;
	u16 peer_id;

	ppdu_info = data;

	switch (tag) {
	case HTT_PPDU_STATS_TAG_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_common)) {
			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}
		memcpy(&ppdu_info->ppdu_stats.common, ptr,
		       sizeof(struct htt_ppdu_stats_common));
		break;
	case HTT_PPDU_STATS_TAG_USR_RATE:
		if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}
		user_rate = ptr;
		peer_id = le16_to_cpu(user_rate->sw_peer_id);
		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy(&user_stats->rate, ptr,
		       sizeof(struct htt_ppdu_stats_user_rate));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		cmplt_cmn = ptr;
		peer_id = le16_to_cpu(cmplt_cmn->sw_peer_id);
		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy(&user_stats->cmpltn_cmn, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
		if (len <
		    sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		ba_status = ptr;
		peer_id = le16_to_cpu(ba_status->sw_peer_id);
		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy(&user_stats->ack_ba, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
		user_stats->tlv_flags |= BIT(tag);
		break;
	}
	return 0;
}

int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
			   int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
				       const void *ptr, void *data),
			   void *data)
{
	const struct htt_tlv *tlv;
	const void *begin = ptr;
	u16 tlv_tag, tlv_len;
	int ret = -EINVAL;

	while (len > 0) {
		if (len < sizeof(*tlv)) {
			ath12k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}
		tlv = (struct htt_tlv *)ptr;
		tlv_tag = le32_get_bits(tlv->header, HTT_TLV_TAG);
		tlv_len = le32_get_bits(tlv->header, HTT_TLV_LEN);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		if (tlv_len > len) {
			ath12k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}
		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
		if (ret == -ENOMEM)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}
	return 0;
}

static void
ath12k_update_per_peer_tx_stats(struct ath12k_pdev_dp *dp_pdev,
				struct htt_ppdu_stats *ppdu_stats, u8 user)
{
	struct ath12k_dp *dp = dp_pdev->dp;
	struct ath12k_base *ab = dp->ab;
	struct ath12k_dp_link_peer *peer;
	struct htt_ppdu_stats_user_rate *user_rate;
	struct ath12k_per_peer_tx_stats *peer_stats = &dp_pdev->peer_tx_stats;
	struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
	struct htt_ppdu_stats_common *common = &ppdu_stats->common;
	int ret;
	u8 flags, mcs, nss, bw, sgi, dcm, ppdu_type, rate_idx = 0;
	u32 v, succ_bytes = 0;
	u16 tones, rate = 0, succ_pkts = 0;
	u32 tx_duration = 0;
	u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
	u16 tx_retry_failed = 0, tx_retry_count = 0;
	bool is_ampdu = false, is_ofdma;

	if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
		return;

	if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
		is_ampdu =
			HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);

	if (usr_stats->tlv_flags &
	    BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
		succ_bytes = le32_to_cpu(usr_stats->ack_ba.success_bytes);
		succ_pkts = le32_get_bits(usr_stats->ack_ba.info,
					  HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M);
		tid = le32_get_bits(usr_stats->ack_ba.info,
				    HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM);

		if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON)) {
			tx_retry_failed =
				__le16_to_cpu(usr_stats->cmpltn_cmn.mpdu_tried) -
				__le16_to_cpu(usr_stats->cmpltn_cmn.mpdu_success);
			tx_retry_count =
				HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
				HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
		}

		if (common->fes_duration_us)
			tx_duration = le32_to_cpu(common->fes_duration_us);
	}

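	/* Decode the user rate TLV into preamble, bandwidth, NSS, MCS, GI and
	 * DCM; these are validated against the per-preamble limits below
	 * before being used to fill the peer's rate_info.
	 */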
	user_rate = &usr_stats->rate;
	flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
	bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
	nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
	mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
	sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
	dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);

	ppdu_type = HTT_USR_RATE_PPDU_TYPE(user_rate->info1);
	is_ofdma = (ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA) ||
		   (ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA);

	/* Note: If the host configured fixed rates, and in some other special
	 * cases, broadcast/management frames are sent at different rates.
	 * Should firmware rate control be skipped for these?
	 */

	if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH12K_HE_MCS_MAX) {
		ath12k_warn(ab, "Invalid HE mcs %d peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH12K_VHT_MCS_MAX) {
		ath12k_warn(ab, "Invalid VHT mcs %d peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH12K_HT_MCS_MAX || nss < 1)) {
		ath12k_warn(ab, "Invalid HT mcs %d nss %d peer stats",
			    mcs, nss);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
		ret = ath12k_mac_hw_ratecode_to_legacy_rate(mcs,
							    flags,
							    &rate_idx,
							    &rate);
		if (ret < 0)
			return;
	}

	rcu_read_lock();
	peer = ath12k_dp_link_peer_find_by_peerid(dp_pdev, usr_stats->peer_id);

	if (!peer || !peer->sta) {
		rcu_read_unlock();
		return;
	}

	spin_lock_bh(&dp->dp_lock);

	memset(&peer->txrate, 0, sizeof(peer->txrate));

	peer->txrate.bw = ath12k_mac_bw_to_mac80211_bw(bw);

	switch (flags) {
	case WMI_RATE_PREAMBLE_OFDM:
		peer->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_CCK:
		peer->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_HT:
		peer->txrate.mcs = mcs + 8 * (nss - 1);
		peer->txrate.flags = RATE_INFO_FLAGS_MCS;
		if (sgi)
			peer->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_VHT:
		peer->txrate.mcs = mcs;
		peer->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
		if (sgi)
			peer->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_HE:
		peer->txrate.mcs = mcs;
		peer->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
		peer->txrate.he_dcm = dcm;
		peer->txrate.he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
		tones = le16_to_cpu(user_rate->ru_end) -
			le16_to_cpu(user_rate->ru_start) + 1;
		v = ath12k_he_ru_tones_to_nl80211_he_ru_alloc(tones);
		peer->txrate.he_ru_alloc = v;
		if (is_ofdma)
			peer->txrate.bw = RATE_INFO_BW_HE_RU;
		break;
	case WMI_RATE_PREAMBLE_EHT:
		peer->txrate.mcs = mcs;
		peer->txrate.flags = RATE_INFO_FLAGS_EHT_MCS;
		peer->txrate.he_dcm = dcm;
		peer->txrate.eht_gi = ath12k_mac_eht_gi_to_nl80211_eht_gi(sgi);
		tones = le16_to_cpu(user_rate->ru_end) -
			le16_to_cpu(user_rate->ru_start) + 1;
		v = ath12k_mac_eht_ru_tones_to_nl80211_eht_ru_alloc(tones);
		peer->txrate.eht_ru_alloc = v;
		if (is_ofdma)
			peer->txrate.bw = RATE_INFO_BW_EHT_RU;
		break;
	}

	peer->tx_retry_failed += tx_retry_failed;
	peer->tx_retry_count += tx_retry_count;
	peer->txrate.nss = nss;
	peer->tx_duration += tx_duration;
	memcpy(&peer->last_txrate, &peer->txrate, sizeof(struct rate_info));

	spin_unlock_bh(&dp->dp_lock);

	/* PPDU stats reported for mgmt packet doesn't have valid tx bytes.
	 * So skip peer stats update for mgmt packets.
	 */
	if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
		memset(peer_stats, 0, sizeof(*peer_stats));
		peer_stats->succ_pkts = succ_pkts;
		peer_stats->succ_bytes = succ_bytes;
		peer_stats->is_ampdu = is_ampdu;
		peer_stats->duration = tx_duration;
		peer_stats->ba_fails =
			HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
			HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
	}

	rcu_read_unlock();
}

static void ath12k_htt_update_ppdu_stats(struct ath12k_pdev_dp *dp_pdev,
					 struct htt_ppdu_stats *ppdu_stats)
{
	u8 user;

	for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
		ath12k_update_per_peer_tx_stats(dp_pdev, ppdu_stats, user);
}

static
struct htt_ppdu_stats_info *ath12k_dp_htt_get_ppdu_desc(struct ath12k_pdev_dp *dp_pdev,
							 u32 ppdu_id)
{
	struct htt_ppdu_stats_info *ppdu_info;

	lockdep_assert_held(&dp_pdev->ppdu_list_lock);
	if (!list_empty(&dp_pdev->ppdu_stats_info)) {
		list_for_each_entry(ppdu_info, &dp_pdev->ppdu_stats_info, list) {
			if (ppdu_info->ppdu_id == ppdu_id)
				return ppdu_info;
		}

		if (dp_pdev->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
			ppdu_info = list_first_entry(&dp_pdev->ppdu_stats_info,
						     typeof(*ppdu_info), list);
			list_del(&ppdu_info->list);
			dp_pdev->ppdu_stat_list_depth--;
			ath12k_htt_update_ppdu_stats(dp_pdev, &ppdu_info->ppdu_stats);
			kfree(ppdu_info);
		}
	}

	ppdu_info = kzalloc_obj(*ppdu_info, GFP_ATOMIC);
	if (!ppdu_info)
		return NULL;

	list_add_tail(&ppdu_info->list, &dp_pdev->ppdu_stats_info);
	dp_pdev->ppdu_stat_list_depth++;

	return ppdu_info;
}

static void ath12k_copy_to_delay_stats(struct ath12k_dp_link_peer *peer,
				       struct htt_ppdu_user_stats *usr_stats)
{
	peer->ppdu_stats_delayba.sw_peer_id = le16_to_cpu(usr_stats->rate.sw_peer_id);
	peer->ppdu_stats_delayba.info0 = le32_to_cpu(usr_stats->rate.info0);
	peer->ppdu_stats_delayba.ru_end = le16_to_cpu(usr_stats->rate.ru_end);
	peer->ppdu_stats_delayba.ru_start = le16_to_cpu(usr_stats->rate.ru_start);
	peer->ppdu_stats_delayba.info1 = le32_to_cpu(usr_stats->rate.info1);
	peer->ppdu_stats_delayba.rate_flags = le32_to_cpu(usr_stats->rate.rate_flags);
	peer->ppdu_stats_delayba.resp_rate_flags =
		le32_to_cpu(usr_stats->rate.resp_rate_flags);

	peer->delayba_flag = true;
}

static void ath12k_copy_to_bar(struct ath12k_dp_link_peer *peer,
			       struct htt_ppdu_user_stats *usr_stats)
{
	usr_stats->rate.sw_peer_id = cpu_to_le16(peer->ppdu_stats_delayba.sw_peer_id);
	usr_stats->rate.info0 = cpu_to_le32(peer->ppdu_stats_delayba.info0);
	usr_stats->rate.ru_end = cpu_to_le16(peer->ppdu_stats_delayba.ru_end);
	usr_stats->rate.ru_start = cpu_to_le16(peer->ppdu_stats_delayba.ru_start);
	usr_stats->rate.info1 = cpu_to_le32(peer->ppdu_stats_delayba.info1);
	usr_stats->rate.rate_flags = cpu_to_le32(peer->ppdu_stats_delayba.rate_flags);
	usr_stats->rate.resp_rate_flags =
		cpu_to_le32(peer->ppdu_stats_delayba.resp_rate_flags);

	peer->delayba_flag = false;
}

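/* Handle an HTT_T2H_MSG_TYPE_PPDU_STATS_IND message: validate the payload
 * length and pdev id, find or allocate the PPDU descriptor for this ppdu_id,
 * walk the stats TLVs and, for delayed-BA peers, back up the data rate TLV
 * from data PPDUs and restore it into MU-BAR PPDUs.
 */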
static int ath12k_htt_pull_ppdu_stats(struct ath12k_base *ab,
				      struct sk_buff *skb)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	struct ath12k_htt_ppdu_stats_msg *msg;
	struct htt_ppdu_stats_info *ppdu_info;
	struct ath12k_dp_link_peer *peer = NULL;
	struct htt_ppdu_user_stats *usr_stats = NULL;
	u32 peer_id = 0;
	struct ath12k_pdev_dp *dp_pdev;
	int ret, i;
	u8 pdev_id, pdev_idx;
	u32 ppdu_id, len;

	msg = (struct ath12k_htt_ppdu_stats_msg *)skb->data;
	len = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE);
	if (len > (skb->len - struct_size(msg, data, 0))) {
		ath12k_warn(ab,
			    "HTT PPDU STATS event has unexpected payload size %u, should be smaller than %u\n",
			    len, skb->len);
		return -EINVAL;
	}

	pdev_id = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PDEV_ID);
	ppdu_id = le32_to_cpu(msg->ppdu_id);

	pdev_idx = DP_HW2SW_MACID(pdev_id);
	if (pdev_idx >= MAX_RADIOS) {
		ath12k_warn(ab, "HTT PPDU STATS invalid pdev id %u", pdev_id);
		return -EINVAL;
	}

	rcu_read_lock();

	dp_pdev = ath12k_dp_to_pdev_dp(dp, pdev_idx);
	if (!dp_pdev) {
		ret = -EINVAL;
		goto exit;
	}

	spin_lock_bh(&dp_pdev->ppdu_list_lock);
	ppdu_info = ath12k_dp_htt_get_ppdu_desc(dp_pdev, ppdu_id);
	if (!ppdu_info) {
		spin_unlock_bh(&dp_pdev->ppdu_list_lock);
		ret = -EINVAL;
		goto exit;
	}

	ppdu_info->ppdu_id = ppdu_id;
	ret = ath12k_dp_htt_tlv_iter(ab, msg->data, len,
				     ath12k_htt_tlv_ppdu_stats_parse,
				     (void *)ppdu_info);
	if (ret) {
		spin_unlock_bh(&dp_pdev->ppdu_list_lock);
		ath12k_warn(ab, "Failed to parse tlv %d\n", ret);
		goto exit;
	}

	if (ppdu_info->ppdu_stats.common.num_users >= HTT_PPDU_STATS_MAX_USERS) {
		spin_unlock_bh(&dp_pdev->ppdu_list_lock);
		ath12k_warn(ab,
			    "HTT PPDU STATS event has unexpected num_users %u, should be smaller than %u\n",
			    ppdu_info->ppdu_stats.common.num_users,
			    HTT_PPDU_STATS_MAX_USERS);
		ret = -EINVAL;
		goto exit;
	}

	/* back up data rate tlv for all peers */
	if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_DATA &&
	    (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON)) &&
	    ppdu_info->delay_ba) {
		for (i = 0; i < ppdu_info->ppdu_stats.common.num_users; i++) {
			peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id;
			peer = ath12k_dp_link_peer_find_by_peerid(dp_pdev, peer_id);
			if (!peer)
				continue;

			usr_stats = &ppdu_info->ppdu_stats.user_stats[i];
			if (usr_stats->delay_ba)
				ath12k_copy_to_delay_stats(peer, usr_stats);
		}
	}

	/* restore all peers' data rate tlv to mu-bar tlv */
	if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_BAR &&
	    (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON))) {
		for (i = 0; i < ppdu_info->bar_num_users; i++) {
			peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id;
			peer = ath12k_dp_link_peer_find_by_peerid(dp_pdev, peer_id);
			if (!peer)
				continue;

			usr_stats = &ppdu_info->ppdu_stats.user_stats[i];
			if (peer->delayba_flag)
				ath12k_copy_to_bar(peer, usr_stats);
		}
	}

	spin_unlock_bh(&dp_pdev->ppdu_list_lock);

exit:
	rcu_read_unlock();

	return ret;
}

static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab,
						struct sk_buff *skb)
{
	struct ath12k_htt_mlo_offset_msg *msg;
	struct ath12k_pdev *pdev;
	struct ath12k *ar;
	u8 pdev_id;

	msg = (struct ath12k_htt_mlo_offset_msg *)skb->data;
	pdev_id = u32_get_bits(__le32_to_cpu(msg->info),
			       HTT_T2H_MLO_OFFSET_INFO_PDEV_ID);

	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
	if (!ar) {
		/* It is possible that the ar is not yet active (started).
		 * The above function will only look for the active pdev
		 * and hence %NULL return is possible. Just silently
		 * discard this message
		 */
		goto exit;
	}

	spin_lock_bh(&ar->data_lock);
	pdev = ar->pdev;

	pdev->timestamp.info = __le32_to_cpu(msg->info);
	pdev->timestamp.sync_timestamp_lo_us = __le32_to_cpu(msg->sync_timestamp_lo_us);
	pdev->timestamp.sync_timestamp_hi_us = __le32_to_cpu(msg->sync_timestamp_hi_us);
	pdev->timestamp.mlo_offset_lo = __le32_to_cpu(msg->mlo_offset_lo);
	pdev->timestamp.mlo_offset_hi = __le32_to_cpu(msg->mlo_offset_hi);
	pdev->timestamp.mlo_offset_clks = __le32_to_cpu(msg->mlo_offset_clks);
	pdev->timestamp.mlo_comp_clks = __le32_to_cpu(msg->mlo_comp_clks);
	pdev->timestamp.mlo_comp_timer = __le32_to_cpu(msg->mlo_comp_timer);

	spin_unlock_bh(&ar->data_lock);
exit:
	rcu_read_unlock();
}

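/* HTT target-to-host (T2H) message dispatcher, registered as the HTC RX
 * completion handler for the HTT data service. The skb is always consumed
 * here.
 */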
void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab,
				       struct sk_buff *skb)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
	enum htt_t2h_msg_type type;
	u16 peer_id;
	u8 vdev_id;
	u8 mac_addr[ETH_ALEN];
	u16 peer_mac_h16;
	u16 ast_hash = 0;
	u16 hw_peer_id;

	type = le32_get_bits(resp->version_msg.version, HTT_T2H_MSG_TYPE);

	ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type);

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF:
		dp->htt_tgt_ver_major = le32_get_bits(resp->version_msg.version,
						      HTT_T2H_VERSION_CONF_MAJOR);
		dp->htt_tgt_ver_minor = le32_get_bits(resp->version_msg.version,
						      HTT_T2H_VERSION_CONF_MINOR);
		complete(&dp->htt_tgt_version_received);
		break;
	/* TODO: remove unused peer map versions after testing */
	case HTT_T2H_MSG_TYPE_PEER_MAP:
		vdev_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_VDEV_ID);
		peer_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_PEER_ID);
		peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
					     HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
		ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
				       peer_mac_h16, mac_addr);
		ath12k_dp_link_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP2:
		vdev_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_VDEV_ID);
		peer_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_PEER_ID);
		peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
					     HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
		ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
				       peer_mac_h16, mac_addr);
		ast_hash = le32_get_bits(resp->peer_map_ev.info2,
					 HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL);
		hw_peer_id = le32_get_bits(resp->peer_map_ev.info1,
					   HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID);
		ath12k_dp_link_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
					      hw_peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP3:
		vdev_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_VDEV_ID);
		peer_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_PEER_ID);
		peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
					     HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
		ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
				       peer_mac_h16, mac_addr);
		ast_hash = le32_get_bits(resp->peer_map_ev.info2,
					 HTT_T2H_PEER_MAP3_INFO2_AST_HASH_VAL);
		hw_peer_id = le32_get_bits(resp->peer_map_ev.info2,
					   HTT_T2H_PEER_MAP3_INFO2_HW_PEER_ID);
		ath12k_dp_link_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
					      hw_peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
	case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
		peer_id = le32_get_bits(resp->peer_unmap_ev.info,
					HTT_T2H_PEER_UNMAP_INFO_PEER_ID);
		ath12k_dp_link_peer_unmap_event(ab, peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
		ath12k_htt_pull_ppdu_stats(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
		ath12k_debugfs_htt_ext_stats_handler(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND:
		ath12k_htt_mlo_offset_event_handler(ab, skb);
		break;
	default:
		ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt event %d not handled\n",
			   type);
		break;
	}

	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath12k_dp_htt_htc_t2h_msg_handler);

static int
ath12k_dp_tx_get_ring_id_type(struct ath12k_base *ab,
			      int mac_id, u32 ring_id,
			      enum hal_ring_type ring_type,
			      enum htt_srng_ring_type *htt_ring_type,
			      enum htt_srng_ring_id *htt_ring_id)
{
	int ret = 0;

	switch (ring_type) {
	case HAL_RXDMA_BUF:
		/* for some targets, host fills rx buffer to fw and fw fills to
		 * rxbuf ring for each rxdma
		 */
		if (!ab->hw_params->rx_mac_buf_ring) {
			if (!(ring_id == HAL_SRNG_SW2RXDMA_BUF0 ||
			      ring_id == HAL_SRNG_SW2RXDMA_BUF1)) {
				ret = -EINVAL;
			}
			*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
			*htt_ring_type = HTT_SW_TO_HW_RING;
		} else {
			if (ring_id == HAL_SRNG_SW2RXDMA_BUF0) {
				*htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
				*htt_ring_type = HTT_SW_TO_SW_RING;
			} else {
				*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
				*htt_ring_type = HTT_SW_TO_HW_RING;
			}
		}
		break;
	case HAL_RXDMA_DST:
		*htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
		*htt_ring_type = HTT_HW_TO_SW_RING;
		break;
	case HAL_RXDMA_MONITOR_BUF:
		*htt_ring_id = HTT_RX_MON_HOST2MON_BUF_RING;
		*htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case HAL_RXDMA_MONITOR_STATUS:
		*htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
		*htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case HAL_RXDMA_MONITOR_DST:
		*htt_ring_id = HTT_RX_MON_MON2HOST_DEST_RING;
		*htt_ring_type = HTT_HW_TO_SW_RING;
		break;
	case HAL_RXDMA_MONITOR_DESC:
		*htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
		*htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	default:
		ath12k_warn(ab, "Unsupported ring type in DP :%d\n", ring_type);
		ret = -EINVAL;
	}
	return ret;
}

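/* Send an HTT_H2T_MSG_TYPE_SRING_SETUP command describing a host SRNG to the
 * target: ring base address and size, entry size in 32-bit words, head/tail
 * pointer addresses, MSI address/data and interrupt thresholds.
 */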
int ath12k_dp_tx_htt_srng_setup(struct ath12k_base *ab, u32 ring_id,
				int mac_id, enum hal_ring_type ring_type)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	struct htt_srng_setup_cmd *cmd;
	struct hal_srng *srng = &ab->hal.srng_list[ring_id];
	struct hal_srng_params params;
	struct sk_buff *skb;
	u32 ring_entry_sz;
	int len = sizeof(*cmd);
	dma_addr_t hp_addr, tp_addr;
	enum htt_srng_ring_type htt_ring_type;
	enum htt_srng_ring_id htt_ring_id;
	int ret;

	skb = ath12k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	memset(&params, 0, sizeof(params));
	ath12k_hal_srng_get_params(ab, srng, &params);

	hp_addr = ath12k_hal_srng_get_hp_addr(ab, srng);
	tp_addr = ath12k_hal_srng_get_tp_addr(ab, srng);

	ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
					    ring_type, &htt_ring_type,
					    &htt_ring_id);
	if (ret)
		goto err_free;

	skb_put(skb, len);
	cmd = (struct htt_srng_setup_cmd *)skb->data;
	cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_SRING_SETUP,
				      HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE);
	if (htt_ring_type == HTT_SW_TO_HW_RING ||
	    htt_ring_type == HTT_HW_TO_SW_RING)
		cmd->info0 |= le32_encode_bits(DP_SW2HW_MACID(mac_id),
					       HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID);
	else
		cmd->info0 |= le32_encode_bits(mac_id,
					       HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID);
	cmd->info0 |= le32_encode_bits(htt_ring_type,
				       HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE);
	cmd->info0 |= le32_encode_bits(htt_ring_id,
				       HTT_SRNG_SETUP_CMD_INFO0_RING_ID);

	cmd->ring_base_addr_lo = cpu_to_le32(params.ring_base_paddr &
					     HAL_ADDR_LSB_REG_MASK);

	cmd->ring_base_addr_hi = cpu_to_le32((u64)params.ring_base_paddr >>
					     HAL_ADDR_MSB_REG_SHIFT);

	ret = ath12k_hal_srng_get_entrysize(ab, ring_type);
	if (ret < 0)
		goto err_free;

	ring_entry_sz = ret;

	ring_entry_sz >>= 2;
	cmd->info1 = le32_encode_bits(ring_entry_sz,
				      HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE);
	cmd->info1 |= le32_encode_bits(params.num_entries * ring_entry_sz,
				       HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE);
	cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP),
				       HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP);
	cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
				       HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP);
	cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_RING_PTR_SWAP),
				       HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP);
	if (htt_ring_type == HTT_SW_TO_HW_RING)
		cmd->info1 |= cpu_to_le32(HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS);

	cmd->ring_head_off32_remote_addr_lo = cpu_to_le32(lower_32_bits(hp_addr));
	cmd->ring_head_off32_remote_addr_hi = cpu_to_le32(upper_32_bits(hp_addr));

	cmd->ring_tail_off32_remote_addr_lo = cpu_to_le32(lower_32_bits(tp_addr));
	cmd->ring_tail_off32_remote_addr_hi = cpu_to_le32(upper_32_bits(tp_addr));

	cmd->ring_msi_addr_lo = cpu_to_le32(lower_32_bits(params.msi_addr));
	cmd->ring_msi_addr_hi = cpu_to_le32(upper_32_bits(params.msi_addr));
	cmd->msi_data = cpu_to_le32(params.msi_data);

	cmd->intr_info =
		le32_encode_bits(params.intr_batch_cntr_thres_entries * ring_entry_sz,
				 HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH);
	cmd->intr_info |=
		le32_encode_bits(params.intr_timer_thres_us >> 3,
				 HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH);

	cmd->info2 = 0;
	if (params.flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
		cmd->info2 = le32_encode_bits(params.low_threshold,
					      HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH);
	}

	ath12k_dbg(ab, ATH12K_DBG_HAL,
		   "%s msi_addr_lo:0x%x, msi_addr_hi:0x%x, msi_data:0x%x\n",
		   __func__, cmd->ring_msi_addr_lo, cmd->ring_msi_addr_hi,
		   cmd->msi_data);

	ath12k_dbg(ab, ATH12K_DBG_HAL,
		   "ring_id:%d, ring_type:%d, intr_info:0x%x, flags:0x%x\n",
		   ring_id, ring_type, cmd->intr_info, cmd->info2);

	ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
	if (ret)
		goto err_free;

	return 0;

err_free:
	dev_kfree_skb_any(skb);

	return ret;
}

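/* Request the HTT protocol version from the target and advertise the TCL
 * metadata version the host will use (v1 in FTM mode, v2 otherwise), then
 * wait for the VERSION_CONF response and reject unsupported major versions.
 */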
int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	struct sk_buff *skb;
	struct htt_ver_req_cmd *cmd;
	int len = sizeof(*cmd);
	u32 metadata_version;
	int ret;

	init_completion(&dp->htt_tgt_version_received);

	skb = ath12k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_ver_req_cmd *)skb->data;
	cmd->ver_reg_info = le32_encode_bits(HTT_H2T_MSG_TYPE_VERSION_REQ,
					     HTT_OPTION_TAG);
	metadata_version = ath12k_ftm_mode ? HTT_OPTION_TCL_METADATA_VER_V1 :
			   HTT_OPTION_TCL_METADATA_VER_V2;

	cmd->tcl_metadata_version = le32_encode_bits(HTT_TAG_TCL_METADATA_VERSION,
						     HTT_OPTION_TAG) |
				    le32_encode_bits(HTT_TCL_METADATA_VER_SZ,
						     HTT_OPTION_LEN) |
				    le32_encode_bits(metadata_version,
						     HTT_OPTION_VALUE);

	ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	ret = wait_for_completion_timeout(&dp->htt_tgt_version_received,
					  HTT_TARGET_VERSION_TIMEOUT_HZ);
	if (ret == 0) {
		ath12k_warn(ab, "htt target version request timed out\n");
		return -ETIMEDOUT;
	}

	if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
		ath12k_err(ab, "unsupported htt major version %d supported version is %d\n",
			   dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
		return -EOPNOTSUPP;
	}

	return 0;
}

int ath12k_dp_tx_htt_h2t_ppdu_stats_req(struct ath12k *ar, u32 mask)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	struct sk_buff *skb;
	struct htt_ppdu_stats_cfg_cmd *cmd;
	int len = sizeof(*cmd);
	u8 pdev_mask;
	int ret;
	int i;

	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
		skb = ath12k_htc_alloc_skb(ab, len);
		if (!skb)
			return -ENOMEM;

		skb_put(skb, len);
		cmd = (struct htt_ppdu_stats_cfg_cmd *)skb->data;
		cmd->msg = le32_encode_bits(HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
					    HTT_PPDU_STATS_CFG_MSG_TYPE);

		pdev_mask = 1 << (i + ar->pdev_idx);
		cmd->msg |= le32_encode_bits(pdev_mask, HTT_PPDU_STATS_CFG_PDEV_ID);
		cmd->msg |= le32_encode_bits(mask, HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK);

		ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
		if (ret) {
			dev_kfree_skb_any(skb);
			return ret;
		}
	}

	return 0;
}

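/* Program the RX ring selection config for the given ring: buffer size,
 * packet type filter flags, subscribed RX TLVs and, when valid, the RX TLV
 * offsets and compact TLV word masks.
 */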
int ath12k_dp_tx_htt_rx_filter_setup(struct ath12k_base *ab, u32 ring_id,
				     int mac_id, enum hal_ring_type ring_type,
				     int rx_buf_size,
				     struct htt_rx_ring_tlv_filter *tlv_filter)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	struct htt_rx_ring_selection_cfg_cmd *cmd;
	struct hal_srng *srng = &ab->hal.srng_list[ring_id];
	struct hal_srng_params params;
	struct sk_buff *skb;
	int len = sizeof(*cmd);
	enum htt_srng_ring_type htt_ring_type;
	enum htt_srng_ring_id htt_ring_id;
	int ret;

	skb = ath12k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	memset(&params, 0, sizeof(params));
	ath12k_hal_srng_get_params(ab, srng, &params);

	ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
					    ring_type, &htt_ring_type,
					    &htt_ring_id);
	if (ret)
		goto err_free;

	skb_put(skb, len);
	cmd = (struct htt_rx_ring_selection_cfg_cmd *)skb->data;
	cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG,
				      HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE);
	if (htt_ring_type == HTT_SW_TO_HW_RING ||
	    htt_ring_type == HTT_HW_TO_SW_RING)
		cmd->info0 |=
			le32_encode_bits(DP_SW2HW_MACID(mac_id),
					 HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
	else
		cmd->info0 |=
			le32_encode_bits(mac_id,
					 HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
	cmd->info0 |= le32_encode_bits(htt_ring_id,
				       HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID);
	cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP),
				       HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS);
	cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
				       HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS);
	cmd->info0 |= le32_encode_bits(tlv_filter->offset_valid,
				       HTT_RX_RING_SELECTION_CFG_CMD_INFO0_OFFSET_VALID);
	cmd->info0 |=
		le32_encode_bits(tlv_filter->drop_threshold_valid,
				 HTT_RX_RING_SELECTION_CFG_CMD_INFO0_DROP_THRES_VAL);
	cmd->info0 |= le32_encode_bits(!tlv_filter->rxmon_disable,
				       HTT_RX_RING_SELECTION_CFG_CMD_INFO0_EN_RXMON);

	cmd->info1 = le32_encode_bits(rx_buf_size,
				      HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE);
	cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_mgmt,
				       HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT);
	cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_ctrl,
				       HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL);
	cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_data,
				       HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA);
	cmd->pkt_type_en_flags0 = cpu_to_le32(tlv_filter->pkt_filter_flags0);
	cmd->pkt_type_en_flags1 = cpu_to_le32(tlv_filter->pkt_filter_flags1);
	cmd->pkt_type_en_flags2 = cpu_to_le32(tlv_filter->pkt_filter_flags2);
	cmd->pkt_type_en_flags3 = cpu_to_le32(tlv_filter->pkt_filter_flags3);
	cmd->rx_filter_tlv = cpu_to_le32(tlv_filter->rx_filter);

	cmd->info2 = le32_encode_bits(tlv_filter->rx_drop_threshold,
				      HTT_RX_RING_SELECTION_CFG_CMD_INFO2_DROP_THRESHOLD);
	cmd->info2 |=
		le32_encode_bits(tlv_filter->enable_log_mgmt_type,
				 HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_MGMT_TYPE);
	cmd->info2 |=
		le32_encode_bits(tlv_filter->enable_log_ctrl_type,
				 HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_CTRL_TYPE);
	cmd->info2 |=
		le32_encode_bits(tlv_filter->enable_log_data_type,
				 HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_DATA_TYPE);

	cmd->info3 =
		le32_encode_bits(tlv_filter->enable_rx_tlv_offset,
				 HTT_RX_RING_SELECTION_CFG_CMD_INFO3_EN_TLV_PKT_OFFSET);
	cmd->info3 |=
		le32_encode_bits(tlv_filter->rx_tlv_offset,
				 HTT_RX_RING_SELECTION_CFG_CMD_INFO3_PKT_TLV_OFFSET);

	if (tlv_filter->offset_valid) {
		cmd->rx_packet_offset =
			le32_encode_bits(tlv_filter->rx_packet_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET);

		cmd->rx_packet_offset |=
			le32_encode_bits(tlv_filter->rx_header_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET);

		cmd->rx_mpdu_offset =
			le32_encode_bits(tlv_filter->rx_mpdu_end_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET);

		cmd->rx_mpdu_offset |=
			le32_encode_bits(tlv_filter->rx_mpdu_start_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET);

		cmd->rx_msdu_offset =
			le32_encode_bits(tlv_filter->rx_msdu_end_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET);

		cmd->rx_msdu_offset |=
			le32_encode_bits(tlv_filter->rx_msdu_start_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET);

		cmd->rx_attn_offset =
			le32_encode_bits(tlv_filter->rx_attn_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET);
	}

	if (tlv_filter->rx_mpdu_start_wmask > 0 &&
	    tlv_filter->rx_msdu_end_wmask > 0) {
		cmd->info2 |=
			le32_encode_bits(true,
					 HTT_RX_RING_SELECTION_CFG_WORD_MASK_COMPACT_SET);
		cmd->rx_mpdu_start_end_mask =
			le32_encode_bits(tlv_filter->rx_mpdu_start_wmask,
					 HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_MASK);
		/* mpdu_end is not used by any hardware so far; assign it in
		 * the future if any chip starts using it through HAL ops.
		 */
		cmd->rx_mpdu_start_end_mask |=
			le32_encode_bits(tlv_filter->rx_mpdu_end_wmask,
					 HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_MASK);
		cmd->rx_msdu_end_word_mask =
			le32_encode_bits(tlv_filter->rx_msdu_end_wmask,
					 HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_MASK);
	}

	ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
	if (ret)
		goto err_free;

	return 0;

err_free:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(ath12k_dp_tx_htt_rx_filter_setup);

int
ath12k_dp_tx_htt_h2t_ext_stats_req(struct ath12k *ar, u8 type,
				   struct htt_ext_stats_cfg_params *cfg_params,
				   u64 cookie)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	struct sk_buff *skb;
	struct htt_ext_stats_cfg_cmd *cmd;
	int len = sizeof(*cmd);
	int ret;
	u32 pdev_id;

	skb = ath12k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_ext_stats_cfg_cmd *)skb->data;
	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG;

	pdev_id = ath12k_mac_get_target_pdev_id(ar);
	cmd->hdr.pdev_mask = 1 << pdev_id;

	cmd->hdr.stats_type = type;
	cmd->cfg_param0 = cpu_to_le32(cfg_params->cfg0);
	cmd->cfg_param1 = cpu_to_le32(cfg_params->cfg1);
	cmd->cfg_param2 = cpu_to_le32(cfg_params->cfg2);
	cmd->cfg_param3 = cpu_to_le32(cfg_params->cfg3);
	cmd->cookie_lsb = cpu_to_le32(lower_32_bits(cookie));
	cmd->cookie_msb = cpu_to_le32(upper_32_bits(cookie));

	ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
	if (ret) {
		ath12k_warn(ab, "failed to send htt type stats request: %d",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath12k_dp_tx_htt_monitor_mode_ring_config(struct ath12k *ar, bool reset)
{
	struct ath12k_base *ab = ar->ab;
	int ret;

	ret = ath12k_dp_tx_htt_rx_monitor_mode_ring_config(ar, reset);
	if (ret) {
		ath12k_err(ab, "failed to setup rx monitor filter %d\n", ret);
		return ret;
	}

	return 0;
}

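/* Configure the RX monitor filters for every rxdma ring of the pdev. When
 * rxdma1 is enabled the monitor destination rings are programmed directly;
 * otherwise the MAC buffer rings and the monitor status refill rings are
 * configured. 'reset' restores the default status ring filter.
 */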
int ath12k_dp_tx_htt_rx_monitor_mode_ring_config(struct ath12k *ar, bool reset)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	struct htt_rx_ring_tlv_filter tlv_filter = {};
	int ret, ring_id, i;

	tlv_filter.offset_valid = false;

	if (!reset) {
		tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_DEST_RING;

		tlv_filter.drop_threshold_valid = true;
		tlv_filter.rx_drop_threshold = HTT_RX_RING_TLV_DROP_THRESHOLD_VALUE;

		tlv_filter.enable_log_mgmt_type = true;
		tlv_filter.enable_log_ctrl_type = true;
		tlv_filter.enable_log_data_type = true;

		tlv_filter.conf_len_ctrl = HTT_RX_RING_DEFAULT_DMA_LENGTH;
		tlv_filter.conf_len_mgmt = HTT_RX_RING_DEFAULT_DMA_LENGTH;
		tlv_filter.conf_len_data = HTT_RX_RING_DEFAULT_DMA_LENGTH;

		tlv_filter.enable_rx_tlv_offset = true;
		tlv_filter.rx_tlv_offset = HTT_RX_RING_PKT_TLV_OFFSET;

		tlv_filter.pkt_filter_flags0 =
			HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 |
			HTT_RX_MON_MO_MGMT_FILTER_FLAGS0;
		tlv_filter.pkt_filter_flags1 =
			HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 |
			HTT_RX_MON_MO_MGMT_FILTER_FLAGS1;
		tlv_filter.pkt_filter_flags2 =
			HTT_RX_MON_FP_CTRL_FILTER_FLASG2 |
			HTT_RX_MON_MO_CTRL_FILTER_FLASG2;
		tlv_filter.pkt_filter_flags3 =
			HTT_RX_MON_FP_CTRL_FILTER_FLASG3 |
			HTT_RX_MON_MO_CTRL_FILTER_FLASG3 |
			HTT_RX_MON_FP_DATA_FILTER_FLASG3 |
			HTT_RX_MON_MO_DATA_FILTER_FLASG3;
	} else {
		tlv_filter = ath12k_mac_mon_status_filter_default;

		if (ath12k_debugfs_is_extd_rx_stats_enabled(ar))
			tlv_filter.rx_filter = ath12k_debugfs_rx_filter(ar);
	}

	if (ab->hw_params->rxdma1_enable) {
		for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
			ring_id = ar->dp.rxdma_mon_dst_ring[i].ring_id;
			ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
							       ar->dp.mac_id + i,
							       HAL_RXDMA_MONITOR_DST,
							       DP_RXDMA_REFILL_RING_SIZE,
							       &tlv_filter);
			if (ret) {
				ath12k_err(ab,
					   "failed to setup filter for monitor buf %d\n",
					   ret);
				return ret;
			}
		}
		return 0;
	}

	if (!reset) {
		for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
			ring_id = dp->rx_mac_buf_ring[i].ring_id;
			ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
							       i,
							       HAL_RXDMA_BUF,
							       DP_RXDMA_REFILL_RING_SIZE,
							       &tlv_filter);
			if (ret) {
				ath12k_err(ab,
					   "failed to setup filter for mon rx buf %d\n",
					   ret);
				return ret;
			}
		}
	}

	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
		ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
		if (!reset) {
			tlv_filter.rx_filter =
				HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING;
		}

		ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id,
						       i,
						       HAL_RXDMA_MONITOR_STATUS,
						       RX_MON_STATUS_BUF_SIZE,
						       &tlv_filter);
		if (ret) {
			ath12k_err(ab,
				   "failed to setup filter for mon status buf %d\n",
				   ret);
			return ret;
		}
	}

	return 0;
}

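/* Program the TX monitor ring selection config for the given ring: buffer
 * size, per frame-type capture DMA length and packet type enable flags, plus
 * the downstream/upstream TLV filter masks.
 */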
int ath12k_dp_tx_htt_tx_filter_setup(struct ath12k_base *ab, u32 ring_id,
				     int mac_id, enum hal_ring_type ring_type,
				     int tx_buf_size,
				     struct htt_tx_ring_tlv_filter *htt_tlv_filter)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	struct htt_tx_ring_selection_cfg_cmd *cmd;
	struct hal_srng *srng = &ab->hal.srng_list[ring_id];
	struct hal_srng_params params;
	struct sk_buff *skb;
	int len = sizeof(*cmd);
	enum htt_srng_ring_type htt_ring_type;
	enum htt_srng_ring_id htt_ring_id;
	int ret;

	skb = ath12k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	memset(&params, 0, sizeof(params));
	ath12k_hal_srng_get_params(ab, srng, &params);

	ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
					    ring_type, &htt_ring_type,
					    &htt_ring_id);

	if (ret)
		goto err_free;

	skb_put(skb, len);
	cmd = (struct htt_tx_ring_selection_cfg_cmd *)skb->data;
	cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_TX_MONITOR_CFG,
				      HTT_TX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE);
	if (htt_ring_type == HTT_SW_TO_HW_RING ||
	    htt_ring_type == HTT_HW_TO_SW_RING)
		cmd->info0 |=
			le32_encode_bits(DP_SW2HW_MACID(mac_id),
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
	else
		cmd->info0 |=
			le32_encode_bits(mac_id,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
	cmd->info0 |= le32_encode_bits(htt_ring_id,
				       HTT_TX_RING_SELECTION_CFG_CMD_INFO0_RING_ID);
	cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP),
				       HTT_TX_RING_SELECTION_CFG_CMD_INFO0_SS);
	cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
				       HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PS);

	cmd->info1 |=
		le32_encode_bits(tx_buf_size,
				 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_RING_BUFF_SIZE);

	if (htt_tlv_filter->tx_mon_mgmt_filter) {
		cmd->info1 |=
			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_MGMT,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE);
		cmd->info1 |=
			le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT);
		cmd->info2 |=
			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_MGMT,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG);
	}

	if (htt_tlv_filter->tx_mon_data_filter) {
		cmd->info1 |=
			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_CTRL,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE);
		cmd->info1 |=
			le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL);
		cmd->info2 |=
			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_CTRL,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG);
	}

	if (htt_tlv_filter->tx_mon_ctrl_filter) {
		cmd->info1 |=
			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_DATA,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE);
		cmd->info1 |=
			le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA);
		cmd->info2 |=
			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_DATA,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG);
	}

	cmd->tlv_filter_mask_in0 =
		cpu_to_le32(htt_tlv_filter->tx_mon_downstream_tlv_flags);
	cmd->tlv_filter_mask_in1 =
		cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags0);
	cmd->tlv_filter_mask_in2 =
		cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags1);
	cmd->tlv_filter_mask_in3 =
		cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags2);

	ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
	if (ret)
		goto err_free;

	return 0;

err_free:
	dev_kfree_skb_any(skb);
	return ret;
}