// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
 */
#include <linux/skbuff.h>
#include <linux/ctype.h>
#include <net/mac80211.h>
#include <net/cfg80211.h>
#include <linux/completion.h>
#include <linux/if_ether.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/uuid.h>
#include <linux/time.h>
#include <linux/of.h>
#include <linux/cleanup.h>
#include "core.h"
#include "debugfs.h"
#include "debug.h"
#include "mac.h"
#include "hw.h"
#include "peer.h"
#include "p2p.h"
#include "testmode.h"

struct ath12k_wmi_svc_ready_parse {
	bool wmi_svc_bitmap_done;
};

struct wmi_tlv_fw_stats_parse {
	const struct wmi_stats_event *ev;
	struct ath12k_fw_stats *stats;
	const struct wmi_per_chain_rssi_stat_params *rssi;
	int rssi_num;
	bool chain_rssi_done;
};

struct ath12k_wmi_dma_ring_caps_parse {
	struct ath12k_wmi_dma_ring_caps_params *dma_ring_caps;
	u32 n_dma_ring_caps;
};

struct ath12k_wmi_service_ext_arg {
	u32 default_conc_scan_config_bits;
	u32 default_fw_config_bits;
	struct ath12k_wmi_ppe_threshold_arg ppet;
	u32 he_cap_info;
	u32 mpdu_density;
	u32 max_bssid_rx_filters;
	u32 num_hw_modes;
	u32 num_phy;
};

struct ath12k_wmi_svc_rdy_ext_parse {
	struct ath12k_wmi_service_ext_arg arg;
	const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps;
	const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
	u32 n_hw_mode_caps;
	u32 tot_phy_id;
	struct ath12k_wmi_hw_mode_cap_params pref_hw_mode_caps;
	struct ath12k_wmi_mac_phy_caps_params *mac_phy_caps;
	u32 n_mac_phy_caps;
	const struct ath12k_wmi_soc_hal_reg_caps_params *soc_hal_reg_caps;
	const struct ath12k_wmi_hal_reg_caps_ext_params *ext_hal_reg_caps;
	u32 n_ext_hal_reg_caps;
	struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
	bool hw_mode_done;
	bool mac_phy_done;
	bool ext_hal_reg_done;
	bool mac_phy_chainmask_combo_done;
	bool mac_phy_chainmask_cap_done;
	bool oem_dma_ring_cap_done;
	bool dma_ring_cap_done;
};

struct ath12k_wmi_svc_rdy_ext2_arg {
	u32 reg_db_version;
	u32 hw_min_max_tx_power_2ghz;
	u32 hw_min_max_tx_power_5ghz;
	u32 chwidth_num_peer_caps;
	u32 preamble_puncture_bw;
	u32 max_user_per_ppdu_ofdma;
	u32 max_user_per_ppdu_mumimo;
	u32 target_cap_flags;
	u32 eht_cap_mac_info[WMI_MAX_EHTCAP_MAC_SIZE];
	u32 max_num_linkview_peers;
	u32 max_num_msduq_supported_per_tid;
	u32 default_num_msduq_supported_per_tid;
};

struct ath12k_wmi_svc_rdy_ext2_parse {
	struct ath12k_wmi_svc_rdy_ext2_arg arg;
	struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
	bool dma_ring_cap_done;
	bool spectral_bin_scaling_done;
	bool mac_phy_caps_ext_done;
	bool hal_reg_caps_ext2_done;
	bool scan_radio_caps_ext2_done;
	bool twt_caps_done;
	bool htt_msdu_idx_to_qtype_map_done;
	bool dbs_or_sbs_cap_ext_done;
};

struct ath12k_wmi_rdy_parse {
	u32 num_extra_mac_addr;
};

struct ath12k_wmi_dma_buf_release_arg {
	struct ath12k_wmi_dma_buf_release_fixed_params fixed;
	const struct ath12k_wmi_dma_buf_release_entry_params *buf_entry;
	const struct ath12k_wmi_dma_buf_release_meta_data_params *meta_data;
	u32 num_buf_entry;
	u32 num_meta;
	bool buf_entry_done;
	bool meta_data_done;
};
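
/* Per-tag minimum payload lengths. ath12k_wmi_tlv_iter() checks an event
 * TLV against this table before handing it to its parser; tags that are
 * absent (or have a zero min_len) are passed through unchecked.
 */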
struct ath12k_wmi_tlv_policy {
	size_t min_len;
};

struct wmi_tlv_mgmt_rx_parse {
	const struct ath12k_wmi_mgmt_rx_params *fixed;
	const u8 *frame_buf;
	bool frame_buf_done;
};

struct wmi_pdev_set_obss_bitmap_arg {
	u32 tlv_tag;
	u32 pdev_id;
	u32 cmd_id;
	const u32 *bitmap;
	const char *label;
};

static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = {
	[WMI_TAG_ARRAY_BYTE] = { .min_len = 0 },
	[WMI_TAG_ARRAY_UINT32] = { .min_len = 0 },
	[WMI_TAG_SERVICE_READY_EVENT] = {
		.min_len = sizeof(struct wmi_service_ready_event) },
	[WMI_TAG_SERVICE_READY_EXT_EVENT] = {
		.min_len = sizeof(struct wmi_service_ready_ext_event) },
	[WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS] = {
		.min_len = sizeof(struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params) },
	[WMI_TAG_SOC_HAL_REG_CAPABILITIES] = {
		.min_len = sizeof(struct ath12k_wmi_soc_hal_reg_caps_params) },
	[WMI_TAG_VDEV_START_RESPONSE_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_start_resp_event) },
	[WMI_TAG_PEER_DELETE_RESP_EVENT] = {
		.min_len = sizeof(struct wmi_peer_delete_resp_event) },
	[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT] = {
		.min_len = sizeof(struct wmi_bcn_tx_status_event) },
	[WMI_TAG_VDEV_STOPPED_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_stopped_event) },
	[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT] = {
		.min_len = sizeof(struct wmi_reg_chan_list_cc_ext_event) },
	[WMI_TAG_MGMT_RX_HDR] = {
		.min_len = sizeof(struct ath12k_wmi_mgmt_rx_params) },
	[WMI_TAG_MGMT_TX_COMPL_EVENT] = {
		.min_len = sizeof(struct wmi_mgmt_tx_compl_event) },
	[WMI_TAG_SCAN_EVENT] = {
		.min_len = sizeof(struct wmi_scan_event) },
	[WMI_TAG_PEER_STA_KICKOUT_EVENT] = {
		.min_len = sizeof(struct wmi_peer_sta_kickout_event) },
	[WMI_TAG_ROAM_EVENT] = {
		.min_len = sizeof(struct wmi_roam_event) },
	[WMI_TAG_CHAN_INFO_EVENT] = {
		.min_len = sizeof(struct wmi_chan_info_event) },
	[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT] = {
		.min_len = sizeof(struct wmi_pdev_bss_chan_info_event) },
	[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_install_key_compl_event) },
	[WMI_TAG_READY_EVENT] = {
		.min_len = sizeof(struct ath12k_wmi_ready_event_min_params) },
	[WMI_TAG_SERVICE_AVAILABLE_EVENT] = {
		.min_len = sizeof(struct wmi_service_available_event) },
	[WMI_TAG_PEER_ASSOC_CONF_EVENT] = {
		.min_len = sizeof(struct wmi_peer_assoc_conf_event) },
	[WMI_TAG_RFKILL_EVENT] = {
		.min_len = sizeof(struct wmi_rfkill_state_change_event) },
	[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT] = {
		.min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
	[WMI_TAG_HOST_SWFDA_EVENT] = {
		.min_len = sizeof(struct wmi_fils_discovery_event) },
	[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT] = {
		.min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
	[WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_delete_resp_event) },
	[WMI_TAG_TWT_ENABLE_COMPLETE_EVENT] = {
		.min_len = sizeof(struct wmi_twt_enable_event) },
	[WMI_TAG_TWT_DISABLE_COMPLETE_EVENT] = {
		.min_len = sizeof(struct wmi_twt_disable_event) },
	[WMI_TAG_P2P_NOA_INFO] = {
		.min_len = sizeof(struct ath12k_wmi_p2p_noa_info) },
	[WMI_TAG_P2P_NOA_EVENT] = {
		.min_len = sizeof(struct wmi_p2p_noa_event) },
	[WMI_TAG_11D_NEW_COUNTRY_EVENT] = {
		.min_len = sizeof(struct wmi_11d_new_cc_event) },
	[WMI_TAG_PER_CHAIN_RSSI_STATS] = {
		.min_len = sizeof(struct wmi_per_chain_rssi_stat_params) },
	[WMI_TAG_OBSS_COLOR_COLLISION_EVT] = {
		.min_len = sizeof(struct wmi_obss_color_collision_event) },
};
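
/* A WMI TLV begins with a 32-bit header packing the tag and the payload
 * length; the length does not count the header itself. The _cmd_hdr
 * variant is for command structs whose sizeof() includes the TLV header,
 * hence the TLV_HDR_SIZE subtraction.
 */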
__le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len)
{
	return le32_encode_bits(cmd, WMI_TLV_TAG) |
	       le32_encode_bits(len, WMI_TLV_LEN);
}

static __le32 ath12k_wmi_tlv_cmd_hdr(u32 cmd, u32 len)
{
	return ath12k_wmi_tlv_hdr(cmd, len - TLV_HDR_SIZE);
}

#define PRIMAP(_hw_mode_) \
	[_hw_mode_] = _hw_mode_##_PRI

static const int ath12k_hw_mode_pri_map[] = {
	PRIMAP(WMI_HOST_HW_MODE_SINGLE),
	PRIMAP(WMI_HOST_HW_MODE_DBS),
	PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE),
	PRIMAP(WMI_HOST_HW_MODE_SBS),
	PRIMAP(WMI_HOST_HW_MODE_DBS_SBS),
	PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS),
	/* keep last */
	PRIMAP(WMI_HOST_HW_MODE_MAX),
};

static int
ath12k_wmi_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
		    int (*iter)(struct ath12k_base *ab, u16 tag, u16 len,
				const void *ptr, void *data),
		    void *data)
{
	const void *begin = ptr;
	const struct wmi_tlv *tlv;
	u16 tlv_tag, tlv_len;
	int ret;

	while (len > 0) {
		if (len < sizeof(*tlv)) {
			ath12k_err(ab, "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}

		tlv = ptr;
		tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
		tlv_len = le32_get_bits(tlv->header, WMI_TLV_LEN);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		if (tlv_len > len) {
			ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}

		if (tlv_tag < ARRAY_SIZE(ath12k_wmi_tlv_policies) &&
		    ath12k_wmi_tlv_policies[tlv_tag].min_len &&
		    ath12k_wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
			ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
				   tlv_tag, ptr - begin, tlv_len,
				   ath12k_wmi_tlv_policies[tlv_tag].min_len);
			return -EINVAL;
		}

		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
		if (ret)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}

	return 0;
}

static int ath12k_wmi_tlv_iter_parse(struct ath12k_base *ab, u16 tag, u16 len,
				     const void *ptr, void *data)
{
	const void **tb = data;

	if (tag < WMI_TAG_MAX)
		tb[tag] = ptr;

	return 0;
}

static int ath12k_wmi_tlv_parse(struct ath12k_base *ar, const void **tb,
				const void *ptr, size_t len)
{
	return ath12k_wmi_tlv_iter(ar, ptr, len, ath12k_wmi_tlv_iter_parse,
				   (void *)tb);
}

static const void **
ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab,
			   struct sk_buff *skb, gfp_t gfp)
{
	const void **tb;
	int ret;

	tb = kcalloc(WMI_TAG_MAX, sizeof(*tb), gfp);
	if (!tb)
		return ERR_PTR(-ENOMEM);

	ret = ath12k_wmi_tlv_parse(ab, tb, skb->data, skb->len);
	if (ret) {
		kfree(tb);
		return ERR_PTR(ret);
	}

	return tb;
}
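
/* Prepend the WMI command header and hand the skb to this pdev's HTC
 * endpoint. On failure the header is pulled back off so the caller
 * (ath12k_wmi_cmd_send(), which retries while waiting for tx credits)
 * gets the skb back unmodified.
 */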
static int ath12k_wmi_cmd_send_nowait(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
				      u32 cmd_id)
{
	struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_cmd_hdr *cmd_hdr;
	int ret;

	if (!skb_push(skb, sizeof(struct wmi_cmd_hdr)))
		return -ENOMEM;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	cmd_hdr->cmd_id = le32_encode_bits(cmd_id, WMI_CMD_HDR_CMD_ID);

	memset(skb_cb, 0, sizeof(*skb_cb));
	ret = ath12k_htc_send(&ab->htc, wmi->eid, skb);

	if (ret)
		goto err_pull;

	return 0;

err_pull:
	skb_pull(skb, sizeof(struct wmi_cmd_hdr));
	return ret;
}

int ath12k_wmi_cmd_send(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
			u32 cmd_id)
{
	struct ath12k_wmi_base *wmi_ab = wmi->wmi_ab;
	int ret = -EOPNOTSUPP;

	might_sleep();

	wait_event_timeout(wmi_ab->tx_credits_wq, ({
		ret = ath12k_wmi_cmd_send_nowait(wmi, skb, cmd_id);

		if (ret && test_bit(ATH12K_FLAG_CRASH_FLUSH, &wmi_ab->ab->dev_flags))
			ret = -ESHUTDOWN;

		(ret != -EAGAIN);
	}), WMI_SEND_TIMEOUT_HZ);

	if (ret == -EAGAIN)
		ath12k_warn(wmi_ab->ab, "wmi command %d timeout\n", cmd_id);

	return ret;
}

static int ath12k_pull_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
				     const void *ptr,
				     struct ath12k_wmi_service_ext_arg *arg)
{
	const struct wmi_service_ready_ext_event *ev = ptr;
	int i;

	if (!ev)
		return -EINVAL;

	/* Move this to host based bitmap */
	arg->default_conc_scan_config_bits =
		le32_to_cpu(ev->default_conc_scan_config_bits);
	arg->default_fw_config_bits = le32_to_cpu(ev->default_fw_config_bits);
	arg->he_cap_info = le32_to_cpu(ev->he_cap_info);
	arg->mpdu_density = le32_to_cpu(ev->mpdu_density);
	arg->max_bssid_rx_filters = le32_to_cpu(ev->max_bssid_rx_filters);
	arg->ppet.numss_m1 = le32_to_cpu(ev->ppet.numss_m1);
	arg->ppet.ru_bit_mask = le32_to_cpu(ev->ppet.ru_info);

	for (i = 0; i < WMI_MAX_NUM_SS; i++)
		arg->ppet.ppet16_ppet8_ru3_ru0[i] =
			le32_to_cpu(ev->ppet.ppet16_ppet8_ru3_ru0[i]);

	return 0;
}
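
/* Locate the mac_phy capability entry for (hw_mode_id, phy_id): the loop
 * below walks the advertised hw modes, tracking fls() of each preceding
 * mode's phy_id_map so that, assuming contiguous phy ids, phy_idx ends up
 * at the first phy of the matching mode; phy_id is then added to index
 * the flat mac_phy_caps array.
 */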
static int
ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
				      struct ath12k_wmi_svc_rdy_ext_parse *svc,
				      u8 hw_mode_id, u8 phy_id,
				      struct ath12k_pdev *pdev)
{
	const struct ath12k_wmi_mac_phy_caps_params *mac_caps;
	const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps = svc->hw_caps;
	const struct ath12k_wmi_hw_mode_cap_params *wmi_hw_mode_caps = svc->hw_mode_caps;
	const struct ath12k_wmi_mac_phy_caps_params *wmi_mac_phy_caps = svc->mac_phy_caps;
	struct ath12k_base *ab = wmi_handle->wmi_ab->ab;
	struct ath12k_band_cap *cap_band;
	struct ath12k_pdev_cap *pdev_cap = &pdev->cap;
	struct ath12k_fw_pdev *fw_pdev;
	u32 supported_bands;
	u32 phy_map;
	u32 hw_idx, phy_idx = 0;
	int i;

	if (!hw_caps || !wmi_hw_mode_caps || !svc->soc_hal_reg_caps)
		return -EINVAL;

	for (hw_idx = 0; hw_idx < le32_to_cpu(hw_caps->num_hw_modes); hw_idx++) {
		if (hw_mode_id == le32_to_cpu(wmi_hw_mode_caps[hw_idx].hw_mode_id))
			break;

		phy_map = le32_to_cpu(wmi_hw_mode_caps[hw_idx].phy_id_map);
		phy_idx = fls(phy_map);
	}

	if (hw_idx == le32_to_cpu(hw_caps->num_hw_modes))
		return -EINVAL;

	phy_idx += phy_id;
	if (phy_id >= le32_to_cpu(svc->soc_hal_reg_caps->num_phy))
		return -EINVAL;

	mac_caps = wmi_mac_phy_caps + phy_idx;
	supported_bands = le32_to_cpu(mac_caps->supported_bands);

	if (!(supported_bands & WMI_HOST_WLAN_2GHZ_CAP) &&
	    !(supported_bands & WMI_HOST_WLAN_5GHZ_CAP))
		return -EINVAL;

	pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
	pdev->hw_link_id = ath12k_wmi_mac_phy_get_hw_link_id(mac_caps);
	pdev_cap->supported_bands |= supported_bands;
	pdev_cap->ampdu_density = le32_to_cpu(mac_caps->ampdu_density);

	fw_pdev = &ab->fw_pdev[ab->fw_pdev_count];
	fw_pdev->supported_bands = supported_bands;
	fw_pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
	fw_pdev->phy_id = le32_to_cpu(mac_caps->phy_id);
	ab->fw_pdev_count++;

	/* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from
	 * band to band for a single radio, need to see how this should be
	 * handled.
	 */
	if (supported_bands & WMI_HOST_WLAN_2GHZ_CAP) {
		pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_2g);
		pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_2g);
	}

	if (supported_bands & WMI_HOST_WLAN_5GHZ_CAP) {
		pdev_cap->vht_cap = le32_to_cpu(mac_caps->vht_cap_info_5g);
		pdev_cap->vht_mcs = le32_to_cpu(mac_caps->vht_supp_mcs_5g);
		pdev_cap->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
		pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_5g);
		pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_5g);
		pdev_cap->nss_ratio_enabled =
			WMI_NSS_RATIO_EN_DIS_GET(mac_caps->nss_ratio);
		pdev_cap->nss_ratio_info =
			WMI_NSS_RATIO_INFO_GET(mac_caps->nss_ratio);
	}

	/* tx/rx chainmask reported from fw depends on the actual hw chains used.
	 * For example, for 4x4 capable macphys, the first 4 chains can be used
	 * for the first mac and the remaining 4 chains can be used for the
	 * second mac or vice-versa. In this case, tx/rx chainmask 0xf will be
	 * advertised for the first mac and 0xf0 will be advertised for the
	 * second mac or vice-versa. Compute the shift value for the tx/rx
	 * chainmask which will be used to advertise supported ht/vht rates to
	 * mac80211.
	 */
	pdev_cap->tx_chain_mask_shift =
		find_first_bit((unsigned long *)&pdev_cap->tx_chain_mask, 32);
	pdev_cap->rx_chain_mask_shift =
		find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32);

	if (supported_bands & WMI_HOST_WLAN_2GHZ_CAP) {
		cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
		cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
		cap_band->max_bw_supported = le32_to_cpu(mac_caps->max_bw_supported_2g);
		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_2g);
		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_2g);
		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_2g_ext);
		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_2g);
		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
			cap_band->he_cap_phy_info[i] =
				le32_to_cpu(mac_caps->he_cap_phy_info_2g[i]);

		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet2g.numss_m1);
		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet2g.ru_info);

		for (i = 0; i < WMI_MAX_NUM_SS; i++)
			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
				le32_to_cpu(mac_caps->he_ppet2g.ppet16_ppet8_ru3_ru0[i]);
	}

	if (supported_bands & WMI_HOST_WLAN_5GHZ_CAP) {
		cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
		cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
		cap_band->max_bw_supported =
			le32_to_cpu(mac_caps->max_bw_supported_5g);
		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
			cap_band->he_cap_phy_info[i] =
				le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);

		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);

		for (i = 0; i < WMI_MAX_NUM_SS; i++)
			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
				le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);
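
		/* The mac_phy caps TLV carries no separate 6 GHz fields;
		 * mirror the 5 GHz capabilities into the 6 GHz band entry
		 * as well.
		 */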
		cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
		cap_band->max_bw_supported =
			le32_to_cpu(mac_caps->max_bw_supported_5g);
		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
			cap_band->he_cap_phy_info[i] =
				le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);

		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);

		for (i = 0; i < WMI_MAX_NUM_SS; i++)
			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
				le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);
	}

	return 0;
}

static int
ath12k_pull_reg_cap_svc_rdy_ext(struct ath12k_wmi_pdev *wmi_handle,
				const struct ath12k_wmi_soc_hal_reg_caps_params *reg_caps,
				const struct ath12k_wmi_hal_reg_caps_ext_params *ext_caps,
				u8 phy_idx,
				struct ath12k_wmi_hal_reg_capabilities_ext_arg *param)
{
	const struct ath12k_wmi_hal_reg_caps_ext_params *ext_reg_cap;

	if (!reg_caps || !ext_caps)
		return -EINVAL;

	if (phy_idx >= le32_to_cpu(reg_caps->num_phy))
		return -EINVAL;

	ext_reg_cap = &ext_caps[phy_idx];

	param->phy_id = le32_to_cpu(ext_reg_cap->phy_id);
	param->eeprom_reg_domain = le32_to_cpu(ext_reg_cap->eeprom_reg_domain);
	param->eeprom_reg_domain_ext =
		le32_to_cpu(ext_reg_cap->eeprom_reg_domain_ext);
	param->regcap1 = le32_to_cpu(ext_reg_cap->regcap1);
	param->regcap2 = le32_to_cpu(ext_reg_cap->regcap2);
	/* check if param->wireless_mode is needed */
	param->low_2ghz_chan = le32_to_cpu(ext_reg_cap->low_2ghz_chan);
	param->high_2ghz_chan = le32_to_cpu(ext_reg_cap->high_2ghz_chan);
	param->low_5ghz_chan = le32_to_cpu(ext_reg_cap->low_5ghz_chan);
	param->high_5ghz_chan = le32_to_cpu(ext_reg_cap->high_5ghz_chan);

	return 0;
}

static int ath12k_pull_service_ready_tlv(struct ath12k_base *ab,
					 const void *evt_buf,
					 struct ath12k_wmi_target_cap_arg *cap)
{
	const struct wmi_service_ready_event *ev = evt_buf;

	if (!ev) {
		ath12k_err(ab, "%s: failed by NULL param\n",
			   __func__);
		return -EINVAL;
	}

	cap->phy_capability = le32_to_cpu(ev->phy_capability);
	cap->max_frag_entry = le32_to_cpu(ev->max_frag_entry);
	cap->num_rf_chains = le32_to_cpu(ev->num_rf_chains);
	cap->ht_cap_info = le32_to_cpu(ev->ht_cap_info);
	cap->vht_cap_info = le32_to_cpu(ev->vht_cap_info);
	cap->vht_supp_mcs = le32_to_cpu(ev->vht_supp_mcs);
	cap->hw_min_tx_power = le32_to_cpu(ev->hw_min_tx_power);
	cap->hw_max_tx_power = le32_to_cpu(ev->hw_max_tx_power);
	cap->sys_cap_info = le32_to_cpu(ev->sys_cap_info);
	cap->min_pkt_size_enable = le32_to_cpu(ev->min_pkt_size_enable);
	cap->max_bcn_ie_size = le32_to_cpu(ev->max_bcn_ie_size);
	cap->max_num_scan_channels = le32_to_cpu(ev->max_num_scan_channels);
	cap->max_supported_macs = le32_to_cpu(ev->max_supported_macs);
	cap->wmi_fw_sub_feat_caps = le32_to_cpu(ev->wmi_fw_sub_feat_caps);
	cap->txrx_chainmask = le32_to_cpu(ev->txrx_chainmask);
	cap->default_dbs_hw_mode_index = le32_to_cpu(ev->default_dbs_hw_mode_index);
	cap->num_msdu_desc = le32_to_cpu(ev->num_msdu_desc);

	return 0;
}

/* Save the wmi_service_bitmap into a linear bitmap. The wmi_services in
 * wmi_service ready event are advertised in b0-b3 (LSB 4-bits) of each
 * 4-byte word.
 */
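/* Illustration, assuming WMI_SERVICE_BITS_IN_SIZE32 is 4: service id 10
 * would be bit 2 of wmi_svc_bm[2], since the inner loop below visits bits
 * 0..3 of each word before advancing i. The same modulo arithmetic holds
 * for any value of the constant.
 */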
static void ath12k_wmi_service_bitmap_copy(struct ath12k_wmi_pdev *wmi,
					   const u32 *wmi_svc_bm)
{
	int i, j;

	for (i = 0, j = 0; i < WMI_SERVICE_BM_SIZE && j < WMI_MAX_SERVICE; i++) {
		do {
			if (wmi_svc_bm[i] & BIT(j % WMI_SERVICE_BITS_IN_SIZE32))
				set_bit(j, wmi->wmi_ab->svc_map);
		} while (++j % WMI_SERVICE_BITS_IN_SIZE32);
	}
}

static int ath12k_wmi_svc_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len,
				    const void *ptr, void *data)
{
	struct ath12k_wmi_svc_ready_parse *svc_ready = data;
	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
	u16 expect_len;

	switch (tag) {
	case WMI_TAG_SERVICE_READY_EVENT:
		if (ath12k_pull_service_ready_tlv(ab, ptr, &ab->target_caps))
			return -EINVAL;
		break;

	case WMI_TAG_ARRAY_UINT32:
		if (!svc_ready->wmi_svc_bitmap_done) {
			expect_len = WMI_SERVICE_BM_SIZE * sizeof(u32);
			if (len < expect_len) {
				ath12k_warn(ab, "invalid len %d for the tag 0x%x\n",
					    len, tag);
				return -EINVAL;
			}

			ath12k_wmi_service_bitmap_copy(wmi_handle, ptr);

			svc_ready->wmi_svc_bitmap_done = true;
		}
		break;
	default:
		break;
	}

	return 0;
}

static int ath12k_service_ready_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct ath12k_wmi_svc_ready_parse svc_ready = { };
	int ret;

	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath12k_wmi_svc_rdy_parse,
				  &svc_ready);
	if (ret) {
		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
		return ret;
	}

	return 0;
}

static u32 ath12k_wmi_mgmt_get_freq(struct ath12k *ar,
				    struct ieee80211_tx_info *info)
{
	struct ath12k_base *ab = ar->ab;
	u32 freq = 0;

	if (ab->hw_params->single_pdev_only &&
	    ar->scan.is_roc &&
	    (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
		freq = ar->scan.roc_freq;

	return freq;
}

struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_ab, u32 len)
{
	struct sk_buff *skb;
	struct ath12k_base *ab = wmi_ab->ab;
	u32 round_len = roundup(len, 4);

	skb = ath12k_htc_alloc_skb(ab, WMI_SKB_HEADROOM + round_len);
	if (!skb)
		return NULL;

	skb_reserve(skb, WMI_SKB_HEADROOM);
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath12k_warn(ab, "unaligned WMI skb data\n");

	skb_put(skb, round_len);
	memset(skb->data, 0, round_len);

	return skb;
}
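
/* Build WMI_MGMT_TX_SEND_CMDID: the fixed command struct is followed by a
 * byte-array TLV carrying at most WMI_MGMT_SEND_DOWNLD_LEN bytes of the
 * frame; the full frame remains reachable to firmware through the DMA
 * address in paddr_lo/paddr_hi. For link-agnostic MLO station frames, TX
 * send params and an MLO params TLV are appended after the frame buffer.
 */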
int ath12k_wmi_mgmt_send(struct ath12k_link_vif *arvif, u32 buf_id,
			 struct sk_buff *frame)
{
	struct ath12k *ar = arvif->ar;
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_mgmt_send_cmd *cmd;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(frame);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)frame->data;
	struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
	int cmd_len = sizeof(struct ath12k_wmi_mgmt_send_tx_params);
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)hdr;
	struct ath12k_wmi_mlo_mgmt_send_params *ml_params;
	struct ath12k_base *ab = ar->ab;
	struct wmi_tlv *frame_tlv, *tlv;
	struct ath12k_skb_cb *skb_cb;
	u32 buf_len, buf_len_aligned;
	u32 vdev_id = arvif->vdev_id;
	bool link_agnostic = false;
	struct sk_buff *skb;
	int ret, len;
	void *ptr;

	buf_len = min_t(int, frame->len, WMI_MGMT_SEND_DOWNLD_LEN);

	buf_len_aligned = roundup(buf_len, sizeof(u32));

	len = sizeof(*cmd) + sizeof(*frame_tlv) + buf_len_aligned;

	if (ieee80211_vif_is_mld(vif)) {
		skb_cb = ATH12K_SKB_CB(frame);
		if ((skb_cb->flags & ATH12K_SKB_MLO_STA) &&
		    ab->hw_params->hw_ops->is_frame_link_agnostic &&
		    ab->hw_params->hw_ops->is_frame_link_agnostic(arvif, mgmt)) {
			len += cmd_len + TLV_HDR_SIZE + sizeof(*ml_params);
			ath12k_generic_dbg(ATH12K_DBG_MGMT,
					   "Sending Mgmt Frame fc 0x%0x as link agnostic",
					   mgmt->frame_control);
			link_agnostic = true;
		}
	}

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_mgmt_send_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MGMT_TX_SEND_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->desc_id = cpu_to_le32(buf_id);
	cmd->chanfreq = cpu_to_le32(ath12k_wmi_mgmt_get_freq(ar, info));
	cmd->paddr_lo = cpu_to_le32(lower_32_bits(ATH12K_SKB_CB(frame)->paddr));
	cmd->paddr_hi = cpu_to_le32(upper_32_bits(ATH12K_SKB_CB(frame)->paddr));
	cmd->frame_len = cpu_to_le32(frame->len);
	cmd->buf_len = cpu_to_le32(buf_len);
	cmd->tx_params_valid = 0;

	frame_tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
	frame_tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, buf_len_aligned);

	memcpy(frame_tlv->value, frame->data, buf_len);

	if (!link_agnostic)
		goto send;

	ptr = skb->data + sizeof(*cmd) + sizeof(*frame_tlv) + buf_len_aligned;

	tlv = ptr;

	/* Tx params not used currently */
	tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TX_SEND_PARAMS, cmd_len);
	ptr += cmd_len;

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, sizeof(*ml_params));
	ptr += TLV_HDR_SIZE;

	ml_params = ptr;
	ml_params->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_TX_SEND_PARAMS,
						       sizeof(*ml_params));

	ml_params->hw_link_id = cpu_to_le32(WMI_MGMT_LINK_AGNOSTIC_ID);

send:
	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MGMT_TX_SEND_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to submit WMI_MGMT_TX_SEND_CMDID cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_stats_request_cmd(struct ath12k *ar, u32 stats_id,
				      u32 vdev_id, u32 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_request_stats_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_request_stats_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REQUEST_STATS_CMD,
						 sizeof(*cmd));

	cmd->stats_id = cpu_to_le32(stats_id);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->pdev_id = cpu_to_le32(pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_REQUEST_STATS_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_REQUEST_STATS cmd\n");
		dev_kfree_skb(skb);
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI request stats 0x%x vdev id %d pdev id %d\n",
		   stats_id, vdev_id, pdev_id);

	return ret;
}
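
/* WMI_VDEV_CREATE_CMDID layout: the fixed command struct, an array TLV
 * with one ath12k_wmi_vdev_txrx_streams_params entry per supported band
 * (2 GHz and 5 GHz), and, for ML vdevs only, an MLO params TLV carrying
 * the MLD address.
 */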
int
ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
		       struct ath12k_wmi_vdev_create_arg *args)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_create_cmd *cmd;
	struct sk_buff *skb;
	struct ath12k_wmi_vdev_txrx_streams_params *txrx_streams;
	bool is_ml_vdev = is_valid_ether_addr(args->mld_addr);
	struct wmi_vdev_create_mlo_params *ml_params;
	struct wmi_tlv *tlv;
	int ret, len;
	void *ptr;

	/* It can be optimized by sending tx/rx chain configuration
	 * only for supported bands instead of always sending it for
	 * both the bands.
	 */
	len = sizeof(*cmd) + TLV_HDR_SIZE +
		(WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams)) +
		(is_ml_vdev ? TLV_HDR_SIZE + sizeof(*ml_params) : 0);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_create_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_CREATE_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(args->if_id);
	cmd->vdev_type = cpu_to_le32(args->type);
	cmd->vdev_subtype = cpu_to_le32(args->subtype);
	cmd->num_cfg_txrx_streams = cpu_to_le32(WMI_NUM_SUPPORTED_BAND_MAX);
	cmd->pdev_id = cpu_to_le32(args->pdev_id);
	cmd->mbssid_flags = cpu_to_le32(args->mbssid_flags);
	cmd->mbssid_tx_vdev_id = cpu_to_le32(args->mbssid_tx_vdev_id);
	cmd->vdev_stats_id = cpu_to_le32(args->if_stats_id);
	ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);

	if (args->if_stats_id != ATH12K_INVAL_VDEV_STATS_ID)
		cmd->vdev_stats_id_valid = cpu_to_le32(BIT(0));

	ptr = skb->data + sizeof(*cmd);
	len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);

	ptr += TLV_HDR_SIZE;
	txrx_streams = ptr;
	len = sizeof(*txrx_streams);
	txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
							  len);
	txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_2G);
	txrx_streams->supported_tx_streams =
		cpu_to_le32(args->chains[NL80211_BAND_2GHZ].tx);
	txrx_streams->supported_rx_streams =
		cpu_to_le32(args->chains[NL80211_BAND_2GHZ].rx);

	txrx_streams++;
	txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
							  len);
	txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_5G);
	txrx_streams->supported_tx_streams =
		cpu_to_le32(args->chains[NL80211_BAND_5GHZ].tx);
	txrx_streams->supported_rx_streams =
		cpu_to_le32(args->chains[NL80211_BAND_5GHZ].rx);

	ptr += WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);

	if (is_ml_vdev) {
		tlv = ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
						 sizeof(*ml_params));
		ptr += TLV_HDR_SIZE;
		ml_params = ptr;

		ml_params->tlv_header =
			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_CREATE_PARAMS,
					       sizeof(*ml_params));
		ether_addr_copy(ml_params->mld_macaddr.addr, args->mld_addr);
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n",
		   args->if_id, args->type, args->subtype,
		   macaddr, args->pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_CREATE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to submit WMI_VDEV_CREATE_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
int ath12k_wmi_vdev_delete(struct ath12k *ar, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_delete_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_delete_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DELETE_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev delete id %d\n", vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DELETE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DELETE_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_vdev_stop(struct ath12k *ar, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_stop_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_stop_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_STOP_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev stop id 0x%x\n", vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_STOP_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_STOP cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_vdev_down(struct ath12k *ar, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_down_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_down_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DOWN_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev down id 0x%x\n", vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DOWN_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DOWN cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
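
/* Encode the target channel for a vdev (re)start request. For 160 MHz
 * (HE/EHT) and 320 MHz (EHT) modes, band_center_freq1 is moved to the
 * center of the primary 80 MHz or 160 MHz segment respectively, while
 * band_center_freq2 takes over the center of the full channel.
 */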
static void ath12k_wmi_put_wmi_channel(struct ath12k_wmi_channel_params *chan,
				       struct wmi_vdev_start_req_arg *arg)
{
	u32 center_freq1 = arg->band_center_freq1;

	memset(chan, 0, sizeof(*chan));

	chan->mhz = cpu_to_le32(arg->freq);
	chan->band_center_freq1 = cpu_to_le32(center_freq1);
	if (arg->mode == MODE_11BE_EHT320) {
		if (arg->freq > center_freq1)
			chan->band_center_freq1 = cpu_to_le32(center_freq1 + 80);
		else
			chan->band_center_freq1 = cpu_to_le32(center_freq1 - 80);

		chan->band_center_freq2 = cpu_to_le32(center_freq1);

	} else if (arg->mode == MODE_11BE_EHT160 ||
		   arg->mode == MODE_11AX_HE160) {
		if (arg->freq > center_freq1)
			chan->band_center_freq1 = cpu_to_le32(center_freq1 + 40);
		else
			chan->band_center_freq1 = cpu_to_le32(center_freq1 - 40);

		chan->band_center_freq2 = cpu_to_le32(center_freq1);
	} else {
		chan->band_center_freq2 = 0;
	}

	chan->info |= le32_encode_bits(arg->mode, WMI_CHAN_INFO_MODE);
	if (arg->passive)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
	if (arg->allow_ibss)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ADHOC_ALLOWED);
	if (arg->allow_ht)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT);
	if (arg->allow_vht)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT);
	if (arg->allow_he)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE);
	if (arg->ht40plus)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_HT40_PLUS);
	if (arg->chan_radar)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS);
	if (arg->freq2_radar)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS_FREQ2);

	chan->reg_info_1 = le32_encode_bits(arg->max_power,
					    WMI_CHAN_REG_INFO1_MAX_PWR) |
			   le32_encode_bits(arg->max_reg_power,
					    WMI_CHAN_REG_INFO1_MAX_REG_PWR);

	chan->reg_info_2 = le32_encode_bits(arg->max_antenna_gain,
					    WMI_CHAN_REG_INFO2_ANT_MAX) |
			   le32_encode_bits(arg->max_power, WMI_CHAN_REG_INFO2_MAX_TX_PWR);
}

int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
			  bool restart)
{
	struct wmi_vdev_start_mlo_params *ml_params;
	struct wmi_partner_link_info *partner_info;
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_start_request_cmd *cmd;
	struct sk_buff *skb;
	struct ath12k_wmi_channel_params *chan;
	struct wmi_tlv *tlv;
	void *ptr;
	int ret, len, i, ml_arg_size = 0;

	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
		return -EINVAL;

	len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;

	if (!restart && arg->ml.enabled) {
		ml_arg_size = TLV_HDR_SIZE + sizeof(*ml_params) +
			      TLV_HDR_SIZE + (arg->ml.num_partner_links *
					      sizeof(*partner_info));
		len += ml_arg_size;
	}

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_START_REQUEST_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	cmd->beacon_interval = cpu_to_le32(arg->bcn_intval);
	cmd->bcn_tx_rate = cpu_to_le32(arg->bcn_tx_rate);
	cmd->dtim_period = cpu_to_le32(arg->dtim_period);
	cmd->num_noa_descriptors = cpu_to_le32(arg->num_noa_descriptors);
	cmd->preferred_rx_streams = cpu_to_le32(arg->pref_rx_streams);
	cmd->preferred_tx_streams = cpu_to_le32(arg->pref_tx_streams);
	cmd->cac_duration_ms = cpu_to_le32(arg->cac_duration_ms);
	cmd->regdomain = cpu_to_le32(arg->regdomain);
	cmd->he_ops = cpu_to_le32(arg->he_ops);
	cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap);
	cmd->mbssid_flags = cpu_to_le32(arg->mbssid_flags);
	cmd->mbssid_tx_vdev_id = cpu_to_le32(arg->mbssid_tx_vdev_id);

	if (!restart) {
		if (arg->ssid) {
			cmd->ssid.ssid_len = cpu_to_le32(arg->ssid_len);
			memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
		}
		if (arg->hidden_ssid)
			cmd->flags |= cpu_to_le32(WMI_VDEV_START_HIDDEN_SSID);
		if (arg->pmf_enabled)
			cmd->flags |= cpu_to_le32(WMI_VDEV_START_PMF_ENABLED);
	}

	cmd->flags |= cpu_to_le32(WMI_VDEV_START_LDPC_RX_ENABLED);

	ptr = skb->data + sizeof(*cmd);
	chan = ptr;

	ath12k_wmi_put_wmi_channel(chan, arg);

	chan->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL,
						  sizeof(*chan));
	ptr += sizeof(*chan);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);

	/* Note: This is a nested TLV containing:
	 * [wmi_tlv][ath12k_wmi_p2p_noa_descriptor][wmi_tlv]..
	 */

	ptr += sizeof(*tlv);
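
	/* MLO TLVs are appended only on a fresh start of an ML-enabled vdev
	 * (ml_arg_size is left at 0 on restart): one MLO params struct
	 * followed by an array of per-partner-link entries.
	 */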
	if (ml_arg_size) {
		tlv = ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
						 sizeof(*ml_params));
		ptr += TLV_HDR_SIZE;

		ml_params = ptr;

		ml_params->tlv_header =
			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_START_PARAMS,
					       sizeof(*ml_params));

		ml_params->flags = le32_encode_bits(arg->ml.enabled,
						    ATH12K_WMI_FLAG_MLO_ENABLED) |
				   le32_encode_bits(arg->ml.assoc_link,
						    ATH12K_WMI_FLAG_MLO_ASSOC_LINK) |
				   le32_encode_bits(arg->ml.mcast_link,
						    ATH12K_WMI_FLAG_MLO_MCAST_VDEV) |
				   le32_encode_bits(arg->ml.link_add,
						    ATH12K_WMI_FLAG_MLO_LINK_ADD);

		ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %d start ml flags 0x%x\n",
			   arg->vdev_id, ml_params->flags);

		ptr += sizeof(*ml_params);

		tlv = ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
						 arg->ml.num_partner_links *
						 sizeof(*partner_info));
		ptr += TLV_HDR_SIZE;

		partner_info = ptr;

		for (i = 0; i < arg->ml.num_partner_links; i++) {
			partner_info->tlv_header =
				ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PARTNER_LINK_PARAMS,
						       sizeof(*partner_info));
			partner_info->vdev_id =
				cpu_to_le32(arg->ml.partner_info[i].vdev_id);
			partner_info->hw_link_id =
				cpu_to_le32(arg->ml.partner_info[i].hw_link_id);
			ether_addr_copy(partner_info->vdev_addr.addr,
					arg->ml.partner_info[i].addr);

			ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "partner vdev %d hw_link_id %d macaddr %pM\n",
				   partner_info->vdev_id, partner_info->hw_link_id,
				   partner_info->vdev_addr.addr);

			partner_info++;
		}

		ptr = partner_info;
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %s id 0x%x freq 0x%x mode 0x%x\n",
		   restart ? "restart" : "start", arg->vdev_id,
		   arg->freq, arg->mode);

	if (restart)
		ret = ath12k_wmi_cmd_send(wmi, skb,
					  WMI_VDEV_RESTART_REQUEST_CMDID);
	else
		ret = ath12k_wmi_cmd_send(wmi, skb,
					  WMI_VDEV_START_REQUEST_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit vdev_%s cmd\n",
			    restart ? "restart" : "start");
		dev_kfree_skb(skb);
	}

	return ret;
}
"restart" : "start"); 1222 dev_kfree_skb(skb); 1223 } 1224 1225 return ret; 1226 } 1227 1228 int ath12k_wmi_vdev_up(struct ath12k *ar, struct ath12k_wmi_vdev_up_params *params) 1229 { 1230 struct ath12k_wmi_pdev *wmi = ar->wmi; 1231 struct wmi_vdev_up_cmd *cmd; 1232 struct sk_buff *skb; 1233 int ret; 1234 1235 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1236 if (!skb) 1237 return -ENOMEM; 1238 1239 cmd = (struct wmi_vdev_up_cmd *)skb->data; 1240 1241 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_UP_CMD, 1242 sizeof(*cmd)); 1243 cmd->vdev_id = cpu_to_le32(params->vdev_id); 1244 cmd->vdev_assoc_id = cpu_to_le32(params->aid); 1245 1246 ether_addr_copy(cmd->vdev_bssid.addr, params->bssid); 1247 1248 if (params->tx_bssid) { 1249 ether_addr_copy(cmd->tx_vdev_bssid.addr, params->tx_bssid); 1250 cmd->nontx_profile_idx = cpu_to_le32(params->nontx_profile_idx); 1251 cmd->nontx_profile_cnt = cpu_to_le32(params->nontx_profile_cnt); 1252 } 1253 1254 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1255 "WMI mgmt vdev up id 0x%x assoc id %d bssid %pM\n", 1256 params->vdev_id, params->aid, params->bssid); 1257 1258 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID); 1259 if (ret) { 1260 ath12k_warn(ar->ab, "failed to submit WMI_VDEV_UP cmd\n"); 1261 dev_kfree_skb(skb); 1262 } 1263 1264 return ret; 1265 } 1266 1267 int ath12k_wmi_send_peer_create_cmd(struct ath12k *ar, 1268 struct ath12k_wmi_peer_create_arg *arg) 1269 { 1270 struct ath12k_wmi_pdev *wmi = ar->wmi; 1271 struct wmi_peer_create_cmd *cmd; 1272 struct sk_buff *skb; 1273 int ret, len; 1274 struct wmi_peer_create_mlo_params *ml_param; 1275 void *ptr; 1276 struct wmi_tlv *tlv; 1277 1278 len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*ml_param); 1279 1280 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 1281 if (!skb) 1282 return -ENOMEM; 1283 1284 cmd = (struct wmi_peer_create_cmd *)skb->data; 1285 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_CREATE_CMD, 1286 sizeof(*cmd)); 1287 1288 ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_addr); 1289 cmd->peer_type = cpu_to_le32(arg->peer_type); 1290 cmd->vdev_id = cpu_to_le32(arg->vdev_id); 1291 1292 ptr = skb->data + sizeof(*cmd); 1293 tlv = ptr; 1294 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 1295 sizeof(*ml_param)); 1296 ptr += TLV_HDR_SIZE; 1297 ml_param = ptr; 1298 ml_param->tlv_header = 1299 ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_CREATE_PARAMS, 1300 sizeof(*ml_param)); 1301 if (arg->ml_enabled) 1302 ml_param->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED); 1303 1304 ptr += sizeof(*ml_param); 1305 1306 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1307 "WMI peer create vdev_id %d peer_addr %pM ml_flags 0x%x\n", 1308 arg->vdev_id, arg->peer_addr, ml_param->flags); 1309 1310 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID); 1311 if (ret) { 1312 ath12k_warn(ar->ab, "failed to submit WMI_PEER_CREATE cmd\n"); 1313 dev_kfree_skb(skb); 1314 } 1315 1316 return ret; 1317 } 1318 1319 int ath12k_wmi_send_peer_delete_cmd(struct ath12k *ar, 1320 const u8 *peer_addr, u8 vdev_id) 1321 { 1322 struct ath12k_wmi_pdev *wmi = ar->wmi; 1323 struct wmi_peer_delete_cmd *cmd; 1324 struct sk_buff *skb; 1325 int ret; 1326 1327 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1328 if (!skb) 1329 return -ENOMEM; 1330 1331 cmd = (struct wmi_peer_delete_cmd *)skb->data; 1332 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_DELETE_CMD, 1333 sizeof(*cmd)); 1334 1335 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); 1336 cmd->vdev_id = cpu_to_le32(vdev_id); 1337 1338 ath12k_dbg(ar->ab, 
int ath12k_wmi_send_peer_delete_cmd(struct ath12k *ar,
				    const u8 *peer_addr, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_delete_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_delete_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_DELETE_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI peer delete vdev_id %d peer_addr %pM\n",
		   vdev_id, peer_addr);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_DELETE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_PEER_DELETE cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_pdev_set_regdomain(struct ath12k *ar,
				       struct ath12k_wmi_pdev_set_regdomain_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_set_regdomain_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_REGDOMAIN_CMD,
						 sizeof(*cmd));

	cmd->reg_domain = cpu_to_le32(arg->current_rd_in_use);
	cmd->reg_domain_2g = cpu_to_le32(arg->current_rd_2g);
	cmd->reg_domain_5g = cpu_to_le32(arg->current_rd_5g);
	cmd->conformance_test_limit_2g = cpu_to_le32(arg->ctl_2g);
	cmd->conformance_test_limit_5g = cpu_to_le32(arg->ctl_5g);
	cmd->dfs_domain = cpu_to_le32(arg->dfs_domain);
	cmd->pdev_id = cpu_to_le32(arg->pdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI pdev regd rd %d rd2g %d rd5g %d domain %d pdev id %d\n",
		   arg->current_rd_in_use, arg->current_rd_2g,
		   arg->current_rd_5g, arg->dfs_domain, arg->pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PDEV_SET_REGDOMAIN cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_set_peer_param(struct ath12k *ar, const u8 *peer_addr,
			      u32 vdev_id, u32 param_id, u32 param_val)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_set_param_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_set_param_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_SET_PARAM_CMD,
						 sizeof(*cmd));
	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->param_id = cpu_to_le32(param_id);
	cmd->param_value = cpu_to_le32(param_val);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev %d peer 0x%pM set param %d value %d\n",
		   vdev_id, peer_addr, param_id, param_val);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_SET_PARAM_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_PEER_SET_PARAM cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_peer_flush_tids_cmd(struct ath12k *ar,
					u8 peer_addr[ETH_ALEN],
					u32 peer_tid_bitmap,
					u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_flush_tids_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_FLUSH_TIDS_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
	cmd->peer_tid_bitmap = cpu_to_le32(peer_tid_bitmap);
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI peer flush vdev_id %d peer_addr %pM tids %08x\n",
		   vdev_id, peer_addr, peer_tid_bitmap);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_FLUSH_TIDS_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PEER_FLUSH_TIDS cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
int ath12k_wmi_peer_rx_reorder_queue_setup(struct ath12k *ar,
					   int vdev_id, const u8 *addr,
					   dma_addr_t paddr, u8 tid,
					   u8 ba_window_size_valid,
					   u32 ba_window_size)
{
	struct wmi_peer_reorder_queue_setup_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_reorder_queue_setup_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_SETUP_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, addr);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->tid = cpu_to_le32(tid);
	cmd->queue_ptr_lo = cpu_to_le32(lower_32_bits(paddr));
	cmd->queue_ptr_hi = cpu_to_le32(upper_32_bits(paddr));
	cmd->queue_no = cpu_to_le32(tid);
	cmd->ba_window_size_valid = cpu_to_le32(ba_window_size_valid);
	cmd->ba_window_size = cpu_to_le32(ba_window_size);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi rx reorder queue setup addr %pM vdev_id %d tid %d\n",
		   addr, vdev_id, tid);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
				  WMI_PEER_REORDER_QUEUE_SETUP_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PEER_REORDER_QUEUE_SETUP\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int
ath12k_wmi_rx_reord_queue_remove(struct ath12k *ar,
				 struct ath12k_wmi_rx_reorder_queue_remove_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_reorder_queue_remove_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_reorder_queue_remove_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_REMOVE_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_macaddr);
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	cmd->tid_mask = cpu_to_le32(arg->peer_tid_bitmap);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "%s: peer_macaddr %pM vdev_id %d, tid_map %d", __func__,
		   arg->peer_macaddr, arg->vdev_id, arg->peer_tid_bitmap);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_PEER_REORDER_QUEUE_REMOVE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PEER_REORDER_QUEUE_REMOVE_CMDID");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_pdev_set_param(struct ath12k *ar, u32 param_id,
			      u32 param_value, u8 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_set_param_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_PARAM_CMD,
						 sizeof(*cmd));
	cmd->pdev_id = cpu_to_le32(pdev_id);
	cmd->param_id = cpu_to_le32(param_id);
	cmd->param_value = cpu_to_le32(param_value);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI pdev set param %d pdev id %d value %d\n",
		   param_id, pdev_id, param_value);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_PARAM_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
int ath12k_wmi_pdev_set_ps_mode(struct ath12k *ar, int vdev_id, u32 enable)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_set_ps_mode_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_ps_mode_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_MODE_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->sta_ps_mode = cpu_to_le32(enable);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev set psmode %d vdev id %d\n",
		   enable, vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_MODE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_MODE cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_pdev_suspend(struct ath12k *ar, u32 suspend_opt,
			    u32 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_suspend_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_suspend_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SUSPEND_CMD,
						 sizeof(*cmd));

	cmd->suspend_opt = cpu_to_le32(suspend_opt);
	cmd->pdev_id = cpu_to_le32(pdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI pdev suspend pdev_id %d\n", pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SUSPEND_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_PDEV_SUSPEND cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_pdev_resume(struct ath12k *ar, u32 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_resume_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_resume_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_RESUME_CMD,
						 sizeof(*cmd));
	cmd->pdev_id = cpu_to_le32(pdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI pdev resume pdev id %d\n", pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_RESUME_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_PDEV_RESUME cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
/* TODO: FW support for the cmd is not available yet.
 * Can be tested once the command and corresponding
 * event is implemented in FW
 */
int ath12k_wmi_pdev_bss_chan_info_request(struct ath12k *ar,
					  enum wmi_bss_chan_info_req_type type)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_bss_chan_info_req_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_bss_chan_info_req_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST,
						 sizeof(*cmd));
	cmd->req_type = cpu_to_le32(type);
	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI bss chan info req type %d\n", type);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PDEV_BSS_CHAN_INFO_REQUEST cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_set_ap_ps_param_cmd(struct ath12k *ar, u8 *peer_addr,
					struct ath12k_wmi_ap_ps_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_ap_ps_peer_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_AP_PS_PEER_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
	cmd->param = cpu_to_le32(arg->param);
	cmd->value = cpu_to_le32(arg->value);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI set ap ps vdev id %d peer %pM param %d value %d\n",
		   arg->vdev_id, peer_addr, arg->param, arg->value);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_AP_PS_PEER_PARAM_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_AP_PS_PEER_PARAM_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_set_sta_ps_param(struct ath12k *ar, u32 vdev_id,
				u32 param, u32 param_value)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_sta_powersave_param_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_PARAM_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->param = cpu_to_le32(param);
	cmd->value = cpu_to_le32(param_value);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI set sta ps vdev_id %d param %d value %d\n",
		   vdev_id, param, param_value);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_PARAM_CMDID");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_force_fw_hang_cmd(struct ath12k *ar, u32 type, u32 delay_time_ms)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_force_fw_hang_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
1779 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FORCE_FW_HANG_CMD, 1780 len); 1781 1782 cmd->type = cpu_to_le32(type); 1783 cmd->delay_time_ms = cpu_to_le32(delay_time_ms); 1784 1785 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_FORCE_FW_HANG_CMDID); 1786 1787 if (ret) { 1788 ath12k_warn(ar->ab, "Failed to send WMI_FORCE_FW_HANG_CMDID"); 1789 dev_kfree_skb(skb); 1790 } 1791 return ret; 1792 } 1793 1794 int ath12k_wmi_vdev_set_param_cmd(struct ath12k *ar, u32 vdev_id, 1795 u32 param_id, u32 param_value) 1796 { 1797 struct ath12k_wmi_pdev *wmi = ar->wmi; 1798 struct wmi_vdev_set_param_cmd *cmd; 1799 struct sk_buff *skb; 1800 int ret; 1801 1802 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1803 if (!skb) 1804 return -ENOMEM; 1805 1806 cmd = (struct wmi_vdev_set_param_cmd *)skb->data; 1807 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_PARAM_CMD, 1808 sizeof(*cmd)); 1809 1810 cmd->vdev_id = cpu_to_le32(vdev_id); 1811 cmd->param_id = cpu_to_le32(param_id); 1812 cmd->param_value = cpu_to_le32(param_value); 1813 1814 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1815 "WMI vdev id 0x%x set param %d value %d\n", 1816 vdev_id, param_id, param_value); 1817 1818 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_PARAM_CMDID); 1819 if (ret) { 1820 ath12k_warn(ar->ab, 1821 "failed to send WMI_VDEV_SET_PARAM_CMDID\n"); 1822 dev_kfree_skb(skb); 1823 } 1824 1825 return ret; 1826 } 1827 1828 int ath12k_wmi_send_pdev_temperature_cmd(struct ath12k *ar) 1829 { 1830 struct ath12k_wmi_pdev *wmi = ar->wmi; 1831 struct wmi_get_pdev_temperature_cmd *cmd; 1832 struct sk_buff *skb; 1833 int ret; 1834 1835 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1836 if (!skb) 1837 return -ENOMEM; 1838 1839 cmd = (struct wmi_get_pdev_temperature_cmd *)skb->data; 1840 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_GET_TEMPERATURE_CMD, 1841 sizeof(*cmd)); 1842 cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id); 1843 1844 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1845 "WMI pdev get temperature for pdev_id %d\n", ar->pdev->pdev_id); 1846 1847 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_GET_TEMPERATURE_CMDID); 1848 if (ret) { 1849 ath12k_warn(ar->ab, "failed to send WMI_PDEV_GET_TEMPERATURE cmd\n"); 1850 dev_kfree_skb(skb); 1851 } 1852 1853 return ret; 1854 } 1855 1856 int ath12k_wmi_send_bcn_offload_control_cmd(struct ath12k *ar, 1857 u32 vdev_id, u32 bcn_ctrl_op) 1858 { 1859 struct ath12k_wmi_pdev *wmi = ar->wmi; 1860 struct wmi_bcn_offload_ctrl_cmd *cmd; 1861 struct sk_buff *skb; 1862 int ret; 1863 1864 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1865 if (!skb) 1866 return -ENOMEM; 1867 1868 cmd = (struct wmi_bcn_offload_ctrl_cmd *)skb->data; 1869 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_OFFLOAD_CTRL_CMD, 1870 sizeof(*cmd)); 1871 1872 cmd->vdev_id = cpu_to_le32(vdev_id); 1873 cmd->bcn_ctrl_op = cpu_to_le32(bcn_ctrl_op); 1874 1875 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1876 "WMI bcn ctrl offload vdev id %d ctrl_op %d\n", 1877 vdev_id, bcn_ctrl_op); 1878 1879 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_OFFLOAD_CTRL_CMDID); 1880 if (ret) { 1881 ath12k_warn(ar->ab, 1882 "failed to send WMI_BCN_OFFLOAD_CTRL_CMDID\n"); 1883 dev_kfree_skb(skb); 1884 } 1885 1886 return ret; 1887 } 1888 1889 int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id, 1890 const u8 *p2p_ie) 1891 { 1892 struct ath12k_wmi_pdev *wmi = ar->wmi; 1893 struct wmi_p2p_go_set_beacon_ie_cmd *cmd; 1894 size_t p2p_ie_len, aligned_len; 1895 struct wmi_tlv *tlv; 1896 struct sk_buff *skb; 1897 void *ptr; 1898 int ret, len; 1899 1900 
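	/* A raw P2P IE is laid out as element ID (byte 0), payload length
	 * (byte 1) and payload, so the complete IE occupies p2p_ie[1] + 2
	 * bytes. The copy is padded up to a multiple of 4 bytes below to
	 * satisfy TLV alignment.
	 */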
	p2p_ie_len = p2p_ie[1] + 2;
	aligned_len = roundup(p2p_ie_len, sizeof(u32));

	len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	ptr = skb->data;
	cmd = ptr;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_P2P_GO_SET_BEACON_IE,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->ie_buf_len = cpu_to_le32(p2p_ie_len);

	ptr += sizeof(*cmd);
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE,
					 aligned_len);
	memcpy(tlv->value, p2p_ie, p2p_ie_len);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_P2P_GO_SET_BEACON_IE);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_P2P_GO_SET_BEACON_IE\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_bcn_tmpl(struct ath12k_link_vif *arvif,
			struct ieee80211_mutable_offsets *offs,
			struct sk_buff *bcn,
			struct ath12k_wmi_bcn_tmpl_ema_arg *ema_args)
{
	struct ath12k *ar = arvif->ar;
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct ath12k_base *ab = ar->ab;
	struct wmi_bcn_tmpl_cmd *cmd;
	struct ath12k_wmi_bcn_prb_info_params *bcn_prb_info;
	struct ath12k_vif *ahvif = arvif->ahvif;
	struct ieee80211_bss_conf *conf;
	u32 vdev_id = arvif->vdev_id;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	u32 ema_params = 0;
	void *ptr;
	int ret, len;
	size_t aligned_len = roundup(bcn->len, 4);

	conf = ath12k_mac_get_link_bss_conf(arvif);
	if (!conf) {
		ath12k_warn(ab,
			    "unable to access bss link conf in beacon template command for vif %pM link %u\n",
			    ahvif->vif->addr, arvif->link_id);
		return -EINVAL;
	}

	len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_bcn_tmpl_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_TMPL_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->tim_ie_offset = cpu_to_le32(offs->tim_offset);

	if (conf->csa_active) {
		cmd->csa_switch_count_offset =
			cpu_to_le32(offs->cntdwn_counter_offs[0]);
		cmd->ext_csa_switch_count_offset =
			cpu_to_le32(offs->cntdwn_counter_offs[1]);
		cmd->csa_event_bitmap = cpu_to_le32(0xFFFFFFFF);
		arvif->current_cntdown_counter = bcn->data[offs->cntdwn_counter_offs[0]];
	}

	cmd->buf_len = cpu_to_le32(bcn->len);
	cmd->mbssid_ie_offset = cpu_to_le32(offs->mbssid_off);
	if (ema_args) {
		u32p_replace_bits(&ema_params, ema_args->bcn_cnt, WMI_EMA_BEACON_CNT);
		u32p_replace_bits(&ema_params, ema_args->bcn_index, WMI_EMA_BEACON_IDX);
		if (ema_args->bcn_index == 0)
			u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_FIRST);
		if (ema_args->bcn_index + 1 == ema_args->bcn_cnt)
			u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_LAST);
		cmd->ema_params = cpu_to_le32(ema_params);
	}
	cmd->feature_enable_bitmap =
		cpu_to_le32(u32_encode_bits(arvif->beacon_prot,
					    WMI_BEACON_PROTECTION_EN_BIT));

	ptr = skb->data + sizeof(*cmd);

	bcn_prb_info = ptr;
	len = sizeof(*bcn_prb_info);
	bcn_prb_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO,
							  len);
	bcn_prb_info->caps = 0;
	bcn_prb_info->erp = 0;

	ptr += sizeof(*bcn_prb_info);

	tlv = ptr;
	tlv->header
= ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len); 2008 memcpy(tlv->value, bcn->data, bcn->len); 2009 2010 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_TMPL_CMDID); 2011 if (ret) { 2012 ath12k_warn(ab, "failed to send WMI_BCN_TMPL_CMDID\n"); 2013 dev_kfree_skb(skb); 2014 } 2015 2016 return ret; 2017 } 2018 2019 int ath12k_wmi_vdev_install_key(struct ath12k *ar, 2020 struct wmi_vdev_install_key_arg *arg) 2021 { 2022 struct ath12k_wmi_pdev *wmi = ar->wmi; 2023 struct wmi_vdev_install_key_cmd *cmd; 2024 struct wmi_tlv *tlv; 2025 struct sk_buff *skb; 2026 int ret, len, key_len_aligned; 2027 2028 /* WMI_TAG_ARRAY_BYTE needs to be aligned with 4, the actual key 2029 * length is specified in cmd->key_len. 2030 */ 2031 key_len_aligned = roundup(arg->key_len, 4); 2032 2033 len = sizeof(*cmd) + TLV_HDR_SIZE + key_len_aligned; 2034 2035 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 2036 if (!skb) 2037 return -ENOMEM; 2038 2039 cmd = (struct wmi_vdev_install_key_cmd *)skb->data; 2040 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_INSTALL_KEY_CMD, 2041 sizeof(*cmd)); 2042 cmd->vdev_id = cpu_to_le32(arg->vdev_id); 2043 ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr); 2044 cmd->key_idx = cpu_to_le32(arg->key_idx); 2045 cmd->key_flags = cpu_to_le32(arg->key_flags); 2046 cmd->key_cipher = cpu_to_le32(arg->key_cipher); 2047 cmd->key_len = cpu_to_le32(arg->key_len); 2048 cmd->key_txmic_len = cpu_to_le32(arg->key_txmic_len); 2049 cmd->key_rxmic_len = cpu_to_le32(arg->key_rxmic_len); 2050 2051 if (arg->key_rsc_counter) 2052 cmd->key_rsc_counter = cpu_to_le64(arg->key_rsc_counter); 2053 2054 tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd)); 2055 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, key_len_aligned); 2056 memcpy(tlv->value, arg->key_data, arg->key_len); 2057 2058 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 2059 "WMI vdev install key idx %d cipher %d len %d\n", 2060 arg->key_idx, arg->key_cipher, arg->key_len); 2061 2062 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID); 2063 if (ret) { 2064 ath12k_warn(ar->ab, 2065 "failed to send WMI_VDEV_INSTALL_KEY cmd\n"); 2066 dev_kfree_skb(skb); 2067 } 2068 2069 return ret; 2070 } 2071 2072 static void ath12k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd, 2073 struct ath12k_wmi_peer_assoc_arg *arg, 2074 bool hw_crypto_disabled) 2075 { 2076 cmd->peer_flags = 0; 2077 cmd->peer_flags_ext = 0; 2078 2079 if (arg->is_wme_set) { 2080 if (arg->qos_flag) 2081 cmd->peer_flags |= cpu_to_le32(WMI_PEER_QOS); 2082 if (arg->apsd_flag) 2083 cmd->peer_flags |= cpu_to_le32(WMI_PEER_APSD); 2084 if (arg->ht_flag) 2085 cmd->peer_flags |= cpu_to_le32(WMI_PEER_HT); 2086 if (arg->bw_40) 2087 cmd->peer_flags |= cpu_to_le32(WMI_PEER_40MHZ); 2088 if (arg->bw_80) 2089 cmd->peer_flags |= cpu_to_le32(WMI_PEER_80MHZ); 2090 if (arg->bw_160) 2091 cmd->peer_flags |= cpu_to_le32(WMI_PEER_160MHZ); 2092 if (arg->bw_320) 2093 cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_320MHZ); 2094 2095 /* Typically if STBC is enabled for VHT it should be enabled 2096 * for HT as well 2097 **/ 2098 if (arg->stbc_flag) 2099 cmd->peer_flags |= cpu_to_le32(WMI_PEER_STBC); 2100 2101 /* Typically if LDPC is enabled for VHT it should be enabled 2102 * for HT as well 2103 **/ 2104 if (arg->ldpc_flag) 2105 cmd->peer_flags |= cpu_to_le32(WMI_PEER_LDPC); 2106 2107 if (arg->static_mimops_flag) 2108 cmd->peer_flags |= cpu_to_le32(WMI_PEER_STATIC_MIMOPS); 2109 if (arg->dynamic_mimops_flag) 2110 cmd->peer_flags |= cpu_to_le32(WMI_PEER_DYN_MIMOPS); 2111 if (arg->spatial_mux_flag) 2112 
cmd->peer_flags |= cpu_to_le32(WMI_PEER_SPATIAL_MUX); 2113 if (arg->vht_flag) 2114 cmd->peer_flags |= cpu_to_le32(WMI_PEER_VHT); 2115 if (arg->he_flag) 2116 cmd->peer_flags |= cpu_to_le32(WMI_PEER_HE); 2117 if (arg->twt_requester) 2118 cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_REQ); 2119 if (arg->twt_responder) 2120 cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_RESP); 2121 if (arg->eht_flag) 2122 cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_EHT); 2123 } 2124 2125 /* Suppress authorization for all AUTH modes that need 4-way handshake 2126 * (during re-association). 2127 * Authorization will be done for these modes on key installation. 2128 */ 2129 if (arg->auth_flag) 2130 cmd->peer_flags |= cpu_to_le32(WMI_PEER_AUTH); 2131 if (arg->need_ptk_4_way) { 2132 cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_PTK_4_WAY); 2133 if (!hw_crypto_disabled && arg->is_assoc) 2134 cmd->peer_flags &= cpu_to_le32(~WMI_PEER_AUTH); 2135 } 2136 if (arg->need_gtk_2_way) 2137 cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_GTK_2_WAY); 2138 /* safe mode bypass the 4-way handshake */ 2139 if (arg->safe_mode_enabled) 2140 cmd->peer_flags &= cpu_to_le32(~(WMI_PEER_NEED_PTK_4_WAY | 2141 WMI_PEER_NEED_GTK_2_WAY)); 2142 2143 if (arg->is_pmf_enabled) 2144 cmd->peer_flags |= cpu_to_le32(WMI_PEER_PMF); 2145 2146 /* Disable AMSDU for station transmit, if user configures it */ 2147 /* Disable AMSDU for AP transmit to 11n Stations, if user configures 2148 * it 2149 * if (arg->amsdu_disable) Add after FW support 2150 **/ 2151 2152 /* Target asserts if node is marked HT and all MCS is set to 0. 2153 * Mark the node as non-HT if all the mcs rates are disabled through 2154 * iwpriv 2155 **/ 2156 if (arg->peer_ht_rates.num_rates == 0) 2157 cmd->peer_flags &= cpu_to_le32(~WMI_PEER_HT); 2158 } 2159 2160 int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar, 2161 struct ath12k_wmi_peer_assoc_arg *arg) 2162 { 2163 struct ath12k_wmi_pdev *wmi = ar->wmi; 2164 struct wmi_peer_assoc_complete_cmd *cmd; 2165 struct ath12k_wmi_vht_rate_set_params *mcs; 2166 struct ath12k_wmi_he_rate_set_params *he_mcs; 2167 struct ath12k_wmi_eht_rate_set_params *eht_mcs; 2168 struct wmi_peer_assoc_mlo_params *ml_params; 2169 struct wmi_peer_assoc_mlo_partner_info_params *partner_info; 2170 struct sk_buff *skb; 2171 struct wmi_tlv *tlv; 2172 void *ptr; 2173 u32 peer_legacy_rates_align, eml_pad_delay, eml_trans_delay; 2174 u32 peer_ht_rates_align, eml_trans_timeout; 2175 int i, ret, len; 2176 u16 eml_cap; 2177 __le32 v; 2178 2179 peer_legacy_rates_align = roundup(arg->peer_legacy_rates.num_rates, 2180 sizeof(u32)); 2181 peer_ht_rates_align = roundup(arg->peer_ht_rates.num_rates, 2182 sizeof(u32)); 2183 2184 len = sizeof(*cmd) + 2185 TLV_HDR_SIZE + (peer_legacy_rates_align * sizeof(u8)) + 2186 TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) + 2187 sizeof(*mcs) + TLV_HDR_SIZE + 2188 (sizeof(*he_mcs) * arg->peer_he_mcs_count) + 2189 TLV_HDR_SIZE + (sizeof(*eht_mcs) * arg->peer_eht_mcs_count); 2190 2191 if (arg->ml.enabled) 2192 len += TLV_HDR_SIZE + sizeof(*ml_params) + 2193 TLV_HDR_SIZE + (arg->ml.num_partner_links * sizeof(*partner_info)); 2194 else 2195 len += (2 * TLV_HDR_SIZE); 2196 2197 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 2198 if (!skb) 2199 return -ENOMEM; 2200 2201 ptr = skb->data; 2202 2203 cmd = ptr; 2204 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_ASSOC_COMPLETE_CMD, 2205 sizeof(*cmd)); 2206 2207 cmd->vdev_id = cpu_to_le32(arg->vdev_id); 2208 2209 cmd->peer_new_assoc = cpu_to_le32(arg->peer_new_assoc); 2210 cmd->peer_associd = 
cpu_to_le32(arg->peer_associd); 2211 cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap); 2212 2213 ath12k_wmi_copy_peer_flags(cmd, arg, 2214 test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, 2215 &ar->ab->dev_flags)); 2216 2217 ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_mac); 2218 2219 cmd->peer_rate_caps = cpu_to_le32(arg->peer_rate_caps); 2220 cmd->peer_caps = cpu_to_le32(arg->peer_caps); 2221 cmd->peer_listen_intval = cpu_to_le32(arg->peer_listen_intval); 2222 cmd->peer_ht_caps = cpu_to_le32(arg->peer_ht_caps); 2223 cmd->peer_max_mpdu = cpu_to_le32(arg->peer_max_mpdu); 2224 cmd->peer_mpdu_density = cpu_to_le32(arg->peer_mpdu_density); 2225 cmd->peer_vht_caps = cpu_to_le32(arg->peer_vht_caps); 2226 cmd->peer_phymode = cpu_to_le32(arg->peer_phymode); 2227 2228 /* Update 11ax capabilities */ 2229 cmd->peer_he_cap_info = cpu_to_le32(arg->peer_he_cap_macinfo[0]); 2230 cmd->peer_he_cap_info_ext = cpu_to_le32(arg->peer_he_cap_macinfo[1]); 2231 cmd->peer_he_cap_info_internal = cpu_to_le32(arg->peer_he_cap_macinfo_internal); 2232 cmd->peer_he_caps_6ghz = cpu_to_le32(arg->peer_he_caps_6ghz); 2233 cmd->peer_he_ops = cpu_to_le32(arg->peer_he_ops); 2234 for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++) 2235 cmd->peer_he_cap_phy[i] = 2236 cpu_to_le32(arg->peer_he_cap_phyinfo[i]); 2237 cmd->peer_ppet.numss_m1 = cpu_to_le32(arg->peer_ppet.numss_m1); 2238 cmd->peer_ppet.ru_info = cpu_to_le32(arg->peer_ppet.ru_bit_mask); 2239 for (i = 0; i < WMI_MAX_NUM_SS; i++) 2240 cmd->peer_ppet.ppet16_ppet8_ru3_ru0[i] = 2241 cpu_to_le32(arg->peer_ppet.ppet16_ppet8_ru3_ru0[i]); 2242 2243 /* Update 11be capabilities */ 2244 memcpy_and_pad(cmd->peer_eht_cap_mac, sizeof(cmd->peer_eht_cap_mac), 2245 arg->peer_eht_cap_mac, sizeof(arg->peer_eht_cap_mac), 2246 0); 2247 memcpy_and_pad(cmd->peer_eht_cap_phy, sizeof(cmd->peer_eht_cap_phy), 2248 arg->peer_eht_cap_phy, sizeof(arg->peer_eht_cap_phy), 2249 0); 2250 memcpy_and_pad(&cmd->peer_eht_ppet, sizeof(cmd->peer_eht_ppet), 2251 &arg->peer_eht_ppet, sizeof(arg->peer_eht_ppet), 0); 2252 2253 /* Update peer legacy rate information */ 2254 ptr += sizeof(*cmd); 2255 2256 tlv = ptr; 2257 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_legacy_rates_align); 2258 2259 ptr += TLV_HDR_SIZE; 2260 2261 cmd->num_peer_legacy_rates = cpu_to_le32(arg->peer_legacy_rates.num_rates); 2262 memcpy(ptr, arg->peer_legacy_rates.rates, 2263 arg->peer_legacy_rates.num_rates); 2264 2265 /* Update peer HT rate information */ 2266 ptr += peer_legacy_rates_align; 2267 2268 tlv = ptr; 2269 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_ht_rates_align); 2270 ptr += TLV_HDR_SIZE; 2271 cmd->num_peer_ht_rates = cpu_to_le32(arg->peer_ht_rates.num_rates); 2272 memcpy(ptr, arg->peer_ht_rates.rates, 2273 arg->peer_ht_rates.num_rates); 2274 2275 /* VHT Rates */ 2276 ptr += peer_ht_rates_align; 2277 2278 mcs = ptr; 2279 2280 mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VHT_RATE_SET, 2281 sizeof(*mcs)); 2282 2283 cmd->peer_nss = cpu_to_le32(arg->peer_nss); 2284 2285 /* Update bandwidth-NSS mapping */ 2286 cmd->peer_bw_rxnss_override = 0; 2287 cmd->peer_bw_rxnss_override |= cpu_to_le32(arg->peer_bw_rxnss_override); 2288 2289 if (arg->vht_capable) { 2290 /* Firmware interprets mcs->tx_mcs_set field as peer's 2291 * RX capability 2292 */ 2293 mcs->rx_max_rate = cpu_to_le32(arg->tx_max_rate); 2294 mcs->rx_mcs_set = cpu_to_le32(arg->tx_mcs_set); 2295 mcs->tx_max_rate = cpu_to_le32(arg->rx_max_rate); 2296 mcs->tx_mcs_set = cpu_to_le32(arg->rx_mcs_set); 2297 } 2298 2299 /* HE Rates */ 2300 cmd->peer_he_mcs = 
cpu_to_le32(arg->peer_he_mcs_count); 2301 cmd->min_data_rate = cpu_to_le32(arg->min_data_rate); 2302 2303 ptr += sizeof(*mcs); 2304 2305 len = arg->peer_he_mcs_count * sizeof(*he_mcs); 2306 2307 tlv = ptr; 2308 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len); 2309 ptr += TLV_HDR_SIZE; 2310 2311 /* Loop through the HE rate set */ 2312 for (i = 0; i < arg->peer_he_mcs_count; i++) { 2313 he_mcs = ptr; 2314 he_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HE_RATE_SET, 2315 sizeof(*he_mcs)); 2316 2317 he_mcs->rx_mcs_set = cpu_to_le32(arg->peer_he_rx_mcs_set[i]); 2318 he_mcs->tx_mcs_set = cpu_to_le32(arg->peer_he_tx_mcs_set[i]); 2319 ptr += sizeof(*he_mcs); 2320 } 2321 2322 tlv = ptr; 2323 len = arg->ml.enabled ? sizeof(*ml_params) : 0; 2324 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len); 2325 ptr += TLV_HDR_SIZE; 2326 if (!len) 2327 goto skip_ml_params; 2328 2329 ml_params = ptr; 2330 ml_params->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_ASSOC_PARAMS, 2331 len); 2332 ml_params->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED); 2333 2334 if (arg->ml.assoc_link) 2335 ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK); 2336 2337 if (arg->ml.primary_umac) 2338 ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC); 2339 2340 if (arg->ml.logical_link_idx_valid) 2341 ml_params->flags |= 2342 cpu_to_le32(ATH12K_WMI_FLAG_MLO_LOGICAL_LINK_IDX_VALID); 2343 2344 if (arg->ml.peer_id_valid) 2345 ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PEER_ID_VALID); 2346 2347 ether_addr_copy(ml_params->mld_addr.addr, arg->ml.mld_addr); 2348 ml_params->logical_link_idx = cpu_to_le32(arg->ml.logical_link_idx); 2349 ml_params->ml_peer_id = cpu_to_le32(arg->ml.ml_peer_id); 2350 ml_params->ieee_link_id = cpu_to_le32(arg->ml.ieee_link_id); 2351 2352 eml_cap = arg->ml.eml_cap; 2353 if (u16_get_bits(eml_cap, IEEE80211_EML_CAP_EMLSR_SUPP)) { 2354 ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_EMLSR_SUPPORT); 2355 /* Padding delay */ 2356 eml_pad_delay = ieee80211_emlsr_pad_delay_in_us(eml_cap); 2357 ml_params->emlsr_padding_delay_us = cpu_to_le32(eml_pad_delay); 2358 /* Transition delay */ 2359 eml_trans_delay = ieee80211_emlsr_trans_delay_in_us(eml_cap); 2360 ml_params->emlsr_trans_delay_us = cpu_to_le32(eml_trans_delay); 2361 /* Transition timeout */ 2362 eml_trans_timeout = ieee80211_eml_trans_timeout_in_us(eml_cap); 2363 ml_params->emlsr_trans_timeout_us = 2364 cpu_to_le32(eml_trans_timeout); 2365 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi peer %pM emlsr padding delay %u, trans delay %u trans timeout %u", 2366 arg->peer_mac, eml_pad_delay, eml_trans_delay, 2367 eml_trans_timeout); 2368 } 2369 2370 ptr += sizeof(*ml_params); 2371 2372 skip_ml_params: 2373 /* Loop through the EHT rate set */ 2374 len = arg->peer_eht_mcs_count * sizeof(*eht_mcs); 2375 tlv = ptr; 2376 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len); 2377 ptr += TLV_HDR_SIZE; 2378 2379 for (i = 0; i < arg->peer_eht_mcs_count; i++) { 2380 eht_mcs = ptr; 2381 eht_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_EHT_RATE_SET, 2382 sizeof(*eht_mcs)); 2383 2384 eht_mcs->rx_mcs_set = cpu_to_le32(arg->peer_eht_rx_mcs_set[i]); 2385 eht_mcs->tx_mcs_set = cpu_to_le32(arg->peer_eht_tx_mcs_set[i]); 2386 ptr += sizeof(*eht_mcs); 2387 } 2388 2389 /* Update MCS15 capability */ 2390 if (arg->eht_disable_mcs15) 2391 cmd->peer_eht_ops = cpu_to_le32(IEEE80211_EHT_OPER_MCS15_DISABLE); 2392 2393 tlv = ptr; 2394 len = arg->ml.enabled ? 
arg->ml.num_partner_links * sizeof(*partner_info) : 0; 2395 /* fill ML Partner links */ 2396 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len); 2397 ptr += TLV_HDR_SIZE; 2398 2399 if (len == 0) 2400 goto send; 2401 2402 for (i = 0; i < arg->ml.num_partner_links; i++) { 2403 u32 cmd = WMI_TAG_MLO_PARTNER_LINK_PARAMS_PEER_ASSOC; 2404 2405 partner_info = ptr; 2406 partner_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(cmd, 2407 sizeof(*partner_info)); 2408 partner_info->vdev_id = cpu_to_le32(arg->ml.partner_info[i].vdev_id); 2409 partner_info->hw_link_id = 2410 cpu_to_le32(arg->ml.partner_info[i].hw_link_id); 2411 partner_info->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED); 2412 2413 if (arg->ml.partner_info[i].assoc_link) 2414 partner_info->flags |= 2415 cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK); 2416 2417 if (arg->ml.partner_info[i].primary_umac) 2418 partner_info->flags |= 2419 cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC); 2420 2421 if (arg->ml.partner_info[i].logical_link_idx_valid) { 2422 v = cpu_to_le32(ATH12K_WMI_FLAG_MLO_LINK_ID_VALID); 2423 partner_info->flags |= v; 2424 } 2425 2426 partner_info->logical_link_idx = 2427 cpu_to_le32(arg->ml.partner_info[i].logical_link_idx); 2428 ptr += sizeof(*partner_info); 2429 } 2430 2431 send: 2432 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 2433 "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x peer_flags_ext %x eht mac_cap %x %x eht phy_cap %x %x %x peer_eht_ops %x\n", 2434 cmd->vdev_id, cmd->peer_associd, arg->peer_mac, 2435 cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps, 2436 cmd->peer_listen_intval, cmd->peer_ht_caps, 2437 cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode, 2438 cmd->peer_mpdu_density, 2439 cmd->peer_vht_caps, cmd->peer_he_cap_info, 2440 cmd->peer_he_ops, cmd->peer_he_cap_info_ext, 2441 cmd->peer_he_cap_phy[0], cmd->peer_he_cap_phy[1], 2442 cmd->peer_he_cap_phy[2], 2443 cmd->peer_bw_rxnss_override, cmd->peer_flags_ext, 2444 cmd->peer_eht_cap_mac[0], cmd->peer_eht_cap_mac[1], 2445 cmd->peer_eht_cap_phy[0], cmd->peer_eht_cap_phy[1], 2446 cmd->peer_eht_cap_phy[2], cmd->peer_eht_ops); 2447 2448 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID); 2449 if (ret) { 2450 ath12k_warn(ar->ab, 2451 "failed to send WMI_PEER_ASSOC_CMDID\n"); 2452 dev_kfree_skb(skb); 2453 } 2454 2455 return ret; 2456 } 2457 2458 void ath12k_wmi_start_scan_init(struct ath12k *ar, 2459 struct ath12k_wmi_scan_req_arg *arg) 2460 { 2461 /* setup commonly used values */ 2462 arg->scan_req_id = 1; 2463 arg->scan_priority = WMI_SCAN_PRIORITY_LOW; 2464 arg->dwell_time_active = 50; 2465 arg->dwell_time_active_2g = 0; 2466 arg->dwell_time_passive = 150; 2467 arg->dwell_time_active_6g = 70; 2468 arg->dwell_time_passive_6g = 70; 2469 arg->min_rest_time = 50; 2470 arg->max_rest_time = 500; 2471 arg->repeat_probe_time = 0; 2472 arg->probe_spacing_time = 0; 2473 arg->idle_time = 0; 2474 arg->max_scan_time = 20000; 2475 arg->probe_delay = 5; 2476 arg->notify_scan_events = WMI_SCAN_EVENT_STARTED | 2477 WMI_SCAN_EVENT_COMPLETED | 2478 WMI_SCAN_EVENT_BSS_CHANNEL | 2479 WMI_SCAN_EVENT_FOREIGN_CHAN | 2480 WMI_SCAN_EVENT_DEQUEUED; 2481 arg->scan_f_chan_stat_evnt = 1; 2482 arg->num_bssid = 1; 2483 2484 /* fill bssid_list[0] with 0xff, otherwise bssid and RA will be 2485 * ZEROs in probe request 2486 */ 2487 
eth_broadcast_addr(arg->bssid_list[0].addr); 2488 } 2489 2490 static void ath12k_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd *cmd, 2491 struct ath12k_wmi_scan_req_arg *arg) 2492 { 2493 /* Scan events subscription */ 2494 if (arg->scan_ev_started) 2495 cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_STARTED); 2496 if (arg->scan_ev_completed) 2497 cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_COMPLETED); 2498 if (arg->scan_ev_bss_chan) 2499 cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_BSS_CHANNEL); 2500 if (arg->scan_ev_foreign_chan) 2501 cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN); 2502 if (arg->scan_ev_dequeued) 2503 cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_DEQUEUED); 2504 if (arg->scan_ev_preempted) 2505 cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_PREEMPTED); 2506 if (arg->scan_ev_start_failed) 2507 cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_START_FAILED); 2508 if (arg->scan_ev_restarted) 2509 cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESTARTED); 2510 if (arg->scan_ev_foreign_chn_exit) 2511 cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT); 2512 if (arg->scan_ev_suspended) 2513 cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_SUSPENDED); 2514 if (arg->scan_ev_resumed) 2515 cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESUMED); 2516 2517 /** Set scan control flags */ 2518 cmd->scan_ctrl_flags = 0; 2519 if (arg->scan_f_passive) 2520 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_PASSIVE); 2521 if (arg->scan_f_strict_passive_pch) 2522 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN); 2523 if (arg->scan_f_promisc_mode) 2524 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROMISCUOS); 2525 if (arg->scan_f_capture_phy_err) 2526 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CAPTURE_PHY_ERROR); 2527 if (arg->scan_f_half_rate) 2528 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_HALF_RATE_SUPPORT); 2529 if (arg->scan_f_quarter_rate) 2530 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT); 2531 if (arg->scan_f_cck_rates) 2532 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_CCK_RATES); 2533 if (arg->scan_f_ofdm_rates) 2534 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_OFDM_RATES); 2535 if (arg->scan_f_chan_stat_evnt) 2536 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CHAN_STAT_EVENT); 2537 if (arg->scan_f_filter_prb_req) 2538 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ); 2539 if (arg->scan_f_bcast_probe) 2540 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_BCAST_PROBE_REQ); 2541 if (arg->scan_f_offchan_mgmt_tx) 2542 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_MGMT_TX); 2543 if (arg->scan_f_offchan_data_tx) 2544 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_DATA_TX); 2545 if (arg->scan_f_force_active_dfs_chn) 2546 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS); 2547 if (arg->scan_f_add_tpc_ie_in_probe) 2548 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ); 2549 if (arg->scan_f_add_ds_ie_in_probe) 2550 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ); 2551 if (arg->scan_f_add_spoofed_mac_in_probe) 2552 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ); 2553 if (arg->scan_f_add_rand_seq_in_probe) 2554 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ); 2555 if (arg->scan_f_en_ie_whitelist_in_probe) 2556 cmd->scan_ctrl_flags |= 2557 
			cpu_to_le32(WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ);

	cmd->scan_ctrl_flags |= le32_encode_bits(arg->adaptive_dwell_time_mode,
						 WMI_SCAN_DWELL_MODE_MASK);
}

int ath12k_wmi_send_scan_start_cmd(struct ath12k *ar,
				   struct ath12k_wmi_scan_req_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_start_scan_cmd *cmd;
	struct ath12k_wmi_ssid_params *ssid = NULL;
	struct ath12k_wmi_mac_addr_params *bssid;
	struct sk_buff *skb;
	struct wmi_tlv *tlv;
	void *ptr;
	int i, ret, len;
	u32 *tmp_ptr, extraie_len_with_pad = 0;
	struct ath12k_wmi_hint_short_ssid_arg *s_ssid = NULL;
	struct ath12k_wmi_hint_bssid_arg *hint_bssid = NULL;

	len = sizeof(*cmd);

	len += TLV_HDR_SIZE;
	if (arg->num_chan)
		len += arg->num_chan * sizeof(u32);

	len += TLV_HDR_SIZE;
	if (arg->num_ssids)
		len += arg->num_ssids * sizeof(*ssid);

	len += TLV_HDR_SIZE;
	if (arg->num_bssid)
		len += sizeof(*bssid) * arg->num_bssid;

	if (arg->num_hint_bssid)
		len += TLV_HDR_SIZE +
			arg->num_hint_bssid * sizeof(*hint_bssid);

	if (arg->num_hint_s_ssid)
		len += TLV_HDR_SIZE +
			arg->num_hint_s_ssid * sizeof(*s_ssid);

	len += TLV_HDR_SIZE;
	if (arg->extraie.len)
		extraie_len_with_pad =
			roundup(arg->extraie.len, sizeof(u32));
	if (extraie_len_with_pad <= (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len)) {
		len += extraie_len_with_pad;
	} else {
		ath12k_warn(ar->ab, "discard large size %d bytes extraie for scan start\n",
			    arg->extraie.len);
		extraie_len_with_pad = 0;
	}

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	ptr = skb->data;

	cmd = ptr;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_START_SCAN_CMD,
						 sizeof(*cmd));

	cmd->scan_id = cpu_to_le32(arg->scan_id);
	cmd->scan_req_id = cpu_to_le32(arg->scan_req_id);
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	if (ar->state_11d == ATH12K_11D_PREPARING)
		arg->scan_priority = WMI_SCAN_PRIORITY_MEDIUM;
	else
		arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
	cmd->scan_priority = cpu_to_le32(arg->scan_priority);
	cmd->notify_scan_events = cpu_to_le32(arg->notify_scan_events);

	ath12k_wmi_copy_scan_event_cntrl_flags(cmd, arg);

	cmd->dwell_time_active = cpu_to_le32(arg->dwell_time_active);
	cmd->dwell_time_active_2g = cpu_to_le32(arg->dwell_time_active_2g);
	cmd->dwell_time_passive = cpu_to_le32(arg->dwell_time_passive);
	cmd->dwell_time_active_6g = cpu_to_le32(arg->dwell_time_active_6g);
	cmd->dwell_time_passive_6g = cpu_to_le32(arg->dwell_time_passive_6g);
	cmd->min_rest_time = cpu_to_le32(arg->min_rest_time);
	cmd->max_rest_time = cpu_to_le32(arg->max_rest_time);
	cmd->repeat_probe_time = cpu_to_le32(arg->repeat_probe_time);
	cmd->probe_spacing_time = cpu_to_le32(arg->probe_spacing_time);
	cmd->idle_time = cpu_to_le32(arg->idle_time);
	cmd->max_scan_time = cpu_to_le32(arg->max_scan_time);
	cmd->probe_delay = cpu_to_le32(arg->probe_delay);
	cmd->burst_duration = cpu_to_le32(arg->burst_duration);
	cmd->num_chan = cpu_to_le32(arg->num_chan);
	cmd->num_bssid = cpu_to_le32(arg->num_bssid);
	cmd->num_ssids = cpu_to_le32(arg->num_ssids);
	cmd->ie_len = cpu_to_le32(arg->extraie.len);
	cmd->n_probes = cpu_to_le32(arg->n_probes);

	ptr += sizeof(*cmd);

	len = arg->num_chan * sizeof(u32);

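	/* The fixed command struct is followed by variable-length TLVs:
	 * channel list, SSID list, BSSID list, extra IEs and the optional
	 * short-SSID and BSSID hint arrays, in that order.
	 */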
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, len);
	ptr += TLV_HDR_SIZE;
	tmp_ptr = (u32 *)ptr;

	memcpy(tmp_ptr, arg->chan_list, arg->num_chan * sizeof(u32));

	ptr += len;

	len = arg->num_ssids * sizeof(*ssid);
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);

	ptr += TLV_HDR_SIZE;

	if (arg->num_ssids) {
		ssid = ptr;
		for (i = 0; i < arg->num_ssids; ++i) {
			ssid->ssid_len = cpu_to_le32(arg->ssid[i].ssid_len);
			memcpy(ssid->ssid, arg->ssid[i].ssid,
			       arg->ssid[i].ssid_len);
			ssid++;
		}
	}

	ptr += (arg->num_ssids * sizeof(*ssid));
	len = arg->num_bssid * sizeof(*bssid);
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);

	ptr += TLV_HDR_SIZE;
	bssid = ptr;

	if (arg->num_bssid) {
		for (i = 0; i < arg->num_bssid; ++i) {
			ether_addr_copy(bssid->addr,
					arg->bssid_list[i].addr);
			bssid++;
		}
	}

	ptr += arg->num_bssid * sizeof(*bssid);

	len = extraie_len_with_pad;
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len);
	ptr += TLV_HDR_SIZE;

	if (extraie_len_with_pad)
		memcpy(ptr, arg->extraie.ptr,
		       arg->extraie.len);

	ptr += extraie_len_with_pad;

	if (arg->num_hint_s_ssid) {
		len = arg->num_hint_s_ssid * sizeof(*s_ssid);
		tlv = ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
		ptr += TLV_HDR_SIZE;
		s_ssid = ptr;
		for (i = 0; i < arg->num_hint_s_ssid; ++i) {
			s_ssid->freq_flags = arg->hint_s_ssid[i].freq_flags;
			s_ssid->short_ssid = arg->hint_s_ssid[i].short_ssid;
			s_ssid++;
		}
		ptr += len;
	}

	if (arg->num_hint_bssid) {
		len = arg->num_hint_bssid * sizeof(struct ath12k_wmi_hint_bssid_arg);
		tlv = ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
		ptr += TLV_HDR_SIZE;
		hint_bssid = ptr;
		for (i = 0; i < arg->num_hint_bssid; ++i) {
			hint_bssid->freq_flags =
				arg->hint_bssid[i].freq_flags;
			ether_addr_copy(&hint_bssid->bssid.addr[0],
					&arg->hint_bssid[i].bssid.addr[0]);
			hint_bssid++;
		}
	}

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_START_SCAN_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_START_SCAN_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
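
/* Ask firmware to cancel an ongoing scan: a single scan matched by
 * scan_id, all scans on a vdev, or every scan on the pdev, selected via
 * req_type below.
 */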
int ath12k_wmi_send_scan_stop_cmd(struct ath12k *ar,
				  struct ath12k_wmi_scan_cancel_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_stop_scan_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_stop_scan_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STOP_SCAN_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	cmd->requestor = cpu_to_le32(arg->requester);
	cmd->scan_id = cpu_to_le32(arg->scan_id);
	cmd->pdev_id = cpu_to_le32(arg->pdev_id);
	/* stop the scan with the corresponding scan_id */
	if (arg->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) {
		/* Cancelling all scans */
		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_ALL);
	} else if (arg->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) {
		/* Cancelling VAP scans */
		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_VAP_ALL);
	} else if (arg->req_type == WLAN_SCAN_CANCEL_SINGLE) {
		/* Cancelling specific scan */
		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_ONE);
	} else {
		ath12k_warn(ar->ab, "invalid scan cancel req_type %d",
			    arg->req_type);
		dev_kfree_skb(skb);
		return -EINVAL;
	}

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_STOP_SCAN_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_STOP_SCAN_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_scan_chan_list_cmd(struct ath12k *ar,
				       struct ath12k_wmi_scan_chan_list_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_scan_chan_list_cmd *cmd;
	struct sk_buff *skb;
	struct ath12k_wmi_channel_params *chan_info;
	struct ath12k_wmi_channel_arg *channel_arg;
	struct wmi_tlv *tlv;
	void *ptr;
	int i, ret, len;
	u16 num_send_chans, num_sends = 0, max_chan_limit = 0;
	__le32 *reg1, *reg2;

	channel_arg = &arg->channel[0];
	while (arg->nallchans) {
		len = sizeof(*cmd) + TLV_HDR_SIZE;
		max_chan_limit = (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len) /
			sizeof(*chan_info);

		num_send_chans = min3(arg->nallchans, max_chan_limit,
				      ATH12K_WMI_MAX_NUM_CHAN_PER_CMD);

		arg->nallchans -= num_send_chans;
		len += sizeof(*chan_info) * num_send_chans;

		skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
		if (!skb)
			return -ENOMEM;

		cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
		cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SCAN_CHAN_LIST_CMD,
							 sizeof(*cmd));
		cmd->pdev_id = cpu_to_le32(arg->pdev_id);
		cmd->num_scan_chans = cpu_to_le32(num_send_chans);
		if (num_sends)
			cmd->flags |= cpu_to_le32(WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG);

		ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
			   "WMI no.of chan = %d len = %d pdev_id = %d num_sends = %d\n",
			   num_send_chans, len, cmd->pdev_id, num_sends);

		ptr = skb->data + sizeof(*cmd);

		len = sizeof(*chan_info) * num_send_chans;
		tlv = ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
						 len);
		ptr += TLV_HDR_SIZE;

		for (i = 0; i < num_send_chans; ++i) {
			chan_info = ptr;
			memset(chan_info, 0, sizeof(*chan_info));
			len = sizeof(*chan_info);
			chan_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL,
								       len);

			reg1 = &chan_info->reg_info_1;
			reg2 = &chan_info->reg_info_2;
			chan_info->mhz = cpu_to_le32(channel_arg->mhz);
			chan_info->band_center_freq1 = cpu_to_le32(channel_arg->cfreq1);
			chan_info->band_center_freq2 = cpu_to_le32(channel_arg->cfreq2);

			if (channel_arg->is_chan_passive)
				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
			if (channel_arg->allow_he)
				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE);
			else if (channel_arg->allow_vht)
				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT);
			else if (channel_arg->allow_ht)
				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT);
			if (channel_arg->half_rate)
				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_HALF_RATE);
			if (channel_arg->quarter_rate)
				chan_info->info |=
					cpu_to_le32(WMI_CHAN_INFO_QUARTER_RATE);

			if (channel_arg->psc_channel)
				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PSC);

			if (channel_arg->dfs_set)
				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_DFS);

			chan_info->info |= le32_encode_bits(channel_arg->phy_mode,
							    WMI_CHAN_INFO_MODE);
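			/* reg_info_1/reg_info_2 are bitfields packing the
			 * per-channel regulatory data: min/max transmit
			 * power, max regulatory power, regulatory class and
			 * antenna maximum.
			 */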
*reg1 |= le32_encode_bits(channel_arg->minpower, 2883 WMI_CHAN_REG_INFO1_MIN_PWR); 2884 *reg1 |= le32_encode_bits(channel_arg->maxpower, 2885 WMI_CHAN_REG_INFO1_MAX_PWR); 2886 *reg1 |= le32_encode_bits(channel_arg->maxregpower, 2887 WMI_CHAN_REG_INFO1_MAX_REG_PWR); 2888 *reg1 |= le32_encode_bits(channel_arg->reg_class_id, 2889 WMI_CHAN_REG_INFO1_REG_CLS); 2890 *reg2 |= le32_encode_bits(channel_arg->antennamax, 2891 WMI_CHAN_REG_INFO2_ANT_MAX); 2892 *reg2 |= le32_encode_bits(channel_arg->maxregpower, 2893 WMI_CHAN_REG_INFO2_MAX_TX_PWR); 2894 2895 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 2896 "WMI chan scan list chan[%d] = %u, chan_info->info %8x\n", 2897 i, chan_info->mhz, chan_info->info); 2898 2899 ptr += sizeof(*chan_info); 2900 2901 channel_arg++; 2902 } 2903 2904 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_SCAN_CHAN_LIST_CMDID); 2905 if (ret) { 2906 ath12k_warn(ar->ab, "failed to send WMI_SCAN_CHAN_LIST cmd\n"); 2907 dev_kfree_skb(skb); 2908 return ret; 2909 } 2910 2911 num_sends++; 2912 } 2913 2914 return 0; 2915 } 2916 2917 int ath12k_wmi_send_wmm_update_cmd(struct ath12k *ar, u32 vdev_id, 2918 struct wmi_wmm_params_all_arg *param) 2919 { 2920 struct ath12k_wmi_pdev *wmi = ar->wmi; 2921 struct wmi_vdev_set_wmm_params_cmd *cmd; 2922 struct wmi_wmm_params *wmm_param; 2923 struct wmi_wmm_params_arg *wmi_wmm_arg; 2924 struct sk_buff *skb; 2925 int ret, ac; 2926 2927 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 2928 if (!skb) 2929 return -ENOMEM; 2930 2931 cmd = (struct wmi_vdev_set_wmm_params_cmd *)skb->data; 2932 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD, 2933 sizeof(*cmd)); 2934 2935 cmd->vdev_id = cpu_to_le32(vdev_id); 2936 cmd->wmm_param_type = 0; 2937 2938 for (ac = 0; ac < WME_NUM_AC; ac++) { 2939 switch (ac) { 2940 case WME_AC_BE: 2941 wmi_wmm_arg = ¶m->ac_be; 2942 break; 2943 case WME_AC_BK: 2944 wmi_wmm_arg = ¶m->ac_bk; 2945 break; 2946 case WME_AC_VI: 2947 wmi_wmm_arg = ¶m->ac_vi; 2948 break; 2949 case WME_AC_VO: 2950 wmi_wmm_arg = ¶m->ac_vo; 2951 break; 2952 } 2953 2954 wmm_param = (struct wmi_wmm_params *)&cmd->wmm_params[ac]; 2955 wmm_param->tlv_header = 2956 ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD, 2957 sizeof(*wmm_param)); 2958 2959 wmm_param->aifs = cpu_to_le32(wmi_wmm_arg->aifs); 2960 wmm_param->cwmin = cpu_to_le32(wmi_wmm_arg->cwmin); 2961 wmm_param->cwmax = cpu_to_le32(wmi_wmm_arg->cwmax); 2962 wmm_param->txoplimit = cpu_to_le32(wmi_wmm_arg->txop); 2963 wmm_param->acm = cpu_to_le32(wmi_wmm_arg->acm); 2964 wmm_param->no_ack = cpu_to_le32(wmi_wmm_arg->no_ack); 2965 2966 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 2967 "wmi wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n", 2968 ac, wmm_param->aifs, wmm_param->cwmin, 2969 wmm_param->cwmax, wmm_param->txoplimit, 2970 wmm_param->acm, wmm_param->no_ack); 2971 } 2972 ret = ath12k_wmi_cmd_send(wmi, skb, 2973 WMI_VDEV_SET_WMM_PARAMS_CMDID); 2974 if (ret) { 2975 ath12k_warn(ar->ab, 2976 "failed to send WMI_VDEV_SET_WMM_PARAMS_CMDID"); 2977 dev_kfree_skb(skb); 2978 } 2979 2980 return ret; 2981 } 2982 2983 int ath12k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath12k *ar, 2984 u32 pdev_id) 2985 { 2986 struct ath12k_wmi_pdev *wmi = ar->wmi; 2987 struct wmi_dfs_phyerr_offload_cmd *cmd; 2988 struct sk_buff *skb; 2989 int ret; 2990 2991 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 2992 if (!skb) 2993 return -ENOMEM; 2994 2995 cmd = (struct wmi_dfs_phyerr_offload_cmd *)skb->data; 2996 cmd->tlv_header = 2997 
		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD,
				       sizeof(*cmd));

	cmd->pdev_id = cpu_to_le32(pdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI dfs phy err offload enable pdev id %d\n", pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_set_bios_cmd(struct ath12k_base *ab, u32 param_id,
			    const u8 *buf, size_t buf_len)
{
	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
	struct wmi_pdev_set_bios_interface_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	u8 *ptr;
	u32 len, len_aligned;
	int ret;

	len_aligned = roundup(buf_len, sizeof(u32));
	len = sizeof(*cmd) + TLV_HDR_SIZE + len_aligned;

	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_bios_interface_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_INTERFACE_CMD,
						 sizeof(*cmd));
	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
	cmd->param_type_id = cpu_to_le32(param_id);
	cmd->length = cpu_to_le32(buf_len);

	ptr = skb->data + sizeof(*cmd);
	tlv = (struct wmi_tlv *)ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len_aligned);
	ptr += TLV_HDR_SIZE;
	memcpy(ptr, buf, buf_len);

	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
				  skb,
				  WMI_PDEV_SET_BIOS_INTERFACE_CMDID);
	if (ret) {
		ath12k_warn(ab,
			    "failed to send WMI_PDEV_SET_BIOS_INTERFACE_CMDID parameter id %d: %d\n",
			    param_id, ret);
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_set_bios_sar_cmd(struct ath12k_base *ab, const u8 *psar_table)
{
	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
	struct wmi_pdev_set_bios_sar_table_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	int ret;
	u8 *buf_ptr;
	u32 len, sar_table_len_aligned, sar_dbs_backoff_len_aligned;
	const u8 *psar_value = psar_table + ATH12K_ACPI_POWER_LIMIT_DATA_OFFSET;
	const u8 *pdbs_value = psar_table + ATH12K_ACPI_DBS_BACKOFF_DATA_OFFSET;

	sar_table_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_TABLE_LEN, sizeof(u32));
	sar_dbs_backoff_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN,
					      sizeof(u32));
	len = sizeof(*cmd) + TLV_HDR_SIZE + sar_table_len_aligned +
		TLV_HDR_SIZE + sar_dbs_backoff_len_aligned;

	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_bios_sar_table_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD,
						 sizeof(*cmd));
	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
	cmd->sar_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_TABLE_LEN);
	cmd->dbs_backoff_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN);

	buf_ptr = skb->data + sizeof(*cmd);
	tlv = (struct wmi_tlv *)buf_ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE,
					 sar_table_len_aligned);
	buf_ptr += TLV_HDR_SIZE;
	memcpy(buf_ptr, psar_value, ATH12K_ACPI_BIOS_SAR_TABLE_LEN);

	buf_ptr += sar_table_len_aligned;
	tlv = (struct wmi_tlv *)buf_ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE,
					 sar_dbs_backoff_len_aligned);
	buf_ptr += TLV_HDR_SIZE;
	memcpy(buf_ptr, pdbs_value, ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN);

	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
				  skb,
				  WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID);
	if (ret) {
		ath12k_warn(ab,
			    "failed to send WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID %d\n",
			    ret);
		dev_kfree_skb(skb);
	}

	return ret;
}
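
/* Like the SAR table above, the geo offset table comes from ACPI; it
 * carries geography-based offsets that firmware is expected to apply on
 * top of the base SAR power limits.
 */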
int ath12k_wmi_set_bios_geo_cmd(struct ath12k_base *ab, const u8 *pgeo_table)
{
	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
	struct wmi_pdev_set_bios_geo_table_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	int ret;
	u8 *buf_ptr;
	u32 len, sar_geo_len_aligned;
	const u8 *pgeo_value = pgeo_table + ATH12K_ACPI_GEO_OFFSET_DATA_OFFSET;

	sar_geo_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN, sizeof(u32));
	len = sizeof(*cmd) + TLV_HDR_SIZE + sar_geo_len_aligned;

	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_bios_geo_table_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD,
						 sizeof(*cmd));
	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
	cmd->geo_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN);

	buf_ptr = skb->data + sizeof(*cmd);
	tlv = (struct wmi_tlv *)buf_ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, sar_geo_len_aligned);
	buf_ptr += TLV_HDR_SIZE;
	memcpy(buf_ptr, pgeo_value, ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN);

	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
				  skb,
				  WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID);
	if (ret) {
		ath12k_warn(ab,
			    "failed to send WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID %d\n",
			    ret);
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_delba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
			  u32 tid, u32 initiator, u32 reason)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_delba_send_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_delba_send_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DELBA_SEND_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, mac);
	cmd->tid = cpu_to_le32(tid);
	cmd->initiator = cpu_to_le32(initiator);
	cmd->reasoncode = cpu_to_le32(reason);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
		   vdev_id, mac, tid, initiator, reason);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_DELBA_SEND_CMDID);

	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_DELBA_SEND_CMDID cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
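
/* The ADDBA helpers below manipulate block ack session state for a given
 * peer and TID: inject an ADDBA response with a chosen status code, send
 * an ADDBA request with a given buffer size, or clear a previously set
 * response.
 */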
int ath12k_wmi_addba_set_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac,
			      u32 tid, u32 status)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_addba_setresponse_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
	cmd->tlv_header =
		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SETRESPONSE_CMD,
				       sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, mac);
	cmd->tid = cpu_to_le32(tid);
	cmd->statuscode = cpu_to_le32(status);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
		   vdev_id, mac, tid, status);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SET_RESP_CMDID);

	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_ADDBA_SET_RESP_CMDID cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_addba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
			  u32 tid, u32 buf_size)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_addba_send_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_addba_send_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SEND_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, mac);
	cmd->tid = cpu_to_le32(tid);
	cmd->buffersize = cpu_to_le32(buf_size);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
		   vdev_id, mac, tid, buf_size);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SEND_CMDID);

	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_ADDBA_SEND_CMDID cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_addba_clear_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_addba_clear_resp_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
	cmd->tlv_header =
		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_CLEAR_RESP_CMD,
				       sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, mac);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
		   vdev_id, mac);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_CLEAR_RESP_CMDID);

	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_ADDBA_CLEAR_RESP_CMDID cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_init_country_cmd(struct ath12k *ar,
				     struct ath12k_wmi_init_country_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_init_country_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_init_country_cmd *)skb->data;
	cmd->tlv_header =
		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_INIT_COUNTRY_CMD,
				       sizeof(*cmd));

	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);

	switch (arg->flags) {
	case ALPHA_IS_SET:
		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_ALPHA);
		memcpy(&cmd->cc_info.alpha2, arg->cc_info.alpha2, 3);
		break;
	case CC_IS_SET:
		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE);
		cmd->cc_info.country_code =
			cpu_to_le32(arg->cc_info.country_code);
		break;
	case REGDMN_IS_SET:
		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_REGDOMAIN);
		cmd->cc_info.regdom_id = cpu_to_le32(arg->cc_info.regdom_id);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_SET_INIT_COUNTRY_CMDID);

out:
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_SET_INIT_COUNTRY_CMDID: %d\n",
			    ret);
		dev_kfree_skb(skb);
	}

	return ret;
}
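
/* Unlike the init country command above, which can seed the regulatory
 * info by alpha2, numeric country code or regdomain ID, the current
 * country command below updates the alpha2 country code at runtime.
 */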
int ath12k_wmi_send_set_current_country_cmd(struct ath12k *ar,
					    struct wmi_set_current_country_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_set_current_country_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_set_current_country_cmd *)skb->data;
	cmd->tlv_header =
		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_CURRENT_COUNTRY_CMD,
				       sizeof(*cmd));

	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
	memcpy(&cmd->new_alpha2, &arg->alpha2, sizeof(arg->alpha2));
	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_SET_CURRENT_COUNTRY_CMDID);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "set current country pdev id %d alpha2 %c%c\n",
		   ar->pdev->pdev_id,
		   arg->alpha2[0],
		   arg->alpha2[1]);

	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_SET_CURRENT_COUNTRY_CMDID: %d\n", ret);
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_11d_scan_start_cmd(struct ath12k *ar,
				       struct wmi_11d_scan_start_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_11d_scan_start_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_11d_scan_start_cmd *)skb->data;
	cmd->tlv_header =
		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_11D_SCAN_START_CMD,
				       sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	cmd->scan_period_msec = cpu_to_le32(arg->scan_period_msec);
	cmd->start_interval_msec = cpu_to_le32(arg->start_interval_msec);
	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_START_CMDID);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "send 11d scan start vdev id %d period %d ms interval %d ms\n",
		   arg->vdev_id, arg->scan_period_msec,
		   arg->start_interval_msec);

	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_11D_SCAN_START_CMDID: %d\n", ret);
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_11d_scan_stop_cmd(struct ath12k *ar, u32 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_11d_scan_stop_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_11d_scan_stop_cmd *)skb->data;
	cmd->tlv_header =
		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_11D_SCAN_STOP_CMD,
				       sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(vdev_id);
	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_STOP_CMDID);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "send 11d scan stop vdev id %d\n",
		   cmd->vdev_id);

	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_11D_SCAN_STOP_CMDID: %d\n", ret);
		dev_kfree_skb(skb);
	}

	return ret;
}

int
ath12k_wmi_send_twt_enable_cmd(struct ath12k *ar, u32 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_twt_enable_params_cmd *cmd;
struct sk_buff *skb; 3462 int ret, len; 3463 3464 len = sizeof(*cmd); 3465 3466 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 3467 if (!skb) 3468 return -ENOMEM; 3469 3470 cmd = (struct wmi_twt_enable_params_cmd *)skb->data; 3471 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_ENABLE_CMD, 3472 len); 3473 cmd->pdev_id = cpu_to_le32(pdev_id); 3474 cmd->sta_cong_timer_ms = cpu_to_le32(ATH12K_TWT_DEF_STA_CONG_TIMER_MS); 3475 cmd->default_slot_size = cpu_to_le32(ATH12K_TWT_DEF_DEFAULT_SLOT_SIZE); 3476 cmd->congestion_thresh_setup = 3477 cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_SETUP); 3478 cmd->congestion_thresh_teardown = 3479 cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_TEARDOWN); 3480 cmd->congestion_thresh_critical = 3481 cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_CRITICAL); 3482 cmd->interference_thresh_teardown = 3483 cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN); 3484 cmd->interference_thresh_setup = 3485 cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_SETUP); 3486 cmd->min_no_sta_setup = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_SETUP); 3487 cmd->min_no_sta_teardown = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_TEARDOWN); 3488 cmd->no_of_bcast_mcast_slots = 3489 cpu_to_le32(ATH12K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS); 3490 cmd->min_no_twt_slots = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_TWT_SLOTS); 3491 cmd->max_no_sta_twt = cpu_to_le32(ATH12K_TWT_DEF_MAX_NO_STA_TWT); 3492 cmd->mode_check_interval = cpu_to_le32(ATH12K_TWT_DEF_MODE_CHECK_INTERVAL); 3493 cmd->add_sta_slot_interval = cpu_to_le32(ATH12K_TWT_DEF_ADD_STA_SLOT_INTERVAL); 3494 cmd->remove_sta_slot_interval = 3495 cpu_to_le32(ATH12K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL); 3496 /* TODO add MBSSID support */ 3497 cmd->mbss_support = 0; 3498 3499 ret = ath12k_wmi_cmd_send(wmi, skb, 3500 WMI_TWT_ENABLE_CMDID); 3501 if (ret) { 3502 ath12k_warn(ab, "Failed to send WMI_TWT_ENABLE_CMDID"); 3503 dev_kfree_skb(skb); 3504 } 3505 return ret; 3506 } 3507 3508 int 3509 ath12k_wmi_send_twt_disable_cmd(struct ath12k *ar, u32 pdev_id) 3510 { 3511 struct ath12k_wmi_pdev *wmi = ar->wmi; 3512 struct ath12k_base *ab = wmi->wmi_ab->ab; 3513 struct wmi_twt_disable_params_cmd *cmd; 3514 struct sk_buff *skb; 3515 int ret, len; 3516 3517 len = sizeof(*cmd); 3518 3519 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 3520 if (!skb) 3521 return -ENOMEM; 3522 3523 cmd = (struct wmi_twt_disable_params_cmd *)skb->data; 3524 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_DISABLE_CMD, 3525 len); 3526 cmd->pdev_id = cpu_to_le32(pdev_id); 3527 3528 ret = ath12k_wmi_cmd_send(wmi, skb, 3529 WMI_TWT_DISABLE_CMDID); 3530 if (ret) { 3531 ath12k_warn(ab, "Failed to send WMI_TWT_DISABLE_CMDID"); 3532 dev_kfree_skb(skb); 3533 } 3534 return ret; 3535 } 3536 3537 int 3538 ath12k_wmi_send_obss_spr_cmd(struct ath12k *ar, u32 vdev_id, 3539 struct ieee80211_he_obss_pd *he_obss_pd) 3540 { 3541 struct ath12k_wmi_pdev *wmi = ar->wmi; 3542 struct ath12k_base *ab = wmi->wmi_ab->ab; 3543 struct wmi_obss_spatial_reuse_params_cmd *cmd; 3544 struct sk_buff *skb; 3545 int ret, len; 3546 3547 len = sizeof(*cmd); 3548 3549 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 3550 if (!skb) 3551 return -ENOMEM; 3552 3553 cmd = (struct wmi_obss_spatial_reuse_params_cmd *)skb->data; 3554 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD, 3555 len); 3556 cmd->vdev_id = cpu_to_le32(vdev_id); 3557 cmd->enable = cpu_to_le32(he_obss_pd->enable); 3558 cmd->obss_min = a_cpu_to_sle32(he_obss_pd->min_offset); 3559 cmd->obss_max = a_cpu_to_sle32(he_obss_pd->max_offset); 3560 3561 ret = 
u32 ath12k_wmi_build_obss_pd(const struct ath12k_wmi_obss_pd_arg *arg)
{
	u32 param_val = 0;

	param_val |= u32_encode_bits((u8)arg->srg_th, GENMASK(15, 8));
	param_val |= u32_encode_bits((u8)arg->non_srg_th, GENMASK(7, 0));

	if (arg->srp_support)
		param_val |= ATH12K_OBSS_PD_THRESHOLD_IN_DBM;

	if (arg->srg_enabled && arg->srp_support)
		param_val |= ATH12K_OBSS_PD_SRG_EN;

	if (arg->non_srg_enabled)
		param_val |= ATH12K_OBSS_PD_NON_SRG_EN;

	return param_val;
}

static int ath12k_wmi_pdev_set_obss_bitmap(struct ath12k *ar,
					   const struct wmi_pdev_set_obss_bitmap_arg *arg)
{
	struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	const int len = sizeof(*cmd);
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(arg->tlv_tag, len);
	cmd->pdev_id = cpu_to_le32(arg->pdev_id);
	memcpy(cmd->bitmap, arg->bitmap, sizeof(cmd->bitmap));

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi set pdev %u %s %08x %08x\n",
		   arg->pdev_id, arg->label, arg->bitmap[0], arg->bitmap[1]);

	ret = ath12k_wmi_cmd_send(wmi, skb, arg->cmd_id);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send %s: %d\n", arg->label, ret);
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_pdev_set_srg_bss_color_bitmap(struct ath12k *ar,
					     u32 pdev_id, const u32 *bitmap)
{
	struct wmi_pdev_set_obss_bitmap_arg arg = {
		.tlv_tag = WMI_TAG_PDEV_SRG_BSS_COLOR_BITMAP_CMD,
		.pdev_id = pdev_id,
		.cmd_id = WMI_PDEV_SET_SRG_BSS_COLOR_BITMAP_CMDID,
		.bitmap = bitmap,
		.label = "SRG bss color bitmap",
	};

	return ath12k_wmi_pdev_set_obss_bitmap(ar, &arg);
}

int ath12k_wmi_pdev_set_srg_partial_bssid_bitmap(struct ath12k *ar,
						 u32 pdev_id, const u32 *bitmap)
{
	struct wmi_pdev_set_obss_bitmap_arg arg = {
		.tlv_tag = WMI_TAG_PDEV_SRG_PARTIAL_BSSID_BITMAP_CMD,
		.pdev_id = pdev_id,
		.cmd_id = WMI_PDEV_SET_SRG_PARTIAL_BSSID_BITMAP_CMDID,
		.bitmap = bitmap,
		.label = "SRG partial bssid bitmap",
	};

	return ath12k_wmi_pdev_set_obss_bitmap(ar, &arg);
}

int ath12k_wmi_pdev_srg_obss_color_enable_bitmap(struct ath12k *ar,
						 u32 pdev_id, const u32 *bitmap)
{
	struct wmi_pdev_set_obss_bitmap_arg arg = {
		.tlv_tag = WMI_TAG_PDEV_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD,
		.pdev_id = pdev_id,
		.cmd_id = WMI_PDEV_SET_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID,
		.bitmap = bitmap,
		.label = "SRG obss color enable bitmap",
	};

	return ath12k_wmi_pdev_set_obss_bitmap(ar, &arg);
}

int ath12k_wmi_pdev_srg_obss_bssid_enable_bitmap(struct ath12k *ar,
						 u32 pdev_id, const u32 *bitmap)
{
	struct wmi_pdev_set_obss_bitmap_arg arg = {
		.tlv_tag = WMI_TAG_PDEV_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD,
		.pdev_id = pdev_id,
		.cmd_id = WMI_PDEV_SET_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID,
		.bitmap = bitmap,
		.label = "SRG obss bssid enable bitmap",
	};

	return ath12k_wmi_pdev_set_obss_bitmap(ar, &arg);
}

int ath12k_wmi_pdev_non_srg_obss_color_enable_bitmap(struct ath12k *ar,
						     u32 pdev_id, const u32 *bitmap)
{
	struct wmi_pdev_set_obss_bitmap_arg arg = {
		.tlv_tag = WMI_TAG_PDEV_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD,
		.pdev_id = pdev_id,
		.cmd_id = WMI_PDEV_SET_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID,
		.bitmap = bitmap,
		.label = "non SRG obss color enable bitmap",
	};

	return ath12k_wmi_pdev_set_obss_bitmap(ar, &arg);
}

int ath12k_wmi_pdev_non_srg_obss_bssid_enable_bitmap(struct ath12k *ar,
						     u32 pdev_id, const u32 *bitmap)
{
	struct wmi_pdev_set_obss_bitmap_arg arg = {
		.tlv_tag = WMI_TAG_PDEV_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD,
		.pdev_id = pdev_id,
		.cmd_id = WMI_PDEV_SET_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID,
		.bitmap = bitmap,
		.label = "non SRG obss bssid enable bitmap",
	};

	return ath12k_wmi_pdev_set_obss_bitmap(ar, &arg);
}
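
/* Configure BSS color collision detection for a vdev. Collisions are
 * reported back through the OBSS color collision event, handled in
 * ath12k_wmi_obss_color_collision_event() below.
 */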
int ath12k_wmi_obss_color_cfg_cmd(struct ath12k *ar, u32 vdev_id,
				  u8 bss_color, u32 period,
				  bool enable)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_obss_color_collision_cfg_params_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_obss_color_collision_cfg_params_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG,
						 len);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->evt_type = enable ? cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION) :
				 cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION_DISABLE);
	cmd->current_bss_color = cpu_to_le32(bss_color);
	cmd->detection_period_ms = cpu_to_le32(period);
	cmd->scan_period_ms = cpu_to_le32(ATH12K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS);
	cmd->free_slot_expiry_time_ms = 0;
	cmd->flags = 0;

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi_send_obss_color_collision_cfg id %d type %d bss_color %d detect_period %d scan_period %d\n",
		   cmd->vdev_id, cmd->evt_type, cmd->current_bss_color,
		   cmd->detection_period_ms, cmd->scan_period_ms);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID);
	if (ret) {
		ath12k_warn(ab, "Failed to send WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID");
		dev_kfree_skb(skb);
	}
	return ret;
}

int ath12k_wmi_send_bss_color_change_enable_cmd(struct ath12k *ar, u32 vdev_id,
						bool enable)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_bss_color_change_enable_params_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_bss_color_change_enable_params_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BSS_COLOR_CHANGE_ENABLE,
						 len);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->enable = enable ? cpu_to_le32(1) : 0;

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi_send_bss_color_change_enable id %d enable %d\n",
		   cmd->vdev_id, cmd->enable);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_BSS_COLOR_CHANGE_ENABLE_CMDID);
	if (ret) {
		ath12k_warn(ab, "Failed to send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID");
		dev_kfree_skb(skb);
	}
	return ret;
}
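
/* Pass the FILS discovery frame template to firmware: a fixed command
 * TLV followed by the frame bytes in a 4-byte aligned ARRAY_BYTE TLV.
 */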
int ath12k_wmi_fils_discovery_tmpl(struct ath12k *ar, u32 vdev_id,
				   struct sk_buff *tmpl)
{
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	int ret, len;
	size_t aligned_len;
	struct wmi_fils_discovery_tmpl_cmd *cmd;

	aligned_len = roundup(tmpl->len, 4);
	len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev %i set FILS discovery template\n", vdev_id);

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_fils_discovery_tmpl_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FILS_DISCOVERY_TMPL_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->buf_len = cpu_to_le32(tmpl->len);
	ptr = skb->data + sizeof(*cmd);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
	memcpy(tlv->value, tmpl->data, tmpl->len);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_FILS_DISCOVERY_TMPL_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "WMI vdev %i failed to send FILS discovery template command\n",
			    vdev_id);
		dev_kfree_skb(skb);
	}
	return ret;
}

int ath12k_wmi_probe_resp_tmpl(struct ath12k *ar, u32 vdev_id,
			       struct sk_buff *tmpl)
{
	struct wmi_probe_tmpl_cmd *cmd;
	struct ath12k_wmi_bcn_prb_info_params *probe_info;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	int ret, len;
	size_t aligned_len = roundup(tmpl->len, 4);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev %i set probe response template\n", vdev_id);

	len = sizeof(*cmd) + sizeof(*probe_info) + TLV_HDR_SIZE + aligned_len;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_probe_tmpl_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PRB_TMPL_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->buf_len = cpu_to_le32(tmpl->len);

	ptr = skb->data + sizeof(*cmd);

	probe_info = ptr;
	len = sizeof(*probe_info);
	probe_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO,
							len);
	probe_info->caps = 0;
	probe_info->erp = 0;

	ptr += sizeof(*probe_info);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
	memcpy(tlv->value, tmpl->data, tmpl->len);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_PRB_TMPL_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "WMI vdev %i failed to send probe response template command\n",
			    vdev_id);
		dev_kfree_skb(skb);
	}
	return ret;
}

int ath12k_wmi_fils_discovery(struct ath12k *ar, u32 vdev_id, u32 interval,
			      bool unsol_bcast_probe_resp_enabled)
{
	struct sk_buff *skb;
	int ret, len;
	struct wmi_fils_discovery_cmd *cmd;

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev %i set %s interval to %u TU\n",
		   vdev_id, unsol_bcast_probe_resp_enabled ?
		   "unsolicited broadcast probe response" : "FILS discovery",
		   interval);

	len = sizeof(*cmd);
	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_fils_discovery_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ENABLE_FILS_CMD,
						 len);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->interval = cpu_to_le32(interval);
	cmd->config = cpu_to_le32(unsol_bcast_probe_resp_enabled);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_ENABLE_FILS_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "WMI vdev %i failed to send FILS discovery enable/disable command\n",
			    vdev_id);
		dev_kfree_skb(skb);
	}
	return ret;
}
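
/* Handle the OBSS color collision event: look up the link vif for the
 * reported vdev under RCU and forward the collision bitmap to mac80211.
 */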
3883 "unsolicited broadcast probe response" : "FILS discovery", 3884 interval); 3885 3886 len = sizeof(*cmd); 3887 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); 3888 if (!skb) 3889 return -ENOMEM; 3890 3891 cmd = (struct wmi_fils_discovery_cmd *)skb->data; 3892 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ENABLE_FILS_CMD, 3893 len); 3894 cmd->vdev_id = cpu_to_le32(vdev_id); 3895 cmd->interval = cpu_to_le32(interval); 3896 cmd->config = cpu_to_le32(unsol_bcast_probe_resp_enabled); 3897 3898 ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_ENABLE_FILS_CMDID); 3899 if (ret) { 3900 ath12k_warn(ar->ab, 3901 "WMI vdev %i failed to send FILS discovery enable/disable command\n", 3902 vdev_id); 3903 dev_kfree_skb(skb); 3904 } 3905 return ret; 3906 } 3907 3908 static void 3909 ath12k_wmi_obss_color_collision_event(struct ath12k_base *ab, struct sk_buff *skb) 3910 { 3911 const struct wmi_obss_color_collision_event *ev; 3912 struct ath12k_link_vif *arvif; 3913 u32 vdev_id, evt_type; 3914 u64 bitmap; 3915 3916 const void **tb __free(kfree) = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 3917 if (IS_ERR(tb)) { 3918 ath12k_warn(ab, "failed to parse OBSS color collision tlv %ld\n", 3919 PTR_ERR(tb)); 3920 return; 3921 } 3922 3923 ev = tb[WMI_TAG_OBSS_COLOR_COLLISION_EVT]; 3924 if (!ev) { 3925 ath12k_warn(ab, "failed to fetch OBSS color collision event\n"); 3926 return; 3927 } 3928 3929 vdev_id = le32_to_cpu(ev->vdev_id); 3930 evt_type = le32_to_cpu(ev->evt_type); 3931 bitmap = le64_to_cpu(ev->obss_color_bitmap); 3932 3933 guard(rcu)(); 3934 3935 arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_id); 3936 if (!arvif) { 3937 ath12k_warn(ab, "no arvif found for vdev %u in OBSS color collision event\n", 3938 vdev_id); 3939 return; 3940 } 3941 3942 switch (evt_type) { 3943 case WMI_BSS_COLOR_COLLISION_DETECTION: 3944 ieee80211_obss_color_collision_notify(arvif->ahvif->vif, 3945 bitmap, 3946 arvif->link_id); 3947 ath12k_dbg(ab, ATH12K_DBG_WMI, 3948 "obss color collision detected vdev %u event %d bitmap %016llx\n", 3949 vdev_id, evt_type, bitmap); 3950 break; 3951 case WMI_BSS_COLOR_COLLISION_DISABLE: 3952 case WMI_BSS_COLOR_FREE_SLOT_TIMER_EXPIRY: 3953 case WMI_BSS_COLOR_FREE_SLOT_AVAILABLE: 3954 break; 3955 default: 3956 ath12k_warn(ab, "unknown OBSS color collision event type %d\n", evt_type); 3957 } 3958 } 3959 3960 static void 3961 ath12k_fill_band_to_mac_param(struct ath12k_base *soc, 3962 struct ath12k_wmi_pdev_band_arg *arg) 3963 { 3964 u8 i; 3965 struct ath12k_wmi_hal_reg_capabilities_ext_arg *hal_reg_cap; 3966 struct ath12k_pdev *pdev; 3967 3968 for (i = 0; i < soc->num_radios; i++) { 3969 pdev = &soc->pdevs[i]; 3970 hal_reg_cap = &soc->hal_reg_cap[i]; 3971 arg[i].pdev_id = pdev->pdev_id; 3972 3973 switch (pdev->cap.supported_bands) { 3974 case WMI_HOST_WLAN_2GHZ_5GHZ_CAP: 3975 arg[i].start_freq = hal_reg_cap->low_2ghz_chan; 3976 arg[i].end_freq = hal_reg_cap->high_5ghz_chan; 3977 break; 3978 case WMI_HOST_WLAN_2GHZ_CAP: 3979 arg[i].start_freq = hal_reg_cap->low_2ghz_chan; 3980 arg[i].end_freq = hal_reg_cap->high_2ghz_chan; 3981 break; 3982 case WMI_HOST_WLAN_5GHZ_CAP: 3983 arg[i].start_freq = hal_reg_cap->low_5ghz_chan; 3984 arg[i].end_freq = hal_reg_cap->high_5ghz_chan; 3985 break; 3986 default: 3987 break; 3988 } 3989 } 3990 } 3991 3992 static void 3993 ath12k_wmi_copy_resource_config(struct ath12k_base *ab, 3994 struct ath12k_wmi_resource_config_params *wmi_cfg, 3995 struct ath12k_wmi_resource_config_arg *tg_cfg) 3996 { 3997 wmi_cfg->num_vdevs = cpu_to_le32(tg_cfg->num_vdevs); 3998 
static void
ath12k_wmi_copy_resource_config(struct ath12k_base *ab,
				struct ath12k_wmi_resource_config_params *wmi_cfg,
				struct ath12k_wmi_resource_config_arg *tg_cfg)
{
	wmi_cfg->num_vdevs = cpu_to_le32(tg_cfg->num_vdevs);
	wmi_cfg->num_peers = cpu_to_le32(tg_cfg->num_peers);
	wmi_cfg->num_offload_peers = cpu_to_le32(tg_cfg->num_offload_peers);
	wmi_cfg->num_offload_reorder_buffs =
		cpu_to_le32(tg_cfg->num_offload_reorder_buffs);
	wmi_cfg->num_peer_keys = cpu_to_le32(tg_cfg->num_peer_keys);
	wmi_cfg->num_tids = cpu_to_le32(tg_cfg->num_tids);
	wmi_cfg->ast_skid_limit = cpu_to_le32(tg_cfg->ast_skid_limit);
	wmi_cfg->tx_chain_mask = cpu_to_le32(tg_cfg->tx_chain_mask);
	wmi_cfg->rx_chain_mask = cpu_to_le32(tg_cfg->rx_chain_mask);
	wmi_cfg->rx_timeout_pri[0] = cpu_to_le32(tg_cfg->rx_timeout_pri[0]);
	wmi_cfg->rx_timeout_pri[1] = cpu_to_le32(tg_cfg->rx_timeout_pri[1]);
	wmi_cfg->rx_timeout_pri[2] = cpu_to_le32(tg_cfg->rx_timeout_pri[2]);
	wmi_cfg->rx_timeout_pri[3] = cpu_to_le32(tg_cfg->rx_timeout_pri[3]);
	wmi_cfg->rx_decap_mode = cpu_to_le32(tg_cfg->rx_decap_mode);
	wmi_cfg->scan_max_pending_req = cpu_to_le32(tg_cfg->scan_max_pending_req);
	wmi_cfg->bmiss_offload_max_vdev = cpu_to_le32(tg_cfg->bmiss_offload_max_vdev);
	wmi_cfg->roam_offload_max_vdev = cpu_to_le32(tg_cfg->roam_offload_max_vdev);
	wmi_cfg->roam_offload_max_ap_profiles =
		cpu_to_le32(tg_cfg->roam_offload_max_ap_profiles);
	wmi_cfg->num_mcast_groups = cpu_to_le32(tg_cfg->num_mcast_groups);
	wmi_cfg->num_mcast_table_elems = cpu_to_le32(tg_cfg->num_mcast_table_elems);
	wmi_cfg->mcast2ucast_mode = cpu_to_le32(tg_cfg->mcast2ucast_mode);
	wmi_cfg->tx_dbg_log_size = cpu_to_le32(tg_cfg->tx_dbg_log_size);
	wmi_cfg->num_wds_entries = cpu_to_le32(tg_cfg->num_wds_entries);
	wmi_cfg->dma_burst_size = cpu_to_le32(tg_cfg->dma_burst_size);
	wmi_cfg->mac_aggr_delim = cpu_to_le32(tg_cfg->mac_aggr_delim);
	wmi_cfg->rx_skip_defrag_timeout_dup_detection_check =
		cpu_to_le32(tg_cfg->rx_skip_defrag_timeout_dup_detection_check);
	wmi_cfg->vow_config = cpu_to_le32(tg_cfg->vow_config);
	wmi_cfg->gtk_offload_max_vdev = cpu_to_le32(tg_cfg->gtk_offload_max_vdev);
	wmi_cfg->num_msdu_desc = cpu_to_le32(tg_cfg->num_msdu_desc);
	wmi_cfg->max_frag_entries = cpu_to_le32(tg_cfg->max_frag_entries);
	wmi_cfg->num_tdls_vdevs = cpu_to_le32(tg_cfg->num_tdls_vdevs);
	wmi_cfg->num_tdls_conn_table_entries =
		cpu_to_le32(tg_cfg->num_tdls_conn_table_entries);
	wmi_cfg->beacon_tx_offload_max_vdev =
		cpu_to_le32(tg_cfg->beacon_tx_offload_max_vdev);
	wmi_cfg->num_multicast_filter_entries =
		cpu_to_le32(tg_cfg->num_multicast_filter_entries);
	wmi_cfg->num_wow_filters = cpu_to_le32(tg_cfg->num_wow_filters);
	wmi_cfg->num_keep_alive_pattern = cpu_to_le32(tg_cfg->num_keep_alive_pattern);
	wmi_cfg->keep_alive_pattern_size = cpu_to_le32(tg_cfg->keep_alive_pattern_size);
	wmi_cfg->max_tdls_concurrent_sleep_sta =
		cpu_to_le32(tg_cfg->max_tdls_concurrent_sleep_sta);
	wmi_cfg->max_tdls_concurrent_buffer_sta =
		cpu_to_le32(tg_cfg->max_tdls_concurrent_buffer_sta);
	wmi_cfg->wmi_send_separate = cpu_to_le32(tg_cfg->wmi_send_separate);
	wmi_cfg->num_ocb_vdevs = cpu_to_le32(tg_cfg->num_ocb_vdevs);
	wmi_cfg->num_ocb_channels = cpu_to_le32(tg_cfg->num_ocb_channels);
	wmi_cfg->num_ocb_schedules = cpu_to_le32(tg_cfg->num_ocb_schedules);
	wmi_cfg->bpf_instruction_size = cpu_to_le32(tg_cfg->bpf_instruction_size);
	wmi_cfg->max_bssid_rx_filters = cpu_to_le32(tg_cfg->max_bssid_rx_filters);
	wmi_cfg->use_pdev_id = cpu_to_le32(tg_cfg->use_pdev_id);
	wmi_cfg->flag1 = cpu_to_le32(tg_cfg->atf_config |
				     WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64 |
				     WMI_RSRC_CFG_FLAG1_ACK_RSSI);
	wmi_cfg->peer_map_unmap_version = cpu_to_le32(tg_cfg->peer_map_unmap_version);
	wmi_cfg->sched_params = cpu_to_le32(tg_cfg->sched_params);
	wmi_cfg->twt_ap_pdev_count = cpu_to_le32(tg_cfg->twt_ap_pdev_count);
	wmi_cfg->twt_ap_sta_count = cpu_to_le32(tg_cfg->twt_ap_sta_count);
	wmi_cfg->flags2 = le32_encode_bits(tg_cfg->peer_metadata_ver,
					   WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION);
	wmi_cfg->host_service_flags = cpu_to_le32(tg_cfg->is_reg_cc_ext_event_supported <<
				WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT);
	if (ab->hw_params->reoq_lut_support)
		wmi_cfg->host_service_flags |=
			cpu_to_le32(1 << WMI_RSRC_CFG_HOST_SVC_FLAG_REO_QREF_SUPPORT_BIT);
	wmi_cfg->ema_max_vap_cnt = cpu_to_le32(tg_cfg->ema_max_vap_cnt);
	wmi_cfg->ema_max_profile_period = cpu_to_le32(tg_cfg->ema_max_profile_period);
	wmi_cfg->flags2 |= cpu_to_le32(WMI_RSRC_CFG_FLAGS2_CALC_NEXT_DTIM_COUNT_SET);
}
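
/* Build and send WMI_INIT: the fixed command TLV, the resource config
 * TLV, the host memory chunk array and, when a preferred hw mode is
 * set, a pdev hw mode TLV with its band-to-mac array.
 */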
static int ath12k_init_cmd_send(struct ath12k_wmi_pdev *wmi,
				struct ath12k_wmi_init_cmd_arg *arg)
{
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct sk_buff *skb;
	struct wmi_init_cmd *cmd;
	struct ath12k_wmi_resource_config_params *cfg;
	struct ath12k_wmi_pdev_set_hw_mode_cmd *hw_mode;
	struct ath12k_wmi_pdev_band_to_mac_params *band_to_mac;
	struct ath12k_wmi_host_mem_chunk_params *host_mem_chunks;
	struct wmi_tlv *tlv;
	size_t ret, len;
	void *ptr;
	u32 hw_mode_len = 0;
	u16 idx;

	if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX)
		hw_mode_len = sizeof(*hw_mode) + TLV_HDR_SIZE +
			      (arg->num_band_to_mac * sizeof(*band_to_mac));

	len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len +
	      (arg->num_mem_chunks ? (sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS) : 0);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_init_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_INIT_CMD,
						 sizeof(*cmd));

	ptr = skb->data + sizeof(*cmd);
	cfg = ptr;

	ath12k_wmi_copy_resource_config(ab, cfg, &arg->res_cfg);

	cfg->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_RESOURCE_CONFIG,
						 sizeof(*cfg));

	ptr += sizeof(*cfg);
	host_mem_chunks = ptr + TLV_HDR_SIZE;
	len = sizeof(struct ath12k_wmi_host_mem_chunk_params);

	for (idx = 0; idx < arg->num_mem_chunks; ++idx) {
		host_mem_chunks[idx].tlv_header =
			ath12k_wmi_tlv_hdr(WMI_TAG_WLAN_HOST_MEMORY_CHUNK,
					   len);

		host_mem_chunks[idx].ptr = cpu_to_le32(arg->mem_chunks[idx].paddr);
		host_mem_chunks[idx].size = cpu_to_le32(arg->mem_chunks[idx].len);
		host_mem_chunks[idx].req_id = cpu_to_le32(arg->mem_chunks[idx].req_id);

		ath12k_dbg(ab, ATH12K_DBG_WMI,
			   "WMI host mem chunk req_id %d paddr 0x%llx len %d\n",
			   arg->mem_chunks[idx].req_id,
			   (u64)arg->mem_chunks[idx].paddr,
			   arg->mem_chunks[idx].len);
	}
	cmd->num_host_mem_chunks = cpu_to_le32(arg->num_mem_chunks);
	len = sizeof(struct ath12k_wmi_host_mem_chunk_params) * arg->num_mem_chunks;

	/* The array TLV header is added even when num_mem_chunks is zero
	 * (len is then zero as well).
	 */
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
	ptr += TLV_HDR_SIZE + len;

	if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX) {
		hw_mode = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)ptr;
		hw_mode->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD,
							     sizeof(*hw_mode));

		hw_mode->hw_mode_index = cpu_to_le32(arg->hw_mode_id);
		hw_mode->num_band_to_mac = cpu_to_le32(arg->num_band_to_mac);

		ptr += sizeof(*hw_mode);

		len = arg->num_band_to_mac * sizeof(*band_to_mac);
		tlv = ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);

		ptr += TLV_HDR_SIZE;
		len = sizeof(*band_to_mac);

		for (idx = 0; idx < arg->num_band_to_mac; idx++) {
			band_to_mac = (void *)ptr;

			band_to_mac->tlv_header =
				ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BAND_TO_MAC,
						       len);
			band_to_mac->pdev_id = cpu_to_le32(arg->band_to_mac[idx].pdev_id);
			band_to_mac->start_freq =
				cpu_to_le32(arg->band_to_mac[idx].start_freq);
			band_to_mac->end_freq =
				cpu_to_le32(arg->band_to_mac[idx].end_freq);
			ptr += sizeof(*band_to_mac);
		}
	}

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_INIT_CMDID);
	if (ret) {
		ath12k_warn(ab, "failed to send WMI_INIT_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
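
/* Configure LRO for a pdev; the Toeplitz hash keys are seeded with
 * random bytes.
 */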
int ath12k_wmi_pdev_lro_cfg(struct ath12k *ar,
			    int pdev_id)
{
	struct ath12k_wmi_pdev_lro_config_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct ath12k_wmi_pdev_lro_config_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_LRO_INFO_CMD,
						 sizeof(*cmd));

	get_random_bytes(cmd->th_4, sizeof(cmd->th_4));
	get_random_bytes(cmd->th_6, sizeof(cmd->th_6));

	cmd->pdev_id = cpu_to_le32(pdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI lro cfg cmd pdev_id 0x%x\n", pdev_id);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
				  WMI_LRO_CONFIG_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send lro cfg req wmi cmd\n");
		goto err;
	}

	return 0;
err:
	dev_kfree_skb(skb);
	return ret;
}

int ath12k_wmi_wait_for_service_ready(struct ath12k_base *ab)
{
	unsigned long time_left;

	time_left = wait_for_completion_timeout(&ab->wmi_ab.service_ready,
						WMI_SERVICE_READY_TIMEOUT_HZ);
	if (!time_left)
		return -ETIMEDOUT;

	return 0;
}

int ath12k_wmi_wait_for_unified_ready(struct ath12k_base *ab)
{
	unsigned long time_left;

	time_left = wait_for_completion_timeout(&ab->wmi_ab.unified_ready,
						WMI_SERVICE_READY_TIMEOUT_HZ);
	if (!time_left)
		return -ETIMEDOUT;

	return 0;
}

int ath12k_wmi_set_hw_mode(struct ath12k_base *ab,
			   enum wmi_host_hw_mode_config_type mode)
{
	struct ath12k_wmi_pdev_set_hw_mode_cmd *cmd;
	struct sk_buff *skb;
	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
	int len;
	int ret;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD,
						 sizeof(*cmd));

	cmd->pdev_id = WMI_PDEV_ID_SOC;
	cmd->hw_mode_index = cpu_to_le32(mode);

	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_PDEV_SET_HW_MODE_CMDID);
	if (ret) {
		ath12k_warn(ab, "failed to send WMI_PDEV_SET_HW_MODE_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_cmd_init(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
	struct ath12k_wmi_init_cmd_arg arg = {};

	if (test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT,
		     ab->wmi_ab.svc_map))
		arg.res_cfg.is_reg_cc_ext_event_supported = true;

	ab->hw_params->wmi_init(ab, &arg.res_cfg);
	ab->wow.wmi_conf_rx_decap_mode = arg.res_cfg.rx_decap_mode;

	arg.num_mem_chunks = wmi_ab->num_mem_chunks;
	arg.hw_mode_id = wmi_ab->preferred_hw_mode;
	arg.mem_chunks = wmi_ab->mem_chunks;

	if (ab->hw_params->single_pdev_only)
		arg.hw_mode_id = WMI_HOST_HW_MODE_MAX;

	arg.num_band_to_mac = ab->num_radios;
	ath12k_fill_band_to_mac_param(ab, arg.band_to_mac);

	dp->peer_metadata_ver = arg.res_cfg.peer_metadata_ver;

	return ath12k_init_cmd_send(&wmi_ab->wmi[0], &arg);
}
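
/* Program the spectral scan parameters for a vdev. */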
int ath12k_wmi_vdev_spectral_conf(struct ath12k *ar,
				  struct ath12k_wmi_vdev_spectral_conf_arg *arg)
{
	struct ath12k_wmi_vdev_spectral_conf_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct ath12k_wmi_vdev_spectral_conf_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	cmd->scan_count = cpu_to_le32(arg->scan_count);
	cmd->scan_period = cpu_to_le32(arg->scan_period);
	cmd->scan_priority = cpu_to_le32(arg->scan_priority);
	cmd->scan_fft_size = cpu_to_le32(arg->scan_fft_size);
	cmd->scan_gc_ena = cpu_to_le32(arg->scan_gc_ena);
	cmd->scan_restart_ena = cpu_to_le32(arg->scan_restart_ena);
	cmd->scan_noise_floor_ref = cpu_to_le32(arg->scan_noise_floor_ref);
	cmd->scan_init_delay = cpu_to_le32(arg->scan_init_delay);
	cmd->scan_nb_tone_thr = cpu_to_le32(arg->scan_nb_tone_thr);
	cmd->scan_str_bin_thr = cpu_to_le32(arg->scan_str_bin_thr);
	cmd->scan_wb_rpt_mode = cpu_to_le32(arg->scan_wb_rpt_mode);
	cmd->scan_rssi_rpt_mode = cpu_to_le32(arg->scan_rssi_rpt_mode);
	cmd->scan_rssi_thr = cpu_to_le32(arg->scan_rssi_thr);
	cmd->scan_pwr_format = cpu_to_le32(arg->scan_pwr_format);
	cmd->scan_rpt_mode = cpu_to_le32(arg->scan_rpt_mode);
	cmd->scan_bin_scale = cpu_to_le32(arg->scan_bin_scale);
	cmd->scan_dbm_adj = cpu_to_le32(arg->scan_dbm_adj);
	cmd->scan_chn_mask = cpu_to_le32(arg->scan_chn_mask);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI spectral scan config cmd vdev_id 0x%x\n",
		   arg->vdev_id);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
				  WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send spectral scan config wmi cmd\n");
		goto err;
	}

	return 0;
err:
	dev_kfree_skb(skb);
	return ret;
}

int ath12k_wmi_vdev_spectral_enable(struct ath12k *ar, u32 vdev_id,
				    u32 trigger, u32 enable)
{
	struct ath12k_wmi_vdev_spectral_enable_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct ath12k_wmi_vdev_spectral_enable_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->trigger_cmd = cpu_to_le32(trigger);
	cmd->enable_cmd = cpu_to_le32(enable);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI spectral enable cmd vdev id 0x%x\n",
		   vdev_id);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
				  WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send spectral enable wmi cmd\n");
		goto err;
	}

	return 0;
err:
	dev_kfree_skb(skb);
	return ret;
}

int ath12k_wmi_pdev_dma_ring_cfg(struct ath12k *ar,
				 struct ath12k_wmi_pdev_dma_ring_cfg_arg *arg)
{
	struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DMA_RING_CFG_REQ,
						 sizeof(*cmd));

	cmd->pdev_id = cpu_to_le32(arg->pdev_id);
	cmd->module_id = cpu_to_le32(arg->module_id);
	cmd->base_paddr_lo = cpu_to_le32(arg->base_paddr_lo);
	cmd->base_paddr_hi = cpu_to_le32(arg->base_paddr_hi);
	cmd->head_idx_paddr_lo = cpu_to_le32(arg->head_idx_paddr_lo);
	cmd->head_idx_paddr_hi = cpu_to_le32(arg->head_idx_paddr_hi);
	cmd->tail_idx_paddr_lo = cpu_to_le32(arg->tail_idx_paddr_lo);
	cmd->tail_idx_paddr_hi = cpu_to_le32(arg->tail_idx_paddr_hi);
	cmd->num_elems = cpu_to_le32(arg->num_elems);
	cmd->buf_size = cpu_to_le32(arg->buf_size);
	cmd->num_resp_per_event = cpu_to_le32(arg->num_resp_per_event);
	cmd->event_timeout_ms = cpu_to_le32(arg->event_timeout_ms);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI DMA ring cfg req cmd pdev_id 0x%x\n",
		   arg->pdev_id);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
				  WMI_PDEV_DMA_RING_CFG_REQ_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send dma ring cfg req wmi cmd\n");
		goto err;
	}

	return 0;
err:
	dev_kfree_skb(skb);
	return ret;
}
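
/* TLV parsers for the DMA buffer release event; each entry is counted
 * against the totals advertised in the fixed params.
 */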
static int ath12k_wmi_dma_buf_entry_parse(struct ath12k_base *soc,
					  u16 tag, u16 len,
					  const void *ptr, void *data)
{
	struct ath12k_wmi_dma_buf_release_arg *arg = data;

	if (tag != WMI_TAG_DMA_BUF_RELEASE_ENTRY)
		return -EPROTO;

	if (arg->num_buf_entry >= le32_to_cpu(arg->fixed.num_buf_release_entry))
		return -ENOBUFS;

	arg->num_buf_entry++;
	return 0;
}

static int ath12k_wmi_dma_buf_meta_parse(struct ath12k_base *soc,
					 u16 tag, u16 len,
					 const void *ptr, void *data)
{
	struct ath12k_wmi_dma_buf_release_arg *arg = data;

	if (tag != WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA)
		return -EPROTO;

	if (arg->num_meta >= le32_to_cpu(arg->fixed.num_meta_data_entry))
		return -ENOBUFS;

	arg->num_meta++;

	return 0;
}

static int ath12k_wmi_dma_buf_parse(struct ath12k_base *ab,
				    u16 tag, u16 len,
				    const void *ptr, void *data)
{
	struct ath12k_wmi_dma_buf_release_arg *arg = data;
	const struct ath12k_wmi_dma_buf_release_fixed_params *fixed;
	u32 pdev_id;
	int ret;

	switch (tag) {
	case WMI_TAG_DMA_BUF_RELEASE:
		fixed = ptr;
		arg->fixed = *fixed;
		pdev_id = DP_HW2SW_MACID(le32_to_cpu(fixed->pdev_id));
		arg->fixed.pdev_id = cpu_to_le32(pdev_id);
		break;
	case WMI_TAG_ARRAY_STRUCT:
		if (!arg->buf_entry_done) {
			arg->num_buf_entry = 0;
			arg->buf_entry = ptr;

			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
						  ath12k_wmi_dma_buf_entry_parse,
						  arg);
			if (ret) {
				ath12k_warn(ab, "failed to parse dma buf entry tlv %d\n",
					    ret);
				return ret;
			}

			arg->buf_entry_done = true;
		} else if (!arg->meta_data_done) {
			arg->num_meta = 0;
			arg->meta_data = ptr;

			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
						  ath12k_wmi_dma_buf_meta_parse,
						  arg);
			if (ret) {
				ath12k_warn(ab, "failed to parse dma buf meta tlv %d\n",
					    ret);
				return ret;
			}

			arg->meta_data_done = true;
		}
		break;
	default:
		break;
	}
	return 0;
}

static void ath12k_wmi_pdev_dma_ring_buf_release_event(struct ath12k_base *ab,
						       struct sk_buff *skb)
{
	struct ath12k_wmi_dma_buf_release_arg arg = {};
	struct ath12k_dbring_buf_release_event param;
	int ret;

	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath12k_wmi_dma_buf_parse,
				  &arg);
	if (ret) {
		ath12k_warn(ab, "failed to parse dma buf release tlv %d\n", ret);
		return;
	}

	param.fixed = arg.fixed;
	param.buf_entry = arg.buf_entry;
	param.num_buf_entry = arg.num_buf_entry;
	param.meta_data = arg.meta_data;
	param.num_meta = arg.num_meta;

	ret = ath12k_dbring_buffer_release_event(ab, &param);
	if (ret) {
		ath12k_warn(ab, "failed to handle dma buf release event %d\n", ret);
		return;
	}
}
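
/* Count HW mode capability TLVs and accumulate the total number of
 * phys from each mode's phy_id_map.
 */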
static int ath12k_wmi_hw_mode_caps_parse(struct ath12k_base *soc,
					 u16 tag, u16 len,
					 const void *ptr, void *data)
{
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
	struct ath12k_wmi_hw_mode_cap_params *hw_mode_cap;
	u32 phy_map = 0;

	if (tag != WMI_TAG_HW_MODE_CAPABILITIES)
		return -EPROTO;

	if (svc_rdy_ext->n_hw_mode_caps >= svc_rdy_ext->arg.num_hw_modes)
		return -ENOBUFS;

	hw_mode_cap = container_of(ptr, struct ath12k_wmi_hw_mode_cap_params,
				   hw_mode_id);
	svc_rdy_ext->n_hw_mode_caps++;

	phy_map = le32_to_cpu(hw_mode_cap->phy_id_map);
	svc_rdy_ext->tot_phy_id += fls(phy_map);

	return 0;
}

static int ath12k_wmi_hw_mode_caps(struct ath12k_base *soc,
				   u16 len, const void *ptr, void *data)
{
	struct ath12k_svc_ext_info *svc_ext_info = &soc->wmi_ab.svc_ext_info;
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
	const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
	enum wmi_host_hw_mode_config_type mode, pref;
	u32 i;
	int ret;

	svc_rdy_ext->n_hw_mode_caps = 0;
	svc_rdy_ext->hw_mode_caps = ptr;

	ret = ath12k_wmi_tlv_iter(soc, ptr, len,
				  ath12k_wmi_hw_mode_caps_parse,
				  svc_rdy_ext);
	if (ret) {
		ath12k_warn(soc, "failed to parse tlv %d\n", ret);
		return ret;
	}

	for (i = 0; i < svc_rdy_ext->n_hw_mode_caps; i++) {
		hw_mode_caps = &svc_rdy_ext->hw_mode_caps[i];
		mode = le32_to_cpu(hw_mode_caps->hw_mode_id);

		if (mode >= WMI_HOST_HW_MODE_MAX)
			continue;

		pref = soc->wmi_ab.preferred_hw_mode;

		if (ath12k_hw_mode_pri_map[mode] <= ath12k_hw_mode_pri_map[pref]) {
			svc_rdy_ext->pref_hw_mode_caps = *hw_mode_caps;
			soc->wmi_ab.preferred_hw_mode = mode;
		}
	}

	svc_ext_info->num_hw_modes = svc_rdy_ext->n_hw_mode_caps;

	ath12k_dbg(soc, ATH12K_DBG_WMI, "num hw modes %u preferred_hw_mode %d\n",
		   svc_ext_info->num_hw_modes, soc->wmi_ab.preferred_hw_mode);

	if (soc->wmi_ab.preferred_hw_mode == WMI_HOST_HW_MODE_MAX)
		return -EINVAL;

	return 0;
}

static int ath12k_wmi_mac_phy_caps_parse(struct ath12k_base *soc,
					 u16 tag, u16 len,
					 const void *ptr, void *data)
{
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;

	if (tag != WMI_TAG_MAC_PHY_CAPABILITIES)
		return -EPROTO;

	if (svc_rdy_ext->n_mac_phy_caps >= svc_rdy_ext->tot_phy_id)
		return -ENOBUFS;

	len = min_t(u16, len, sizeof(struct ath12k_wmi_mac_phy_caps_params));
	if (!svc_rdy_ext->n_mac_phy_caps) {
		svc_rdy_ext->mac_phy_caps = kzalloc((svc_rdy_ext->tot_phy_id) * len,
						    GFP_ATOMIC);
		if (!svc_rdy_ext->mac_phy_caps)
			return -ENOMEM;
	}

	memcpy(svc_rdy_ext->mac_phy_caps + svc_rdy_ext->n_mac_phy_caps, ptr, len);
	svc_rdy_ext->n_mac_phy_caps++;
	return 0;
}

static int ath12k_wmi_ext_hal_reg_caps_parse(struct ath12k_base *soc,
					     u16 tag, u16 len,
					     const void *ptr, void *data)
{
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;

	if (tag != WMI_TAG_HAL_REG_CAPABILITIES_EXT)
		return -EPROTO;

	if (svc_rdy_ext->n_ext_hal_reg_caps >= svc_rdy_ext->arg.num_phy)
		return -ENOBUFS;

	svc_rdy_ext->n_ext_hal_reg_caps++;
	return 0;
}
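
/* Extract the per-phy extended HAL register capabilities and cache
 * them in soc->hal_reg_cap, indexed by phy id.
 */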
static int ath12k_wmi_ext_hal_reg_caps(struct ath12k_base *soc,
				       u16 len, const void *ptr, void *data)
{
	struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
	struct ath12k_wmi_hal_reg_capabilities_ext_arg reg_cap;
	int ret;
	u32 i;

	svc_rdy_ext->n_ext_hal_reg_caps = 0;
	svc_rdy_ext->ext_hal_reg_caps = ptr;
	ret = ath12k_wmi_tlv_iter(soc, ptr, len,
				  ath12k_wmi_ext_hal_reg_caps_parse,
				  svc_rdy_ext);
	if (ret) {
		ath12k_warn(soc, "failed to parse tlv %d\n", ret);
		return ret;
	}

	for (i = 0; i < svc_rdy_ext->arg.num_phy; i++) {
		ret = ath12k_pull_reg_cap_svc_rdy_ext(wmi_handle,
						      svc_rdy_ext->soc_hal_reg_caps,
						      svc_rdy_ext->ext_hal_reg_caps, i,
						      &reg_cap);
		if (ret) {
			ath12k_warn(soc, "failed to extract reg cap %d\n", i);
			return ret;
		}

		if (reg_cap.phy_id >= MAX_RADIOS) {
			ath12k_warn(soc, "unexpected phy id %u\n", reg_cap.phy_id);
			return -EINVAL;
		}

		soc->hal_reg_cap[reg_cap.phy_id] = reg_cap;
	}
	return 0;
}

static int ath12k_wmi_ext_soc_hal_reg_caps_parse(struct ath12k_base *soc,
						 u16 len, const void *ptr,
						 void *data)
{
	struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
	u8 hw_mode_id = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.hw_mode_id);
	u32 phy_id_map;
	int pdev_index = 0;
	int ret;

	svc_rdy_ext->soc_hal_reg_caps = ptr;
	svc_rdy_ext->arg.num_phy = le32_to_cpu(svc_rdy_ext->soc_hal_reg_caps->num_phy);

	soc->num_radios = 0;
	phy_id_map = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.phy_id_map);
	soc->fw_pdev_count = 0;

	while (phy_id_map && soc->num_radios < MAX_RADIOS) {
		ret = ath12k_pull_mac_phy_cap_svc_ready_ext(wmi_handle,
							    svc_rdy_ext,
							    hw_mode_id, soc->num_radios,
							    &soc->pdevs[pdev_index]);
		if (ret) {
			ath12k_warn(soc, "failed to extract mac caps, idx :%d\n",
				    soc->num_radios);
			return ret;
		}

		soc->num_radios++;

		/* For single_pdev_only targets,
		 * save mac_phy capability in the same pdev
		 */
		if (soc->hw_params->single_pdev_only)
			pdev_index = 0;
		else
			pdev_index = soc->num_radios;

		/* TODO: mac_phy_cap prints */
		phy_id_map >>= 1;
	}

	if (soc->hw_params->single_pdev_only) {
		soc->num_radios = 1;
		soc->pdevs[0].pdev_id = 0;
	}

	return 0;
}

static int ath12k_wmi_dma_ring_caps_parse(struct ath12k_base *soc,
					  u16 tag, u16 len,
					  const void *ptr, void *data)
{
	struct ath12k_wmi_dma_ring_caps_parse *parse = data;

	if (tag != WMI_TAG_DMA_RING_CAPABILITIES)
		return -EPROTO;

	parse->n_dma_ring_caps++;
	return 0;
}

static int ath12k_wmi_alloc_dbring_caps(struct ath12k_base *ab,
					u32 num_cap)
{
	size_t sz;
	void *ptr;

	sz = num_cap * sizeof(struct ath12k_dbring_cap);
	ptr = kzalloc(sz, GFP_ATOMIC);
	if (!ptr)
		return -ENOMEM;

	ab->db_caps = ptr;
	ab->num_db_cap = num_cap;

	return 0;
}

static void ath12k_wmi_free_dbring_caps(struct ath12k_base *ab)
{
	kfree(ab->db_caps);
	ab->db_caps = NULL;
	ab->num_db_cap = 0;
}
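
/* Convert the DMA ring capability TLVs into direct-buffer ring caps;
 * the array is processed only once.
 */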
static int ath12k_wmi_dma_ring_caps(struct ath12k_base *ab,
				    u16 len, const void *ptr, void *data)
{
	struct ath12k_wmi_dma_ring_caps_parse *dma_caps_parse = data;
	struct ath12k_wmi_dma_ring_caps_params *dma_caps;
	struct ath12k_dbring_cap *dir_buff_caps;
	int ret;
	u32 i;

	dma_caps_parse->n_dma_ring_caps = 0;
	dma_caps = (struct ath12k_wmi_dma_ring_caps_params *)ptr;
	ret = ath12k_wmi_tlv_iter(ab, ptr, len,
				  ath12k_wmi_dma_ring_caps_parse,
				  dma_caps_parse);
	if (ret) {
		ath12k_warn(ab, "failed to parse dma ring caps tlv %d\n", ret);
		return ret;
	}

	if (!dma_caps_parse->n_dma_ring_caps)
		return 0;

	if (ab->num_db_cap) {
		ath12k_warn(ab, "Already processed, so ignoring dma ring caps\n");
		return 0;
	}

	ret = ath12k_wmi_alloc_dbring_caps(ab, dma_caps_parse->n_dma_ring_caps);
	if (ret)
		return ret;

	dir_buff_caps = ab->db_caps;
	for (i = 0; i < dma_caps_parse->n_dma_ring_caps; i++) {
		if (le32_to_cpu(dma_caps[i].module_id) >= WMI_DIRECT_BUF_MAX) {
			ath12k_warn(ab, "Invalid module id %d\n",
				    le32_to_cpu(dma_caps[i].module_id));
			ret = -EINVAL;
			goto free_dir_buff;
		}

		dir_buff_caps[i].id = le32_to_cpu(dma_caps[i].module_id);
		dir_buff_caps[i].pdev_id =
			DP_HW2SW_MACID(le32_to_cpu(dma_caps[i].pdev_id));
		dir_buff_caps[i].min_elem = le32_to_cpu(dma_caps[i].min_elem);
		dir_buff_caps[i].min_buf_sz = le32_to_cpu(dma_caps[i].min_buf_sz);
		dir_buff_caps[i].min_buf_align = le32_to_cpu(dma_caps[i].min_buf_align);
	}

	return 0;

free_dir_buff:
	ath12k_wmi_free_dbring_caps(ab);
	return ret;
}

static void
ath12k_wmi_save_mac_phy_info(struct ath12k_base *ab,
			     const struct ath12k_wmi_mac_phy_caps_params *mac_phy_cap,
			     struct ath12k_svc_ext_mac_phy_info *mac_phy_info)
{
	mac_phy_info->phy_id = __le32_to_cpu(mac_phy_cap->phy_id);
	mac_phy_info->supported_bands = __le32_to_cpu(mac_phy_cap->supported_bands);
	mac_phy_info->hw_freq_range.low_2ghz_freq =
		__le32_to_cpu(mac_phy_cap->low_2ghz_chan_freq);
	mac_phy_info->hw_freq_range.high_2ghz_freq =
		__le32_to_cpu(mac_phy_cap->high_2ghz_chan_freq);
	mac_phy_info->hw_freq_range.low_5ghz_freq =
		__le32_to_cpu(mac_phy_cap->low_5ghz_chan_freq);
	mac_phy_info->hw_freq_range.high_5ghz_freq =
		__le32_to_cpu(mac_phy_cap->high_5ghz_chan_freq);
}

static void
ath12k_wmi_save_all_mac_phy_info(struct ath12k_base *ab,
				 struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext)
{
	struct ath12k_svc_ext_info *svc_ext_info = &ab->wmi_ab.svc_ext_info;
	const struct ath12k_wmi_mac_phy_caps_params *mac_phy_cap;
	const struct ath12k_wmi_hw_mode_cap_params *hw_mode_cap;
	struct ath12k_svc_ext_mac_phy_info *mac_phy_info;
	u32 hw_mode_id, phy_bit_map;
	u8 hw_idx;

	mac_phy_info = &svc_ext_info->mac_phy_info[0];
	mac_phy_cap = svc_rdy_ext->mac_phy_caps;

	for (hw_idx = 0; hw_idx < svc_ext_info->num_hw_modes; hw_idx++) {
		hw_mode_cap = &svc_rdy_ext->hw_mode_caps[hw_idx];
		hw_mode_id = __le32_to_cpu(hw_mode_cap->hw_mode_id);
		phy_bit_map = __le32_to_cpu(hw_mode_cap->phy_id_map);

		while (phy_bit_map) {
			ath12k_wmi_save_mac_phy_info(ab, mac_phy_cap, mac_phy_info);
			mac_phy_info->hw_mode_config_type =
				le32_get_bits(hw_mode_cap->hw_mode_config_type,
					      WMI_HW_MODE_CAP_CFG_TYPE);
			ath12k_dbg(ab, ATH12K_DBG_WMI,
				   "hw_idx %u hw_mode_id %u hw_mode_config_type %u supported_bands %u phy_id %u 2 GHz [%u - %u] 5 GHz [%u - %u]\n",
				   hw_idx, hw_mode_id,
				   mac_phy_info->hw_mode_config_type,
				   mac_phy_info->supported_bands, mac_phy_info->phy_id,
				   mac_phy_info->hw_freq_range.low_2ghz_freq,
				   mac_phy_info->hw_freq_range.high_2ghz_freq,
				   mac_phy_info->hw_freq_range.low_5ghz_freq,
				   mac_phy_info->hw_freq_range.high_5ghz_freq);

			mac_phy_cap++;
			mac_phy_info++;

			phy_bit_map >>= 1;
		}
	}
}
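
/* Tag-based parser for the SERVICE_READY_EXT event. The unnamed
 * ARRAY_STRUCT TLVs arrive in a fixed order which is tracked with the
 * *_done flags in struct ath12k_wmi_svc_rdy_ext_parse.
 */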
static int ath12k_wmi_svc_rdy_ext_parse(struct ath12k_base *ab,
					u16 tag, u16 len,
					const void *ptr, void *data)
{
	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
	int ret;

	switch (tag) {
	case WMI_TAG_SERVICE_READY_EXT_EVENT:
		ret = ath12k_pull_svc_ready_ext(wmi_handle, ptr,
						&svc_rdy_ext->arg);
		if (ret) {
			ath12k_warn(ab, "unable to extract ext params\n");
			return ret;
		}
		break;

	case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS:
		svc_rdy_ext->hw_caps = ptr;
		svc_rdy_ext->arg.num_hw_modes =
			le32_to_cpu(svc_rdy_ext->hw_caps->num_hw_modes);
		break;

	case WMI_TAG_SOC_HAL_REG_CAPABILITIES:
		ret = ath12k_wmi_ext_soc_hal_reg_caps_parse(ab, len, ptr,
							    svc_rdy_ext);
		if (ret)
			return ret;
		break;

	case WMI_TAG_ARRAY_STRUCT:
		if (!svc_rdy_ext->hw_mode_done) {
			ret = ath12k_wmi_hw_mode_caps(ab, len, ptr, svc_rdy_ext);
			if (ret)
				return ret;

			svc_rdy_ext->hw_mode_done = true;
		} else if (!svc_rdy_ext->mac_phy_done) {
			svc_rdy_ext->n_mac_phy_caps = 0;
			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
						  ath12k_wmi_mac_phy_caps_parse,
						  svc_rdy_ext);
			if (ret) {
				ath12k_warn(ab, "failed to parse tlv %d\n", ret);
				return ret;
			}

			ath12k_wmi_save_all_mac_phy_info(ab, svc_rdy_ext);

			svc_rdy_ext->mac_phy_done = true;
		} else if (!svc_rdy_ext->ext_hal_reg_done) {
			ret = ath12k_wmi_ext_hal_reg_caps(ab, len, ptr, svc_rdy_ext);
			if (ret)
				return ret;

			svc_rdy_ext->ext_hal_reg_done = true;
		} else if (!svc_rdy_ext->mac_phy_chainmask_combo_done) {
			svc_rdy_ext->mac_phy_chainmask_combo_done = true;
		} else if (!svc_rdy_ext->mac_phy_chainmask_cap_done) {
			svc_rdy_ext->mac_phy_chainmask_cap_done = true;
		} else if (!svc_rdy_ext->oem_dma_ring_cap_done) {
			svc_rdy_ext->oem_dma_ring_cap_done = true;
		} else if (!svc_rdy_ext->dma_ring_cap_done) {
			ret = ath12k_wmi_dma_ring_caps(ab, len, ptr,
						       &svc_rdy_ext->dma_caps_parse);
			if (ret)
				return ret;

			svc_rdy_ext->dma_ring_cap_done = true;
		}
		break;

	default:
		break;
	}
	return 0;
}

static int ath12k_service_ready_ext_event(struct ath12k_base *ab,
					  struct sk_buff *skb)
{
	struct ath12k_wmi_svc_rdy_ext_parse svc_rdy_ext = { };
	int ret;

	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath12k_wmi_svc_rdy_ext_parse,
				  &svc_rdy_ext);
	if (ret) {
		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
		goto err;
	}

	if (!test_bit(WMI_TLV_SERVICE_EXT2_MSG, ab->wmi_ab.svc_map))
		complete(&ab->wmi_ab.service_ready);

	kfree(svc_rdy_ext.mac_phy_caps);
	return 0;

err:
	kfree(svc_rdy_ext.mac_phy_caps);
	ath12k_wmi_free_dbring_caps(ab);
	return ret;
}
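
/* Extract the fixed parameters of the SERVICE_READY_EXT2 event. */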
static int ath12k_pull_svc_ready_ext2(struct ath12k_wmi_pdev *wmi_handle,
				      const void *ptr,
				      struct ath12k_wmi_svc_rdy_ext2_arg *arg)
{
	const struct wmi_service_ready_ext2_event *ev = ptr;

	if (!ev)
		return -EINVAL;

	arg->reg_db_version = le32_to_cpu(ev->reg_db_version);
	arg->hw_min_max_tx_power_2ghz = le32_to_cpu(ev->hw_min_max_tx_power_2ghz);
	arg->hw_min_max_tx_power_5ghz = le32_to_cpu(ev->hw_min_max_tx_power_5ghz);
	arg->chwidth_num_peer_caps = le32_to_cpu(ev->chwidth_num_peer_caps);
	arg->preamble_puncture_bw = le32_to_cpu(ev->preamble_puncture_bw);
	arg->max_user_per_ppdu_ofdma = le32_to_cpu(ev->max_user_per_ppdu_ofdma);
	arg->max_user_per_ppdu_mumimo = le32_to_cpu(ev->max_user_per_ppdu_mumimo);
	arg->target_cap_flags = le32_to_cpu(ev->target_cap_flags);
	return 0;
}

static void ath12k_wmi_eht_caps_parse(struct ath12k_pdev *pdev, u32 band,
				      const __le32 cap_mac_info[],
				      const __le32 cap_phy_info[],
				      const __le32 supp_mcs[],
				      const struct ath12k_wmi_ppe_threshold_params *ppet,
				      __le32 cap_info_internal)
{
	struct ath12k_band_cap *cap_band = &pdev->cap.band[band];
	u32 support_320mhz;
	u8 i;

	if (band == NL80211_BAND_6GHZ)
		support_320mhz = cap_band->eht_cap_phy_info[0] &
				 IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;

	for (i = 0; i < WMI_MAX_EHTCAP_MAC_SIZE; i++)
		cap_band->eht_cap_mac_info[i] = le32_to_cpu(cap_mac_info[i]);

	for (i = 0; i < WMI_MAX_EHTCAP_PHY_SIZE; i++)
		cap_band->eht_cap_phy_info[i] = le32_to_cpu(cap_phy_info[i]);

	if (band == NL80211_BAND_6GHZ)
		cap_band->eht_cap_phy_info[0] |= support_320mhz;

	cap_band->eht_mcs_20_only = le32_to_cpu(supp_mcs[0]);
	cap_band->eht_mcs_80 = le32_to_cpu(supp_mcs[1]);
	if (band != NL80211_BAND_2GHZ) {
		cap_band->eht_mcs_160 = le32_to_cpu(supp_mcs[2]);
		cap_band->eht_mcs_320 = le32_to_cpu(supp_mcs[3]);
	}

	cap_band->eht_ppet.numss_m1 = le32_to_cpu(ppet->numss_m1);
	cap_band->eht_ppet.ru_bit_mask = le32_to_cpu(ppet->ru_info);
	for (i = 0; i < WMI_MAX_NUM_SS; i++)
		cap_band->eht_ppet.ppet16_ppet8_ru3_ru0[i] =
			le32_to_cpu(ppet->ppet16_ppet8_ru3_ru0[i]);

	cap_band->eht_cap_info_internal = le32_to_cpu(cap_info_internal);
}

static int
ath12k_wmi_tlv_mac_phy_caps_ext_parse(struct ath12k_base *ab,
				      const struct ath12k_wmi_caps_ext_params *caps,
				      struct ath12k_pdev *pdev)
{
	u32 bands;
	int i;

	if (ab->hw_params->single_pdev_only) {
		for (i = 0; i < ab->fw_pdev_count; i++) {
			struct ath12k_fw_pdev *fw_pdev = &ab->fw_pdev[i];

			if (fw_pdev->pdev_id == ath12k_wmi_caps_ext_get_pdev_id(caps) &&
			    fw_pdev->phy_id == le32_to_cpu(caps->phy_id)) {
				bands = fw_pdev->supported_bands;
				break;
			}
		}

		if (i == ab->fw_pdev_count)
			return -EINVAL;
	} else {
		bands = pdev->cap.supported_bands;
	}

	if (bands & WMI_HOST_WLAN_2GHZ_CAP) {
		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_2GHZ,
					  caps->eht_cap_mac_info_2ghz,
					  caps->eht_cap_phy_info_2ghz,
					  caps->eht_supp_mcs_ext_2ghz,
					  &caps->eht_ppet_2ghz,
					  caps->eht_cap_info_internal);
	}

	if (bands & WMI_HOST_WLAN_5GHZ_CAP) {
		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_5GHZ,
					  caps->eht_cap_mac_info_5ghz,
					  caps->eht_cap_phy_info_5ghz,
					  caps->eht_supp_mcs_ext_5ghz,
					  &caps->eht_ppet_5ghz,
					  caps->eht_cap_info_internal);

		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_6GHZ,
					  caps->eht_cap_mac_info_5ghz,
					  caps->eht_cap_phy_info_5ghz,
					  caps->eht_supp_mcs_ext_5ghz,
					  &caps->eht_ppet_5ghz,
					  caps->eht_cap_info_internal);
	}

	pdev->cap.eml_cap = le32_to_cpu(caps->eml_capability);
	pdev->cap.mld_cap = le32_to_cpu(caps->mld_capability);

	return 0;
}
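
/* Match an extended MAC/PHY capability TLV to its pdev and apply the
 * EHT capabilities; on single-pdev targets only the preferred hw mode
 * is applied, apart from the 6 GHz 320 MHz flag.
 */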
static int ath12k_wmi_tlv_mac_phy_caps_ext(struct ath12k_base *ab, u16 tag,
					   u16 len, const void *ptr,
					   void *data)
{
	const struct ath12k_wmi_caps_ext_params *caps = ptr;
	struct ath12k_band_cap *cap_band;
	u32 support_320mhz;
	int i = 0, ret;

	if (tag != WMI_TAG_MAC_PHY_CAPABILITIES_EXT)
		return -EPROTO;

	if (ab->hw_params->single_pdev_only) {
		if (caps->hw_mode_id == WMI_HOST_HW_MODE_SINGLE) {
			support_320mhz = le32_to_cpu(caps->eht_cap_phy_info_5ghz[0]) &
					 IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
			cap_band = &ab->pdevs[0].cap.band[NL80211_BAND_6GHZ];
			cap_band->eht_cap_phy_info[0] |= support_320mhz;
		}

		if (ab->wmi_ab.preferred_hw_mode != le32_to_cpu(caps->hw_mode_id))
			return 0;
	} else {
		for (i = 0; i < ab->num_radios; i++) {
			if (ab->pdevs[i].pdev_id ==
			    ath12k_wmi_caps_ext_get_pdev_id(caps))
				break;
		}

		if (i == ab->num_radios)
			return -EINVAL;
	}

	ret = ath12k_wmi_tlv_mac_phy_caps_ext_parse(ab, caps, &ab->pdevs[i]);
	if (ret) {
		ath12k_warn(ab,
			    "failed to parse extended MAC PHY capabilities for pdev %d: %d\n",
			    ab->pdevs[i].pdev_id, ret);
		return ret;
	}

	return 0;
}
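
/* Record the supported 2 GHz/5 GHz frequency range for the given
 * (hw mode, phy) pair, clamped to the host minimum/maximum limits.
 */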
static void
ath12k_wmi_update_freq_info(struct ath12k_base *ab,
			    struct ath12k_svc_ext_mac_phy_info *mac_cap,
			    enum ath12k_hw_mode mode,
			    u32 phy_id)
{
	struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
	struct ath12k_hw_mode_freq_range_arg *mac_range;

	mac_range = &hw_mode_info->freq_range_caps[mode][phy_id];

	if (mac_cap->supported_bands & WMI_HOST_WLAN_2GHZ_CAP) {
		mac_range->low_2ghz_freq = max_t(u32,
						 mac_cap->hw_freq_range.low_2ghz_freq,
						 ATH12K_MIN_2GHZ_FREQ);
		mac_range->high_2ghz_freq = mac_cap->hw_freq_range.high_2ghz_freq ?
					    min_t(u32,
						  mac_cap->hw_freq_range.high_2ghz_freq,
						  ATH12K_MAX_2GHZ_FREQ) :
					    ATH12K_MAX_2GHZ_FREQ;
	}

	if (mac_cap->supported_bands & WMI_HOST_WLAN_5GHZ_CAP) {
		mac_range->low_5ghz_freq = max_t(u32,
						 mac_cap->hw_freq_range.low_5ghz_freq,
						 ATH12K_MIN_5GHZ_FREQ);
		mac_range->high_5ghz_freq = mac_cap->hw_freq_range.high_5ghz_freq ?
					    min_t(u32,
						  mac_cap->hw_freq_range.high_5ghz_freq,
						  ATH12K_MAX_6GHZ_FREQ) :
					    ATH12K_MAX_6GHZ_FREQ;
	}
}

static bool
ath12k_wmi_all_phy_range_updated(struct ath12k_base *ab,
				 enum ath12k_hw_mode hwmode)
{
	struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
	struct ath12k_hw_mode_freq_range_arg *mac_range;
	u8 phy_id;

	for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
		mac_range = &hw_mode_info->freq_range_caps[hwmode][phy_id];
		/* modify SBS/DBS range only when both phy for DBS are filled */
		if (!mac_range->low_2ghz_freq && !mac_range->low_5ghz_freq)
			return false;
	}

	return true;
}

static void ath12k_wmi_update_dbs_freq_info(struct ath12k_base *ab)
{
	struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
	struct ath12k_hw_mode_freq_range_arg *mac_range;
	u8 phy_id;

	mac_range = hw_mode_info->freq_range_caps[ATH12K_HW_MODE_DBS];
	/* Reset 5 GHz range for shared mac for DBS */
	for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
		if (mac_range[phy_id].low_2ghz_freq &&
		    mac_range[phy_id].low_5ghz_freq) {
			mac_range[phy_id].low_5ghz_freq = 0;
			mac_range[phy_id].high_5ghz_freq = 0;
		}
	}
}

static u32
ath12k_wmi_get_highest_5ghz_freq_from_range(struct ath12k_hw_mode_freq_range_arg *range)
{
	u32 highest_freq = 0;
	u8 phy_id;

	for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
		if (range[phy_id].high_5ghz_freq > highest_freq)
			highest_freq = range[phy_id].high_5ghz_freq;
	}

	return highest_freq ? highest_freq : ATH12K_MAX_6GHZ_FREQ;
}

static u32
ath12k_wmi_get_lowest_5ghz_freq_from_range(struct ath12k_hw_mode_freq_range_arg *range)
{
	u32 lowest_freq = 0;
	u8 phy_id;

	for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
		if ((!lowest_freq && range[phy_id].low_5ghz_freq) ||
		    range[phy_id].low_5ghz_freq < lowest_freq)
			lowest_freq = range[phy_id].low_5ghz_freq;
	}

	return lowest_freq ? lowest_freq : ATH12K_MIN_5GHZ_FREQ;
}
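
/* Derive the SBS "upper share" ranges for a fixed 5 GHz split
 * frequency: the mac that shares 2.4 GHz takes the 5 GHz range above
 * the split; the lower-share variant below mirrors this with the range
 * below the split.
 */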
static void
ath12k_wmi_fill_upper_share_sbs_freq(struct ath12k_base *ab,
				     u16 sbs_range_sep,
				     struct ath12k_hw_mode_freq_range_arg *ref_freq)
{
	struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
	struct ath12k_hw_mode_freq_range_arg *upper_sbs_freq_range;
	u8 phy_id;

	upper_sbs_freq_range =
		hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS_UPPER_SHARE];

	for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
		upper_sbs_freq_range[phy_id].low_2ghz_freq =
			ref_freq[phy_id].low_2ghz_freq;
		upper_sbs_freq_range[phy_id].high_2ghz_freq =
			ref_freq[phy_id].high_2ghz_freq;

		/* update for shared mac */
		if (upper_sbs_freq_range[phy_id].low_2ghz_freq) {
			upper_sbs_freq_range[phy_id].low_5ghz_freq = sbs_range_sep + 10;
			upper_sbs_freq_range[phy_id].high_5ghz_freq =
				ath12k_wmi_get_highest_5ghz_freq_from_range(ref_freq);
		} else {
			upper_sbs_freq_range[phy_id].low_5ghz_freq =
				ath12k_wmi_get_lowest_5ghz_freq_from_range(ref_freq);
			upper_sbs_freq_range[phy_id].high_5ghz_freq = sbs_range_sep;
		}
	}
}

static void
ath12k_wmi_fill_lower_share_sbs_freq(struct ath12k_base *ab,
				     u16 sbs_range_sep,
				     struct ath12k_hw_mode_freq_range_arg *ref_freq)
{
	struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
	struct ath12k_hw_mode_freq_range_arg *lower_sbs_freq_range;
	u8 phy_id;

	lower_sbs_freq_range =
		hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS_LOWER_SHARE];

	for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
		lower_sbs_freq_range[phy_id].low_2ghz_freq =
			ref_freq[phy_id].low_2ghz_freq;
		lower_sbs_freq_range[phy_id].high_2ghz_freq =
			ref_freq[phy_id].high_2ghz_freq;

		/* update for shared mac */
		if (lower_sbs_freq_range[phy_id].low_2ghz_freq) {
			lower_sbs_freq_range[phy_id].low_5ghz_freq =
				ath12k_wmi_get_lowest_5ghz_freq_from_range(ref_freq);
			lower_sbs_freq_range[phy_id].high_5ghz_freq = sbs_range_sep;
		} else {
			lower_sbs_freq_range[phy_id].low_5ghz_freq = sbs_range_sep + 10;
			lower_sbs_freq_range[phy_id].high_5ghz_freq =
				ath12k_wmi_get_highest_5ghz_freq_from_range(ref_freq);
		}
	}
}

static const char *ath12k_wmi_hw_mode_to_str(enum ath12k_hw_mode hw_mode)
{
	static const char * const mode_str[] = {
		[ATH12K_HW_MODE_SMM] = "SMM",
		[ATH12K_HW_MODE_DBS] = "DBS",
		[ATH12K_HW_MODE_SBS] = "SBS",
		[ATH12K_HW_MODE_SBS_UPPER_SHARE] = "SBS_UPPER_SHARE",
		[ATH12K_HW_MODE_SBS_LOWER_SHARE] = "SBS_LOWER_SHARE",
	};

	if (hw_mode >= ARRAY_SIZE(mode_str))
		return "Unknown";

	return mode_str[hw_mode];
}

static void
ath12k_wmi_dump_freq_range_per_mac(struct ath12k_base *ab,
				   struct ath12k_hw_mode_freq_range_arg *freq_range,
				   enum ath12k_hw_mode hw_mode)
{
	u8 i;

	for (i = 0; i < MAX_RADIOS; i++)
		if (freq_range[i].low_2ghz_freq || freq_range[i].low_5ghz_freq)
			ath12k_dbg(ab, ATH12K_DBG_WMI,
				   "frequency range: %s(%d) mac %d 2 GHz [%d - %d] 5 GHz [%d - %d]",
				   ath12k_wmi_hw_mode_to_str(hw_mode),
				   hw_mode, i,
				   freq_range[i].low_2ghz_freq,
				   freq_range[i].high_2ghz_freq,
				   freq_range[i].low_5ghz_freq,
				   freq_range[i].high_5ghz_freq);
}

static void ath12k_wmi_dump_freq_range(struct ath12k_base *ab)
{
	struct ath12k_hw_mode_freq_range_arg *freq_range;
	u8 i;

	for (i = ATH12K_HW_MODE_SMM; i < ATH12K_HW_MODE_MAX; i++) {
		freq_range = ab->wmi_ab.hw_mode_info.freq_range_caps[i];
		ath12k_wmi_dump_freq_range_per_mac(ab, freq_range, i);
	}
}
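
/* For SBS ranges reported without a fixed split frequency, trim the
 * non-shared mac's 5 GHz range so it does not overlap the mac that
 * shares 5 GHz with 2.4 GHz.
 */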
ath12k_hw_mode_freq_range_arg *freq_range;
5363 u8 i;
5364
5365 for (i = ATH12K_HW_MODE_SMM; i < ATH12K_HW_MODE_MAX; i++) {
5366 freq_range = ab->wmi_ab.hw_mode_info.freq_range_caps[i];
5367 ath12k_wmi_dump_freq_range_per_mac(ab, freq_range, i);
5368 }
5369 }
5370
5371 static int ath12k_wmi_modify_sbs_freq(struct ath12k_base *ab, u8 phy_id)
5372 {
5373 struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
5374 struct ath12k_hw_mode_freq_range_arg *sbs_mac_range, *shared_mac_range;
5375 struct ath12k_hw_mode_freq_range_arg *non_shared_range;
5376 u8 shared_phy_id;
5377
5378 sbs_mac_range = &hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS][phy_id];
5379
5380 /* if SBS mac range has both 2.4 and 5 GHz ranges, i.e. shared phy_id,
5381 * keep the range as it is in SBS
5382 */
5383 if (sbs_mac_range->low_2ghz_freq && sbs_mac_range->low_5ghz_freq)
5384 return 0;
5385
5386 if (sbs_mac_range->low_2ghz_freq && !sbs_mac_range->low_5ghz_freq) {
5387 ath12k_err(ab, "invalid DBS/SBS mode with only 2.4 GHz");
5388 ath12k_wmi_dump_freq_range_per_mac(ab, sbs_mac_range, ATH12K_HW_MODE_SBS);
5389 return -EINVAL;
5390 }
5391
5392 non_shared_range = sbs_mac_range;
5393 /* if SBS mac range has only 5 GHz then it's the non-shared phy, so
5394 * modify the range as per the shared mac.
5395 */
5396 shared_phy_id = phy_id ? 0 : 1;
5397 shared_mac_range =
5398 &hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS][shared_phy_id];
5399
5400 if (shared_mac_range->low_5ghz_freq > non_shared_range->low_5ghz_freq) {
5401 ath12k_dbg(ab, ATH12K_DBG_WMI, "high 5 GHz shared");
5402 /* If the shared mac lower 5 GHz frequency is greater than
5403 * non-shared mac lower 5 GHz frequency then the shared mac has
5404 * high 5 GHz shared with 2.4 GHz. So non-shared mac's 5 GHz high
5405 * freq should be less than the shared mac's low 5 GHz freq.
5406 */
5407 if (non_shared_range->high_5ghz_freq >=
5408 shared_mac_range->low_5ghz_freq)
5409 non_shared_range->high_5ghz_freq =
5410 max_t(u32, shared_mac_range->low_5ghz_freq - 10,
5411 non_shared_range->low_5ghz_freq);
5412 } else if (shared_mac_range->high_5ghz_freq <
5413 non_shared_range->high_5ghz_freq) {
5414 ath12k_dbg(ab, ATH12K_DBG_WMI, "low 5 GHz shared");
5415 /* If the shared mac high 5 GHz frequency is less than
5416 * non-shared mac high 5 GHz frequency then the shared mac has
5417 * low 5 GHz shared with 2.4 GHz. So non-shared mac's 5 GHz low
5418 * freq should be greater than the shared mac's high 5 GHz freq.
5419 */
5420 if (shared_mac_range->high_5ghz_freq >=
5421 non_shared_range->low_5ghz_freq)
5422 non_shared_range->low_5ghz_freq =
5423 min_t(u32, shared_mac_range->high_5ghz_freq + 10,
5424 non_shared_range->high_5ghz_freq);
5425 } else {
5426 ath12k_warn(ab, "invalid SBS range with all 5 GHz shared");
5427 return -EINVAL;
5428 }
5429
5430 return 0;
5431 }
5432
5433 static void ath12k_wmi_update_sbs_freq_info(struct ath12k_base *ab)
5434 {
5435 struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
5436 struct ath12k_hw_mode_freq_range_arg *mac_range;
5437 u16 sbs_range_sep;
5438 u8 phy_id;
5439 int ret;
5440
5441 mac_range = hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS];
5442
5443 /* If sbs_lower_band_end_freq has a value, then the frequency range
5444 * will be split using that value.
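 * For example (an assumed value, not a firmware requirement): with
 * sbs_lower_band_end_freq reported as 5700 MHz, the lower-share mode
 * keeps [low_5ghz_freq .. 5700] on the shared mac while the
 * upper-share mode keeps [5710 .. high_5ghz_freq], the 10 MHz gap
 * coming from the "+ 10" offsets in the fill helpers above.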
5445 */
5446 sbs_range_sep = ab->wmi_ab.sbs_lower_band_end_freq;
5447 if (sbs_range_sep) {
5448 ath12k_wmi_fill_upper_share_sbs_freq(ab, sbs_range_sep,
5449 mac_range);
5450 ath12k_wmi_fill_lower_share_sbs_freq(ab, sbs_range_sep,
5451 mac_range);
5452 /* Hardware specifies the range boundary with sbs_range_sep
5453 * (i.e. the boundary between 5 GHz high and 5 GHz low), so
5454 * reset the original range to make sure it will not get used.
5455 */
5456 memset(mac_range, 0, sizeof(*mac_range) * MAX_RADIOS);
5457 return;
5458 }
5459
5460 /* If sbs_lower_band_end_freq is not set, firmware will send one
5461 * shared mac range and one non-shared mac range, so update those
5462 * frequencies accordingly.
5463 */
5464 for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
5465 ret = ath12k_wmi_modify_sbs_freq(ab, phy_id);
5466 if (ret) {
5467 memset(mac_range, 0, sizeof(*mac_range) * MAX_RADIOS);
5468 break;
5469 }
5470 }
5471 }
5472
5473 static void
5474 ath12k_wmi_update_mac_freq_info(struct ath12k_base *ab,
5475 enum wmi_host_hw_mode_config_type hw_config_type,
5476 u32 phy_id,
5477 struct ath12k_svc_ext_mac_phy_info *mac_cap)
5478 {
5479 if (phy_id >= MAX_RADIOS) {
5480 ath12k_err(ab, "more than two MACs are not supported: %d", phy_id);
5481 return;
5482 }
5483
5484 ath12k_dbg(ab, ATH12K_DBG_WMI,
5485 "hw_mode_cfg %d mac %d band 0x%x SBS cutoff freq %d 2 GHz [%d - %d] 5 GHz [%d - %d]",
5486 hw_config_type, phy_id, mac_cap->supported_bands,
5487 ab->wmi_ab.sbs_lower_band_end_freq,
5488 mac_cap->hw_freq_range.low_2ghz_freq,
5489 mac_cap->hw_freq_range.high_2ghz_freq,
5490 mac_cap->hw_freq_range.low_5ghz_freq,
5491 mac_cap->hw_freq_range.high_5ghz_freq);
5492
5493 switch (hw_config_type) {
5494 case WMI_HOST_HW_MODE_SINGLE:
5495 if (phy_id) {
5496 ath12k_dbg(ab, ATH12K_DBG_WMI, "mac phy 1 is not supported");
5497 break;
5498 }
5499 ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_SMM, phy_id);
5500 break;
5501
5502 case WMI_HOST_HW_MODE_DBS:
5503 if (!ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_DBS))
5504 ath12k_wmi_update_freq_info(ab, mac_cap,
5505 ATH12K_HW_MODE_DBS, phy_id);
5506 break;
5507 case WMI_HOST_HW_MODE_DBS_SBS:
5508 case WMI_HOST_HW_MODE_DBS_OR_SBS:
5509 ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_DBS, phy_id);
5510 if (ab->wmi_ab.sbs_lower_band_end_freq ||
5511 mac_cap->hw_freq_range.low_5ghz_freq ||
5512 mac_cap->hw_freq_range.low_2ghz_freq)
5513 ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_SBS,
5514 phy_id);
5515
5516 if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_DBS))
5517 ath12k_wmi_update_dbs_freq_info(ab);
5518 if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS))
5519 ath12k_wmi_update_sbs_freq_info(ab);
5520 break;
5521 case WMI_HOST_HW_MODE_SBS:
5522 case WMI_HOST_HW_MODE_SBS_PASSIVE:
5523 ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_SBS, phy_id);
5524 if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS))
5525 ath12k_wmi_update_sbs_freq_info(ab);
5526
5527 break;
5528 default:
5529 break;
5530 }
5531 }
5532
5533 static bool ath12k_wmi_sbs_range_present(struct ath12k_base *ab)
5534 {
5535 if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS) ||
5536 (ab->wmi_ab.sbs_lower_band_end_freq &&
5537 ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS_LOWER_SHARE) &&
5538 ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS_UPPER_SHARE)))
5539 return true;
5540
5541 return false;
5542 }
5543
5544 static int ath12k_wmi_update_hw_mode_list(struct ath12k_base *ab)
5545 {
5546 struct ath12k_svc_ext_info *svc_ext_info =
&ab->wmi_ab.svc_ext_info; 5546 struct ath12k_hw_mode_info *info = &ab->wmi_ab.hw_mode_info; 5547 enum wmi_host_hw_mode_config_type hw_config_type; 5548 struct ath12k_svc_ext_mac_phy_info *tmp; 5549 bool dbs_mode = false, sbs_mode = false; 5550 u32 i, j = 0; 5551 5552 if (!svc_ext_info->num_hw_modes) { 5553 ath12k_err(ab, "invalid number of hw modes"); 5554 return -EINVAL; 5555 } 5556 5557 ath12k_dbg(ab, ATH12K_DBG_WMI, "updated HW mode list: num modes %d", 5558 svc_ext_info->num_hw_modes); 5559 5560 memset(info->freq_range_caps, 0, sizeof(info->freq_range_caps)); 5561 5562 for (i = 0; i < svc_ext_info->num_hw_modes; i++) { 5563 if (j >= ATH12K_MAX_MAC_PHY_CAP) 5564 return -EINVAL; 5565 5566 /* Update for MAC0 */ 5567 tmp = &svc_ext_info->mac_phy_info[j++]; 5568 hw_config_type = tmp->hw_mode_config_type; 5569 ath12k_wmi_update_mac_freq_info(ab, hw_config_type, tmp->phy_id, tmp); 5570 5571 /* SBS and DBS have dual MAC. Up to 2 MACs are considered. */ 5572 if (hw_config_type == WMI_HOST_HW_MODE_DBS || 5573 hw_config_type == WMI_HOST_HW_MODE_SBS_PASSIVE || 5574 hw_config_type == WMI_HOST_HW_MODE_SBS || 5575 hw_config_type == WMI_HOST_HW_MODE_DBS_OR_SBS) { 5576 if (j >= ATH12K_MAX_MAC_PHY_CAP) 5577 return -EINVAL; 5578 /* Update for MAC1 */ 5579 tmp = &svc_ext_info->mac_phy_info[j++]; 5580 ath12k_wmi_update_mac_freq_info(ab, hw_config_type, 5581 tmp->phy_id, tmp); 5582 5583 if (hw_config_type == WMI_HOST_HW_MODE_DBS || 5584 hw_config_type == WMI_HOST_HW_MODE_DBS_OR_SBS) 5585 dbs_mode = true; 5586 5587 if (ath12k_wmi_sbs_range_present(ab) && 5588 (hw_config_type == WMI_HOST_HW_MODE_SBS_PASSIVE || 5589 hw_config_type == WMI_HOST_HW_MODE_SBS || 5590 hw_config_type == WMI_HOST_HW_MODE_DBS_OR_SBS)) 5591 sbs_mode = true; 5592 } 5593 } 5594 5595 info->support_dbs = dbs_mode; 5596 info->support_sbs = sbs_mode; 5597 5598 ath12k_wmi_dump_freq_range(ab); 5599 5600 return 0; 5601 } 5602 5603 static int ath12k_wmi_svc_rdy_ext2_parse(struct ath12k_base *ab, 5604 u16 tag, u16 len, 5605 const void *ptr, void *data) 5606 { 5607 const struct ath12k_wmi_dbs_or_sbs_cap_params *dbs_or_sbs_caps; 5608 struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0]; 5609 struct ath12k_wmi_svc_rdy_ext2_parse *parse = data; 5610 int ret; 5611 5612 switch (tag) { 5613 case WMI_TAG_SERVICE_READY_EXT2_EVENT: 5614 ret = ath12k_pull_svc_ready_ext2(wmi_handle, ptr, 5615 &parse->arg); 5616 if (ret) { 5617 ath12k_warn(ab, 5618 "failed to extract wmi service ready ext2 parameters: %d\n", 5619 ret); 5620 return ret; 5621 } 5622 5623 ab->wmi_ab.dp_peer_meta_data_ver = 5624 u32_get_bits(parse->arg.target_cap_flags, 5625 WMI_TARGET_CAP_FLAGS_RX_PEER_METADATA_VERSION); 5626 break; 5627 5628 case WMI_TAG_ARRAY_STRUCT: 5629 if (!parse->dma_ring_cap_done) { 5630 ret = ath12k_wmi_dma_ring_caps(ab, len, ptr, 5631 &parse->dma_caps_parse); 5632 if (ret) 5633 return ret; 5634 5635 parse->dma_ring_cap_done = true; 5636 } else if (!parse->spectral_bin_scaling_done) { 5637 /* TODO: This is a place-holder as WMI tag for 5638 * spectral scaling is before 5639 * WMI_TAG_MAC_PHY_CAPABILITIES_EXT 5640 */ 5641 parse->spectral_bin_scaling_done = true; 5642 } else if (!parse->mac_phy_caps_ext_done) { 5643 ret = ath12k_wmi_tlv_iter(ab, ptr, len, 5644 ath12k_wmi_tlv_mac_phy_caps_ext, 5645 parse); 5646 if (ret) { 5647 ath12k_warn(ab, "failed to parse extended MAC PHY capabilities WMI TLV: %d\n", 5648 ret); 5649 return ret; 5650 } 5651 5652 parse->mac_phy_caps_ext_done = true; 5653 } else if (!parse->hal_reg_caps_ext2_done) { 5654 parse->hal_reg_caps_ext2_done = 
true; 5655 } else if (!parse->scan_radio_caps_ext2_done) { 5656 parse->scan_radio_caps_ext2_done = true; 5657 } else if (!parse->twt_caps_done) { 5658 parse->twt_caps_done = true; 5659 } else if (!parse->htt_msdu_idx_to_qtype_map_done) { 5660 parse->htt_msdu_idx_to_qtype_map_done = true; 5661 } else if (!parse->dbs_or_sbs_cap_ext_done) { 5662 dbs_or_sbs_caps = ptr; 5663 ab->wmi_ab.sbs_lower_band_end_freq = 5664 __le32_to_cpu(dbs_or_sbs_caps->sbs_lower_band_end_freq); 5665 5666 ath12k_dbg(ab, ATH12K_DBG_WMI, "sbs_lower_band_end_freq %u\n", 5667 ab->wmi_ab.sbs_lower_band_end_freq); 5668 5669 ret = ath12k_wmi_update_hw_mode_list(ab); 5670 if (ret) { 5671 ath12k_warn(ab, "failed to update hw mode list: %d\n", 5672 ret); 5673 return ret; 5674 } 5675 5676 parse->dbs_or_sbs_cap_ext_done = true; 5677 } 5678 5679 break; 5680 default: 5681 break; 5682 } 5683 5684 return 0; 5685 } 5686 5687 static int ath12k_service_ready_ext2_event(struct ath12k_base *ab, 5688 struct sk_buff *skb) 5689 { 5690 struct ath12k_wmi_svc_rdy_ext2_parse svc_rdy_ext2 = { }; 5691 int ret; 5692 5693 ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len, 5694 ath12k_wmi_svc_rdy_ext2_parse, 5695 &svc_rdy_ext2); 5696 if (ret) { 5697 ath12k_warn(ab, "failed to parse ext2 event tlv %d\n", ret); 5698 goto err; 5699 } 5700 5701 complete(&ab->wmi_ab.service_ready); 5702 5703 return 0; 5704 5705 err: 5706 ath12k_wmi_free_dbring_caps(ab); 5707 return ret; 5708 } 5709 5710 static int ath12k_pull_vdev_start_resp_tlv(struct ath12k_base *ab, struct sk_buff *skb, 5711 struct wmi_vdev_start_resp_event *vdev_rsp) 5712 { 5713 const void **tb; 5714 const struct wmi_vdev_start_resp_event *ev; 5715 int ret; 5716 5717 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 5718 if (IS_ERR(tb)) { 5719 ret = PTR_ERR(tb); 5720 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 5721 return ret; 5722 } 5723 5724 ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT]; 5725 if (!ev) { 5726 ath12k_warn(ab, "failed to fetch vdev start resp ev"); 5727 kfree(tb); 5728 return -EPROTO; 5729 } 5730 5731 *vdev_rsp = *ev; 5732 5733 kfree(tb); 5734 return 0; 5735 } 5736 5737 static struct ath12k_reg_rule 5738 *create_ext_reg_rules_from_wmi(u32 num_reg_rules, 5739 struct ath12k_wmi_reg_rule_ext_params *wmi_reg_rule) 5740 { 5741 struct ath12k_reg_rule *reg_rule_ptr; 5742 u32 count; 5743 5744 reg_rule_ptr = kzalloc((num_reg_rules * sizeof(*reg_rule_ptr)), 5745 GFP_ATOMIC); 5746 5747 if (!reg_rule_ptr) 5748 return NULL; 5749 5750 for (count = 0; count < num_reg_rules; count++) { 5751 reg_rule_ptr[count].start_freq = 5752 le32_get_bits(wmi_reg_rule[count].freq_info, 5753 REG_RULE_START_FREQ); 5754 reg_rule_ptr[count].end_freq = 5755 le32_get_bits(wmi_reg_rule[count].freq_info, 5756 REG_RULE_END_FREQ); 5757 reg_rule_ptr[count].max_bw = 5758 le32_get_bits(wmi_reg_rule[count].bw_pwr_info, 5759 REG_RULE_MAX_BW); 5760 reg_rule_ptr[count].reg_power = 5761 le32_get_bits(wmi_reg_rule[count].bw_pwr_info, 5762 REG_RULE_REG_PWR); 5763 reg_rule_ptr[count].ant_gain = 5764 le32_get_bits(wmi_reg_rule[count].bw_pwr_info, 5765 REG_RULE_ANT_GAIN); 5766 reg_rule_ptr[count].flags = 5767 le32_get_bits(wmi_reg_rule[count].flag_info, 5768 REG_RULE_FLAGS); 5769 reg_rule_ptr[count].psd_flag = 5770 le32_get_bits(wmi_reg_rule[count].psd_power_info, 5771 REG_RULE_PSD_INFO); 5772 reg_rule_ptr[count].psd_eirp = 5773 le32_get_bits(wmi_reg_rule[count].psd_power_info, 5774 REG_RULE_PSD_EIRP); 5775 } 5776 5777 return reg_rule_ptr; 5778 } 5779 5780 static u8 ath12k_wmi_ignore_num_extra_rules(struct 
ath12k_wmi_reg_rule_ext_params *rule, 5781 u32 num_reg_rules) 5782 { 5783 u8 num_invalid_5ghz_rules = 0; 5784 u32 count, start_freq; 5785 5786 for (count = 0; count < num_reg_rules; count++) { 5787 start_freq = le32_get_bits(rule[count].freq_info, REG_RULE_START_FREQ); 5788 5789 if (start_freq >= ATH12K_MIN_6GHZ_FREQ) 5790 num_invalid_5ghz_rules++; 5791 } 5792 5793 return num_invalid_5ghz_rules; 5794 } 5795 5796 static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab, 5797 struct sk_buff *skb, 5798 struct ath12k_reg_info *reg_info) 5799 { 5800 const void **tb; 5801 const struct wmi_reg_chan_list_cc_ext_event *ev; 5802 struct ath12k_wmi_reg_rule_ext_params *ext_wmi_reg_rule; 5803 u32 num_2g_reg_rules, num_5g_reg_rules; 5804 u32 num_6g_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE]; 5805 u32 num_6g_reg_rules_cl[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE]; 5806 u8 num_invalid_5ghz_ext_rules; 5807 u32 total_reg_rules = 0; 5808 int ret, i, j; 5809 5810 ath12k_dbg(ab, ATH12K_DBG_WMI, "processing regulatory ext channel list\n"); 5811 5812 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 5813 if (IS_ERR(tb)) { 5814 ret = PTR_ERR(tb); 5815 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 5816 return ret; 5817 } 5818 5819 ev = tb[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT]; 5820 if (!ev) { 5821 ath12k_warn(ab, "failed to fetch reg chan list ext update ev\n"); 5822 kfree(tb); 5823 return -EPROTO; 5824 } 5825 5826 reg_info->num_2g_reg_rules = le32_to_cpu(ev->num_2g_reg_rules); 5827 reg_info->num_5g_reg_rules = le32_to_cpu(ev->num_5g_reg_rules); 5828 reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP] = 5829 le32_to_cpu(ev->num_6g_reg_rules_ap_lpi); 5830 reg_info->num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP] = 5831 le32_to_cpu(ev->num_6g_reg_rules_ap_sp); 5832 reg_info->num_6g_reg_rules_ap[WMI_REG_VLP_AP] = 5833 le32_to_cpu(ev->num_6g_reg_rules_ap_vlp); 5834 5835 for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) { 5836 reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] = 5837 le32_to_cpu(ev->num_6g_reg_rules_cl_lpi[i]); 5838 reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] = 5839 le32_to_cpu(ev->num_6g_reg_rules_cl_sp[i]); 5840 reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] = 5841 le32_to_cpu(ev->num_6g_reg_rules_cl_vlp[i]); 5842 } 5843 5844 num_2g_reg_rules = reg_info->num_2g_reg_rules; 5845 total_reg_rules += num_2g_reg_rules; 5846 num_5g_reg_rules = reg_info->num_5g_reg_rules; 5847 total_reg_rules += num_5g_reg_rules; 5848 5849 if (num_2g_reg_rules > MAX_REG_RULES || num_5g_reg_rules > MAX_REG_RULES) { 5850 ath12k_warn(ab, "Num reg rules for 2G/5G exceeds max limit (num_2g_reg_rules: %d num_5g_reg_rules: %d max_rules: %d)\n", 5851 num_2g_reg_rules, num_5g_reg_rules, MAX_REG_RULES); 5852 kfree(tb); 5853 return -EINVAL; 5854 } 5855 5856 for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) { 5857 num_6g_reg_rules_ap[i] = reg_info->num_6g_reg_rules_ap[i]; 5858 5859 if (num_6g_reg_rules_ap[i] > MAX_6GHZ_REG_RULES) { 5860 ath12k_warn(ab, "Num 6G reg rules for AP mode(%d) exceeds max limit (num_6g_reg_rules_ap: %d, max_rules: %d)\n", 5861 i, num_6g_reg_rules_ap[i], MAX_6GHZ_REG_RULES); 5862 kfree(tb); 5863 return -EINVAL; 5864 } 5865 5866 total_reg_rules += num_6g_reg_rules_ap[i]; 5867 } 5868 5869 for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) { 5870 num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] = 5871 reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i]; 5872 total_reg_rules += num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i]; 5873 5874 num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] = 5875 
reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i]; 5876 total_reg_rules += num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i]; 5877 5878 num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] = 5879 reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i]; 5880 total_reg_rules += num_6g_reg_rules_cl[WMI_REG_VLP_AP][i]; 5881 5882 if (num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] > MAX_6GHZ_REG_RULES || 5883 num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] > MAX_6GHZ_REG_RULES || 5884 num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] > MAX_6GHZ_REG_RULES) { 5885 ath12k_warn(ab, "Num 6g client reg rules exceeds max limit, for client(type: %d)\n", 5886 i); 5887 kfree(tb); 5888 return -EINVAL; 5889 } 5890 } 5891 5892 if (!total_reg_rules) { 5893 ath12k_warn(ab, "No reg rules available\n"); 5894 kfree(tb); 5895 return -EINVAL; 5896 } 5897 5898 memcpy(reg_info->alpha2, &ev->alpha2, REG_ALPHA2_LEN); 5899 5900 reg_info->dfs_region = le32_to_cpu(ev->dfs_region); 5901 reg_info->phybitmap = le32_to_cpu(ev->phybitmap); 5902 reg_info->num_phy = le32_to_cpu(ev->num_phy); 5903 reg_info->phy_id = le32_to_cpu(ev->phy_id); 5904 reg_info->ctry_code = le32_to_cpu(ev->country_id); 5905 reg_info->reg_dmn_pair = le32_to_cpu(ev->domain_code); 5906 5907 switch (le32_to_cpu(ev->status_code)) { 5908 case WMI_REG_SET_CC_STATUS_PASS: 5909 reg_info->status_code = REG_SET_CC_STATUS_PASS; 5910 break; 5911 case WMI_REG_CURRENT_ALPHA2_NOT_FOUND: 5912 reg_info->status_code = REG_CURRENT_ALPHA2_NOT_FOUND; 5913 break; 5914 case WMI_REG_INIT_ALPHA2_NOT_FOUND: 5915 reg_info->status_code = REG_INIT_ALPHA2_NOT_FOUND; 5916 break; 5917 case WMI_REG_SET_CC_CHANGE_NOT_ALLOWED: 5918 reg_info->status_code = REG_SET_CC_CHANGE_NOT_ALLOWED; 5919 break; 5920 case WMI_REG_SET_CC_STATUS_NO_MEMORY: 5921 reg_info->status_code = REG_SET_CC_STATUS_NO_MEMORY; 5922 break; 5923 case WMI_REG_SET_CC_STATUS_FAIL: 5924 reg_info->status_code = REG_SET_CC_STATUS_FAIL; 5925 break; 5926 } 5927 5928 reg_info->is_ext_reg_event = true; 5929 5930 reg_info->min_bw_2g = le32_to_cpu(ev->min_bw_2g); 5931 reg_info->max_bw_2g = le32_to_cpu(ev->max_bw_2g); 5932 reg_info->min_bw_5g = le32_to_cpu(ev->min_bw_5g); 5933 reg_info->max_bw_5g = le32_to_cpu(ev->max_bw_5g); 5934 reg_info->min_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->min_bw_6g_ap_lpi); 5935 reg_info->max_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->max_bw_6g_ap_lpi); 5936 reg_info->min_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->min_bw_6g_ap_sp); 5937 reg_info->max_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->max_bw_6g_ap_sp); 5938 reg_info->min_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->min_bw_6g_ap_vlp); 5939 reg_info->max_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->max_bw_6g_ap_vlp); 5940 5941 for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) { 5942 reg_info->min_bw_6g_client[WMI_REG_INDOOR_AP][i] = 5943 le32_to_cpu(ev->min_bw_6g_client_lpi[i]); 5944 reg_info->max_bw_6g_client[WMI_REG_INDOOR_AP][i] = 5945 le32_to_cpu(ev->max_bw_6g_client_lpi[i]); 5946 reg_info->min_bw_6g_client[WMI_REG_STD_POWER_AP][i] = 5947 le32_to_cpu(ev->min_bw_6g_client_sp[i]); 5948 reg_info->max_bw_6g_client[WMI_REG_STD_POWER_AP][i] = 5949 le32_to_cpu(ev->max_bw_6g_client_sp[i]); 5950 reg_info->min_bw_6g_client[WMI_REG_VLP_AP][i] = 5951 le32_to_cpu(ev->min_bw_6g_client_vlp[i]); 5952 reg_info->max_bw_6g_client[WMI_REG_VLP_AP][i] = 5953 le32_to_cpu(ev->max_bw_6g_client_vlp[i]); 5954 } 5955 5956 ath12k_dbg(ab, ATH12K_DBG_WMI, 5957 "%s:cc_ext %s dfs %d BW: min_2g %d max_2g %d min_5g %d max_5g %d phy_bitmap 0x%x", 5958 __func__, reg_info->alpha2, reg_info->dfs_region, 5959 
reg_info->min_bw_2g, reg_info->max_bw_2g,
5960 reg_info->min_bw_5g, reg_info->max_bw_5g,
5961 reg_info->phybitmap);
5962
5963 ath12k_dbg(ab, ATH12K_DBG_WMI,
5964 "num_2g_reg_rules %d num_5g_reg_rules %d",
5965 num_2g_reg_rules, num_5g_reg_rules);
5966
5967 ath12k_dbg(ab, ATH12K_DBG_WMI,
5968 "num_6g_reg_rules_ap_lpi: %d num_6g_reg_rules_ap_sp: %d num_6g_reg_rules_ap_vlp: %d",
5969 num_6g_reg_rules_ap[WMI_REG_INDOOR_AP],
5970 num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP],
5971 num_6g_reg_rules_ap[WMI_REG_VLP_AP]);
5972
5973 ath12k_dbg(ab, ATH12K_DBG_WMI,
5974 "6g Regular client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d",
5975 num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_DEFAULT_CLIENT],
5976 num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_DEFAULT_CLIENT],
5977 num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_DEFAULT_CLIENT]);
5978
5979 ath12k_dbg(ab, ATH12K_DBG_WMI,
5980 "6g Subordinate client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d",
5981 num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_SUBORDINATE_CLIENT],
5982 num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_SUBORDINATE_CLIENT],
5983 num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_SUBORDINATE_CLIENT]);
5984
5985 ext_wmi_reg_rule =
5986 (struct ath12k_wmi_reg_rule_ext_params *)((u8 *)ev
5987 + sizeof(*ev)
5988 + sizeof(struct wmi_tlv));
5989
5990 if (num_2g_reg_rules) {
5991 reg_info->reg_rules_2g_ptr =
5992 create_ext_reg_rules_from_wmi(num_2g_reg_rules,
5993 ext_wmi_reg_rule);
5994
5995 if (!reg_info->reg_rules_2g_ptr) {
5996 kfree(tb);
5997 ath12k_warn(ab, "Unable to allocate memory for 2g rules\n");
5998 return -ENOMEM;
5999 }
6000 }
6001
6002 ext_wmi_reg_rule += num_2g_reg_rules;
6003
6004 /* Firmware might include a 6 GHz reg rule in the 5 GHz rule list
6005 * for a few countries along with a separate 6 GHz rule.
6006 * Having the same 6 GHz reg rule in both the 5 GHz and 6 GHz rule
6007 * lists causes the intersect check to be true, so the same rules
6008 * would be shown multiple times in the iw command output.
6009 * Hence, avoid parsing 6 GHz rules from the 5 GHz reg rule list.
6010 */
6011 num_invalid_5ghz_ext_rules = ath12k_wmi_ignore_num_extra_rules(ext_wmi_reg_rule,
6012 num_5g_reg_rules);
6013
6014 if (num_invalid_5ghz_ext_rules) {
6015 ath12k_dbg(ab, ATH12K_DBG_WMI,
6016 "CC: %s, %d 5 GHz reg rules from fw of which %d are invalid",
6017 reg_info->alpha2, reg_info->num_5g_reg_rules,
6018 num_invalid_5ghz_ext_rules);
6019
6020 num_5g_reg_rules = num_5g_reg_rules - num_invalid_5ghz_ext_rules;
6021 reg_info->num_5g_reg_rules = num_5g_reg_rules;
6022 }
6023
6024 if (num_5g_reg_rules) {
6025 reg_info->reg_rules_5g_ptr =
6026 create_ext_reg_rules_from_wmi(num_5g_reg_rules,
6027 ext_wmi_reg_rule);
6028
6029 if (!reg_info->reg_rules_5g_ptr) {
6030 kfree(tb);
6031 ath12k_warn(ab, "Unable to allocate memory for 5g rules\n");
6032 return -ENOMEM;
6033 }
6034 }
6035
6036 /* We have adjusted the number of 5 GHz reg rules above, but that
6037 * many rules still need to be skipped in ext_wmi_reg_rule.
6038 *
6039 * NOTE: num_invalid_5ghz_ext_rules will be 0 in all other cases.
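 * In other words, the increment below must advance past every 5 GHz
 * rule firmware sent, including the trailing 6 GHz entries that were
 * deliberately not parsed.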
6040 */
6041 ext_wmi_reg_rule += (num_5g_reg_rules + num_invalid_5ghz_ext_rules);
6042
6043 for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
6044 reg_info->reg_rules_6g_ap_ptr[i] =
6045 create_ext_reg_rules_from_wmi(num_6g_reg_rules_ap[i],
6046 ext_wmi_reg_rule);
6047
6048 if (!reg_info->reg_rules_6g_ap_ptr[i]) {
6049 kfree(tb);
6050 ath12k_warn(ab, "Unable to allocate memory for 6g ap rules\n");
6051 return -ENOMEM;
6052 }
6053
6054 ext_wmi_reg_rule += num_6g_reg_rules_ap[i];
6055 }
6056
6057 for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++) {
6058 for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
6059 reg_info->reg_rules_6g_client_ptr[j][i] =
6060 create_ext_reg_rules_from_wmi(num_6g_reg_rules_cl[j][i],
6061 ext_wmi_reg_rule);
6062
6063 if (!reg_info->reg_rules_6g_client_ptr[j][i]) {
6064 kfree(tb);
6065 ath12k_warn(ab, "Unable to allocate memory for 6g client rules\n");
6066 return -ENOMEM;
6067 }
6068
6069 ext_wmi_reg_rule += num_6g_reg_rules_cl[j][i];
6070 }
6071 }
6072
6073 reg_info->client_type = le32_to_cpu(ev->client_type);
6074 reg_info->rnr_tpe_usable = ev->rnr_tpe_usable;
6075 reg_info->unspecified_ap_usable = ev->unspecified_ap_usable;
6076 reg_info->domain_code_6g_ap[WMI_REG_INDOOR_AP] =
6077 le32_to_cpu(ev->domain_code_6g_ap_lpi);
6078 reg_info->domain_code_6g_ap[WMI_REG_STD_POWER_AP] =
6079 le32_to_cpu(ev->domain_code_6g_ap_sp);
6080 reg_info->domain_code_6g_ap[WMI_REG_VLP_AP] =
6081 le32_to_cpu(ev->domain_code_6g_ap_vlp);
6082
6083 for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
6084 reg_info->domain_code_6g_client[WMI_REG_INDOOR_AP][i] =
6085 le32_to_cpu(ev->domain_code_6g_client_lpi[i]);
6086 reg_info->domain_code_6g_client[WMI_REG_STD_POWER_AP][i] =
6087 le32_to_cpu(ev->domain_code_6g_client_sp[i]);
6088 reg_info->domain_code_6g_client[WMI_REG_VLP_AP][i] =
6089 le32_to_cpu(ev->domain_code_6g_client_vlp[i]);
6090 }
6091
6092 reg_info->domain_code_6g_super_id = le32_to_cpu(ev->domain_code_6g_super_id);
6093
6094 ath12k_dbg(ab, ATH12K_DBG_WMI, "6g client_type: %d domain_code_6g_super_id: %d",
6095 reg_info->client_type, reg_info->domain_code_6g_super_id);
6096
6097 ath12k_dbg(ab, ATH12K_DBG_WMI, "processed regulatory ext channel list\n");
6098
6099 kfree(tb);
6100 return 0;
6101 }
6102
6103 static int ath12k_pull_peer_del_resp_ev(struct ath12k_base *ab, struct sk_buff *skb,
6104 struct wmi_peer_delete_resp_event *peer_del_resp)
6105 {
6106 const void **tb;
6107 const struct wmi_peer_delete_resp_event *ev;
6108 int ret;
6109
6110 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6111 if (IS_ERR(tb)) {
6112 ret = PTR_ERR(tb);
6113 ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6114 return ret;
6115 }
6116
6117 ev = tb[WMI_TAG_PEER_DELETE_RESP_EVENT];
6118 if (!ev) {
6119 ath12k_warn(ab, "failed to fetch peer delete resp ev");
6120 kfree(tb);
6121 return -EPROTO;
6122 }
6123
6124 memset(peer_del_resp, 0, sizeof(*peer_del_resp));
6125
6126 peer_del_resp->vdev_id = ev->vdev_id;
6127 ether_addr_copy(peer_del_resp->peer_macaddr.addr,
6128 ev->peer_macaddr.addr);
6129
6130 kfree(tb);
6131 return 0;
6132 }
6133
6134 static int ath12k_pull_vdev_del_resp_ev(struct ath12k_base *ab,
6135 struct sk_buff *skb,
6136 u32 *vdev_id)
6137 {
6138 const void **tb;
6139 const struct wmi_vdev_delete_resp_event *ev;
6140 int ret;
6141
6142 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6143 if (IS_ERR(tb)) {
6144 ret = PTR_ERR(tb);
6145 ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6146 return ret;
6147 }
6148
6149 ev = tb[WMI_TAG_VDEV_DELETE_RESP_EVENT];
6150 if (!ev) {
6151
ath12k_warn(ab, "failed to fetch vdev delete resp ev"); 6152 kfree(tb); 6153 return -EPROTO; 6154 } 6155 6156 *vdev_id = le32_to_cpu(ev->vdev_id); 6157 6158 kfree(tb); 6159 return 0; 6160 } 6161 6162 static int ath12k_pull_bcn_tx_status_ev(struct ath12k_base *ab, 6163 struct sk_buff *skb, 6164 u32 *vdev_id, u32 *tx_status) 6165 { 6166 const void **tb; 6167 const struct wmi_bcn_tx_status_event *ev; 6168 int ret; 6169 6170 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6171 if (IS_ERR(tb)) { 6172 ret = PTR_ERR(tb); 6173 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 6174 return ret; 6175 } 6176 6177 ev = tb[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT]; 6178 if (!ev) { 6179 ath12k_warn(ab, "failed to fetch bcn tx status ev"); 6180 kfree(tb); 6181 return -EPROTO; 6182 } 6183 6184 *vdev_id = le32_to_cpu(ev->vdev_id); 6185 *tx_status = le32_to_cpu(ev->tx_status); 6186 6187 kfree(tb); 6188 return 0; 6189 } 6190 6191 static int ath12k_pull_vdev_stopped_param_tlv(struct ath12k_base *ab, struct sk_buff *skb, 6192 u32 *vdev_id) 6193 { 6194 const void **tb; 6195 const struct wmi_vdev_stopped_event *ev; 6196 int ret; 6197 6198 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6199 if (IS_ERR(tb)) { 6200 ret = PTR_ERR(tb); 6201 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 6202 return ret; 6203 } 6204 6205 ev = tb[WMI_TAG_VDEV_STOPPED_EVENT]; 6206 if (!ev) { 6207 ath12k_warn(ab, "failed to fetch vdev stop ev"); 6208 kfree(tb); 6209 return -EPROTO; 6210 } 6211 6212 *vdev_id = le32_to_cpu(ev->vdev_id); 6213 6214 kfree(tb); 6215 return 0; 6216 } 6217 6218 static int ath12k_wmi_tlv_mgmt_rx_parse(struct ath12k_base *ab, 6219 u16 tag, u16 len, 6220 const void *ptr, void *data) 6221 { 6222 struct wmi_tlv_mgmt_rx_parse *parse = data; 6223 6224 switch (tag) { 6225 case WMI_TAG_MGMT_RX_HDR: 6226 parse->fixed = ptr; 6227 break; 6228 case WMI_TAG_ARRAY_BYTE: 6229 if (!parse->frame_buf_done) { 6230 parse->frame_buf = ptr; 6231 parse->frame_buf_done = true; 6232 } 6233 break; 6234 } 6235 return 0; 6236 } 6237 6238 static int ath12k_pull_mgmt_rx_params_tlv(struct ath12k_base *ab, 6239 struct sk_buff *skb, 6240 struct ath12k_wmi_mgmt_rx_arg *hdr) 6241 { 6242 struct wmi_tlv_mgmt_rx_parse parse = { }; 6243 const struct ath12k_wmi_mgmt_rx_params *ev; 6244 const u8 *frame; 6245 int i, ret; 6246 6247 ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len, 6248 ath12k_wmi_tlv_mgmt_rx_parse, 6249 &parse); 6250 if (ret) { 6251 ath12k_warn(ab, "failed to parse mgmt rx tlv %d\n", ret); 6252 return ret; 6253 } 6254 6255 ev = parse.fixed; 6256 frame = parse.frame_buf; 6257 6258 if (!ev || !frame) { 6259 ath12k_warn(ab, "failed to fetch mgmt rx hdr"); 6260 return -EPROTO; 6261 } 6262 6263 hdr->pdev_id = le32_to_cpu(ev->pdev_id); 6264 hdr->chan_freq = le32_to_cpu(ev->chan_freq); 6265 hdr->channel = le32_to_cpu(ev->channel); 6266 hdr->snr = le32_to_cpu(ev->snr); 6267 hdr->rate = le32_to_cpu(ev->rate); 6268 hdr->phy_mode = le32_to_cpu(ev->phy_mode); 6269 hdr->buf_len = le32_to_cpu(ev->buf_len); 6270 hdr->status = le32_to_cpu(ev->status); 6271 hdr->flags = le32_to_cpu(ev->flags); 6272 hdr->rssi = a_sle32_to_cpu(ev->rssi); 6273 hdr->tsf_delta = le32_to_cpu(ev->tsf_delta); 6274 6275 for (i = 0; i < ATH_MAX_ANTENNA; i++) 6276 hdr->rssi_ctl[i] = le32_to_cpu(ev->rssi_ctl[i]); 6277 6278 if (skb->len < (frame - skb->data) + hdr->buf_len) { 6279 ath12k_warn(ab, "invalid length in mgmt rx hdr ev"); 6280 return -EPROTO; 6281 } 6282 6283 /* shift the sk_buff to point to `frame` */ 6284 skb_trim(skb, 0); 6285 skb_put(skb, frame - 
skb->data); 6286 skb_pull(skb, frame - skb->data); 6287 skb_put(skb, hdr->buf_len); 6288 6289 return 0; 6290 } 6291 6292 static int wmi_process_mgmt_tx_comp(struct ath12k *ar, u32 desc_id, 6293 u32 status, u32 ack_rssi) 6294 { 6295 struct sk_buff *msdu; 6296 struct ieee80211_tx_info *info; 6297 struct ath12k_skb_cb *skb_cb; 6298 int num_mgmt; 6299 6300 spin_lock_bh(&ar->txmgmt_idr_lock); 6301 msdu = idr_find(&ar->txmgmt_idr, desc_id); 6302 6303 if (!msdu) { 6304 ath12k_warn(ar->ab, "received mgmt tx compl for invalid msdu_id: %d\n", 6305 desc_id); 6306 spin_unlock_bh(&ar->txmgmt_idr_lock); 6307 return -ENOENT; 6308 } 6309 6310 idr_remove(&ar->txmgmt_idr, desc_id); 6311 spin_unlock_bh(&ar->txmgmt_idr_lock); 6312 6313 skb_cb = ATH12K_SKB_CB(msdu); 6314 dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); 6315 6316 info = IEEE80211_SKB_CB(msdu); 6317 memset(&info->status, 0, sizeof(info->status)); 6318 6319 /* skip tx rate update from ieee80211_status*/ 6320 info->status.rates[0].idx = -1; 6321 6322 if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status) { 6323 info->flags |= IEEE80211_TX_STAT_ACK; 6324 info->status.ack_signal = ack_rssi; 6325 info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID; 6326 } 6327 6328 if ((info->flags & IEEE80211_TX_CTL_NO_ACK) && !status) 6329 info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; 6330 6331 ieee80211_tx_status_irqsafe(ath12k_ar_to_hw(ar), msdu); 6332 6333 num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx); 6334 6335 /* WARN when we received this event without doing any mgmt tx */ 6336 if (num_mgmt < 0) 6337 WARN_ON_ONCE(1); 6338 6339 if (!num_mgmt) 6340 wake_up(&ar->txmgmt_empty_waitq); 6341 6342 return 0; 6343 } 6344 6345 static int ath12k_pull_mgmt_tx_compl_param_tlv(struct ath12k_base *ab, 6346 struct sk_buff *skb, 6347 struct wmi_mgmt_tx_compl_event *param) 6348 { 6349 const void **tb; 6350 const struct wmi_mgmt_tx_compl_event *ev; 6351 int ret; 6352 6353 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6354 if (IS_ERR(tb)) { 6355 ret = PTR_ERR(tb); 6356 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 6357 return ret; 6358 } 6359 6360 ev = tb[WMI_TAG_MGMT_TX_COMPL_EVENT]; 6361 if (!ev) { 6362 ath12k_warn(ab, "failed to fetch mgmt tx compl ev"); 6363 kfree(tb); 6364 return -EPROTO; 6365 } 6366 6367 param->pdev_id = ev->pdev_id; 6368 param->desc_id = ev->desc_id; 6369 param->status = ev->status; 6370 param->ppdu_id = ev->ppdu_id; 6371 param->ack_rssi = ev->ack_rssi; 6372 6373 kfree(tb); 6374 return 0; 6375 } 6376 6377 static void ath12k_wmi_event_scan_started(struct ath12k *ar) 6378 { 6379 lockdep_assert_held(&ar->data_lock); 6380 6381 switch (ar->scan.state) { 6382 case ATH12K_SCAN_IDLE: 6383 case ATH12K_SCAN_RUNNING: 6384 case ATH12K_SCAN_ABORTING: 6385 ath12k_warn(ar->ab, "received scan started event in an invalid scan state: %s (%d)\n", 6386 ath12k_scan_state_str(ar->scan.state), 6387 ar->scan.state); 6388 break; 6389 case ATH12K_SCAN_STARTING: 6390 ar->scan.state = ATH12K_SCAN_RUNNING; 6391 6392 if (ar->scan.is_roc) 6393 ieee80211_ready_on_channel(ath12k_ar_to_hw(ar)); 6394 6395 complete(&ar->scan.started); 6396 break; 6397 } 6398 } 6399 6400 static void ath12k_wmi_event_scan_start_failed(struct ath12k *ar) 6401 { 6402 lockdep_assert_held(&ar->data_lock); 6403 6404 switch (ar->scan.state) { 6405 case ATH12K_SCAN_IDLE: 6406 case ATH12K_SCAN_RUNNING: 6407 case ATH12K_SCAN_ABORTING: 6408 ath12k_warn(ar->ab, "received scan start failed event in an invalid scan state: %s (%d)\n", 6409 
ath12k_scan_state_str(ar->scan.state), 6410 ar->scan.state); 6411 break; 6412 case ATH12K_SCAN_STARTING: 6413 complete(&ar->scan.started); 6414 __ath12k_mac_scan_finish(ar); 6415 break; 6416 } 6417 } 6418 6419 static void ath12k_wmi_event_scan_completed(struct ath12k *ar) 6420 { 6421 lockdep_assert_held(&ar->data_lock); 6422 6423 switch (ar->scan.state) { 6424 case ATH12K_SCAN_IDLE: 6425 case ATH12K_SCAN_STARTING: 6426 /* One suspected reason scan can be completed while starting is 6427 * if firmware fails to deliver all scan events to the host, 6428 * e.g. when transport pipe is full. This has been observed 6429 * with spectral scan phyerr events starving wmi transport 6430 * pipe. In such case the "scan completed" event should be (and 6431 * is) ignored by the host as it may be just firmware's scan 6432 * state machine recovering. 6433 */ 6434 ath12k_warn(ar->ab, "received scan completed event in an invalid scan state: %s (%d)\n", 6435 ath12k_scan_state_str(ar->scan.state), 6436 ar->scan.state); 6437 break; 6438 case ATH12K_SCAN_RUNNING: 6439 case ATH12K_SCAN_ABORTING: 6440 __ath12k_mac_scan_finish(ar); 6441 break; 6442 } 6443 } 6444 6445 static void ath12k_wmi_event_scan_bss_chan(struct ath12k *ar) 6446 { 6447 lockdep_assert_held(&ar->data_lock); 6448 6449 switch (ar->scan.state) { 6450 case ATH12K_SCAN_IDLE: 6451 case ATH12K_SCAN_STARTING: 6452 ath12k_warn(ar->ab, "received scan bss chan event in an invalid scan state: %s (%d)\n", 6453 ath12k_scan_state_str(ar->scan.state), 6454 ar->scan.state); 6455 break; 6456 case ATH12K_SCAN_RUNNING: 6457 case ATH12K_SCAN_ABORTING: 6458 ar->scan_channel = NULL; 6459 break; 6460 } 6461 } 6462 6463 static void ath12k_wmi_event_scan_foreign_chan(struct ath12k *ar, u32 freq) 6464 { 6465 struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); 6466 6467 lockdep_assert_held(&ar->data_lock); 6468 6469 switch (ar->scan.state) { 6470 case ATH12K_SCAN_IDLE: 6471 case ATH12K_SCAN_STARTING: 6472 ath12k_warn(ar->ab, "received scan foreign chan event in an invalid scan state: %s (%d)\n", 6473 ath12k_scan_state_str(ar->scan.state), 6474 ar->scan.state); 6475 break; 6476 case ATH12K_SCAN_RUNNING: 6477 case ATH12K_SCAN_ABORTING: 6478 ar->scan_channel = ieee80211_get_channel(hw->wiphy, freq); 6479 6480 if (ar->scan.is_roc && ar->scan.roc_freq == freq) 6481 complete(&ar->scan.on_channel); 6482 6483 break; 6484 } 6485 } 6486 6487 static const char * 6488 ath12k_wmi_event_scan_type_str(enum wmi_scan_event_type type, 6489 enum wmi_scan_completion_reason reason) 6490 { 6491 switch (type) { 6492 case WMI_SCAN_EVENT_STARTED: 6493 return "started"; 6494 case WMI_SCAN_EVENT_COMPLETED: 6495 switch (reason) { 6496 case WMI_SCAN_REASON_COMPLETED: 6497 return "completed"; 6498 case WMI_SCAN_REASON_CANCELLED: 6499 return "completed [cancelled]"; 6500 case WMI_SCAN_REASON_PREEMPTED: 6501 return "completed [preempted]"; 6502 case WMI_SCAN_REASON_TIMEDOUT: 6503 return "completed [timedout]"; 6504 case WMI_SCAN_REASON_INTERNAL_FAILURE: 6505 return "completed [internal err]"; 6506 case WMI_SCAN_REASON_MAX: 6507 break; 6508 } 6509 return "completed [unknown]"; 6510 case WMI_SCAN_EVENT_BSS_CHANNEL: 6511 return "bss channel"; 6512 case WMI_SCAN_EVENT_FOREIGN_CHAN: 6513 return "foreign channel"; 6514 case WMI_SCAN_EVENT_DEQUEUED: 6515 return "dequeued"; 6516 case WMI_SCAN_EVENT_PREEMPTED: 6517 return "preempted"; 6518 case WMI_SCAN_EVENT_START_FAILED: 6519 return "start failed"; 6520 case WMI_SCAN_EVENT_RESTARTED: 6521 return "restarted"; 6522 case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT: 6523 return 
"foreign channel exit"; 6524 default: 6525 return "unknown"; 6526 } 6527 } 6528 6529 static int ath12k_pull_scan_ev(struct ath12k_base *ab, struct sk_buff *skb, 6530 struct wmi_scan_event *scan_evt_param) 6531 { 6532 const void **tb; 6533 const struct wmi_scan_event *ev; 6534 int ret; 6535 6536 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6537 if (IS_ERR(tb)) { 6538 ret = PTR_ERR(tb); 6539 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 6540 return ret; 6541 } 6542 6543 ev = tb[WMI_TAG_SCAN_EVENT]; 6544 if (!ev) { 6545 ath12k_warn(ab, "failed to fetch scan ev"); 6546 kfree(tb); 6547 return -EPROTO; 6548 } 6549 6550 scan_evt_param->event_type = ev->event_type; 6551 scan_evt_param->reason = ev->reason; 6552 scan_evt_param->channel_freq = ev->channel_freq; 6553 scan_evt_param->scan_req_id = ev->scan_req_id; 6554 scan_evt_param->scan_id = ev->scan_id; 6555 scan_evt_param->vdev_id = ev->vdev_id; 6556 scan_evt_param->tsf_timestamp = ev->tsf_timestamp; 6557 6558 kfree(tb); 6559 return 0; 6560 } 6561 6562 static int ath12k_pull_peer_sta_kickout_ev(struct ath12k_base *ab, struct sk_buff *skb, 6563 struct wmi_peer_sta_kickout_arg *arg) 6564 { 6565 const void **tb; 6566 const struct wmi_peer_sta_kickout_event *ev; 6567 int ret; 6568 6569 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6570 if (IS_ERR(tb)) { 6571 ret = PTR_ERR(tb); 6572 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 6573 return ret; 6574 } 6575 6576 ev = tb[WMI_TAG_PEER_STA_KICKOUT_EVENT]; 6577 if (!ev) { 6578 ath12k_warn(ab, "failed to fetch peer sta kickout ev"); 6579 kfree(tb); 6580 return -EPROTO; 6581 } 6582 6583 arg->mac_addr = ev->peer_macaddr.addr; 6584 arg->reason = le32_to_cpu(ev->reason); 6585 arg->rssi = le32_to_cpu(ev->rssi); 6586 6587 kfree(tb); 6588 return 0; 6589 } 6590 6591 static int ath12k_pull_roam_ev(struct ath12k_base *ab, struct sk_buff *skb, 6592 struct wmi_roam_event *roam_ev) 6593 { 6594 const void **tb; 6595 const struct wmi_roam_event *ev; 6596 int ret; 6597 6598 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6599 if (IS_ERR(tb)) { 6600 ret = PTR_ERR(tb); 6601 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 6602 return ret; 6603 } 6604 6605 ev = tb[WMI_TAG_ROAM_EVENT]; 6606 if (!ev) { 6607 ath12k_warn(ab, "failed to fetch roam ev"); 6608 kfree(tb); 6609 return -EPROTO; 6610 } 6611 6612 roam_ev->vdev_id = ev->vdev_id; 6613 roam_ev->reason = ev->reason; 6614 roam_ev->rssi = ev->rssi; 6615 6616 kfree(tb); 6617 return 0; 6618 } 6619 6620 static int freq_to_idx(struct ath12k *ar, int freq) 6621 { 6622 struct ieee80211_supported_band *sband; 6623 struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); 6624 int band, ch, idx = 0; 6625 6626 for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { 6627 if (!ar->mac.sbands[band].channels) 6628 continue; 6629 6630 sband = hw->wiphy->bands[band]; 6631 if (!sband) 6632 continue; 6633 6634 for (ch = 0; ch < sband->n_channels; ch++, idx++) 6635 if (sband->channels[ch].center_freq == freq) 6636 goto exit; 6637 } 6638 6639 exit: 6640 return idx; 6641 } 6642 6643 static int ath12k_pull_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb, 6644 struct wmi_chan_info_event *ch_info_ev) 6645 { 6646 const void **tb; 6647 const struct wmi_chan_info_event *ev; 6648 int ret; 6649 6650 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6651 if (IS_ERR(tb)) { 6652 ret = PTR_ERR(tb); 6653 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 6654 return ret; 6655 } 6656 6657 ev = tb[WMI_TAG_CHAN_INFO_EVENT]; 6658 if (!ev) { 6659 ath12k_warn(ab, 
"failed to fetch chan info ev"); 6660 kfree(tb); 6661 return -EPROTO; 6662 } 6663 6664 ch_info_ev->err_code = ev->err_code; 6665 ch_info_ev->freq = ev->freq; 6666 ch_info_ev->cmd_flags = ev->cmd_flags; 6667 ch_info_ev->noise_floor = ev->noise_floor; 6668 ch_info_ev->rx_clear_count = ev->rx_clear_count; 6669 ch_info_ev->cycle_count = ev->cycle_count; 6670 ch_info_ev->chan_tx_pwr_range = ev->chan_tx_pwr_range; 6671 ch_info_ev->chan_tx_pwr_tp = ev->chan_tx_pwr_tp; 6672 ch_info_ev->rx_frame_count = ev->rx_frame_count; 6673 ch_info_ev->tx_frame_cnt = ev->tx_frame_cnt; 6674 ch_info_ev->mac_clk_mhz = ev->mac_clk_mhz; 6675 ch_info_ev->vdev_id = ev->vdev_id; 6676 6677 kfree(tb); 6678 return 0; 6679 } 6680 6681 static int 6682 ath12k_pull_pdev_bss_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb, 6683 struct wmi_pdev_bss_chan_info_event *bss_ch_info_ev) 6684 { 6685 const void **tb; 6686 const struct wmi_pdev_bss_chan_info_event *ev; 6687 int ret; 6688 6689 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6690 if (IS_ERR(tb)) { 6691 ret = PTR_ERR(tb); 6692 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 6693 return ret; 6694 } 6695 6696 ev = tb[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT]; 6697 if (!ev) { 6698 ath12k_warn(ab, "failed to fetch pdev bss chan info ev"); 6699 kfree(tb); 6700 return -EPROTO; 6701 } 6702 6703 bss_ch_info_ev->pdev_id = ev->pdev_id; 6704 bss_ch_info_ev->freq = ev->freq; 6705 bss_ch_info_ev->noise_floor = ev->noise_floor; 6706 bss_ch_info_ev->rx_clear_count_low = ev->rx_clear_count_low; 6707 bss_ch_info_ev->rx_clear_count_high = ev->rx_clear_count_high; 6708 bss_ch_info_ev->cycle_count_low = ev->cycle_count_low; 6709 bss_ch_info_ev->cycle_count_high = ev->cycle_count_high; 6710 bss_ch_info_ev->tx_cycle_count_low = ev->tx_cycle_count_low; 6711 bss_ch_info_ev->tx_cycle_count_high = ev->tx_cycle_count_high; 6712 bss_ch_info_ev->rx_cycle_count_low = ev->rx_cycle_count_low; 6713 bss_ch_info_ev->rx_cycle_count_high = ev->rx_cycle_count_high; 6714 bss_ch_info_ev->rx_bss_cycle_count_low = ev->rx_bss_cycle_count_low; 6715 bss_ch_info_ev->rx_bss_cycle_count_high = ev->rx_bss_cycle_count_high; 6716 6717 kfree(tb); 6718 return 0; 6719 } 6720 6721 static int 6722 ath12k_pull_vdev_install_key_compl_ev(struct ath12k_base *ab, struct sk_buff *skb, 6723 struct wmi_vdev_install_key_complete_arg *arg) 6724 { 6725 const void **tb; 6726 const struct wmi_vdev_install_key_compl_event *ev; 6727 int ret; 6728 6729 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6730 if (IS_ERR(tb)) { 6731 ret = PTR_ERR(tb); 6732 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 6733 return ret; 6734 } 6735 6736 ev = tb[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT]; 6737 if (!ev) { 6738 ath12k_warn(ab, "failed to fetch vdev install key compl ev"); 6739 kfree(tb); 6740 return -EPROTO; 6741 } 6742 6743 arg->vdev_id = le32_to_cpu(ev->vdev_id); 6744 arg->macaddr = ev->peer_macaddr.addr; 6745 arg->key_idx = le32_to_cpu(ev->key_idx); 6746 arg->key_flags = le32_to_cpu(ev->key_flags); 6747 arg->status = le32_to_cpu(ev->status); 6748 6749 kfree(tb); 6750 return 0; 6751 } 6752 6753 static int ath12k_pull_peer_assoc_conf_ev(struct ath12k_base *ab, struct sk_buff *skb, 6754 struct wmi_peer_assoc_conf_arg *peer_assoc_conf) 6755 { 6756 const void **tb; 6757 const struct wmi_peer_assoc_conf_event *ev; 6758 int ret; 6759 6760 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6761 if (IS_ERR(tb)) { 6762 ret = PTR_ERR(tb); 6763 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 6764 return ret; 6765 } 6766 6767 ev = 
tb[WMI_TAG_PEER_ASSOC_CONF_EVENT];
6768 if (!ev) {
6769 ath12k_warn(ab, "failed to fetch peer assoc conf ev");
6770 kfree(tb);
6771 return -EPROTO;
6772 }
6773
6774 peer_assoc_conf->vdev_id = le32_to_cpu(ev->vdev_id);
6775 peer_assoc_conf->macaddr = ev->peer_macaddr.addr;
6776
6777 kfree(tb);
6778 return 0;
6779 }
6780
6781 static int
6782 ath12k_pull_pdev_temp_ev(struct ath12k_base *ab, struct sk_buff *skb,
6783 const struct wmi_pdev_temperature_event *ev)
6784 {
6785 const void **tb;
6786 int ret;
6787
6788 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6789 if (IS_ERR(tb)) {
6790 ret = PTR_ERR(tb);
6791 ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6792 return ret;
6793 }
6794
6795 ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
6796 if (!ev) {
6797 ath12k_warn(ab, "failed to fetch pdev temp ev");
6798 kfree(tb);
6799 return -EPROTO;
6800 }
6801
6802 kfree(tb);
6803 return 0;
6804 }
6805
6806 static void ath12k_wmi_op_ep_tx_credits(struct ath12k_base *ab)
6807 {
6808 /* try to send pending beacons first. they take priority */
6809 wake_up(&ab->wmi_ab.tx_credits_wq);
6810 }
6811
6812 static int ath12k_reg_11d_new_cc_event(struct ath12k_base *ab, struct sk_buff *skb)
6813 {
6814 const struct wmi_11d_new_cc_event *ev;
6815 struct ath12k *ar;
6816 struct ath12k_pdev *pdev;
6817 const void **tb;
6818 int ret, i;
6819
6820 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6821 if (IS_ERR(tb)) {
6822 ret = PTR_ERR(tb);
6823 ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6824 return ret;
6825 }
6826
6827 ev = tb[WMI_TAG_11D_NEW_COUNTRY_EVENT];
6828 if (!ev) {
6829 kfree(tb);
6830 ath12k_warn(ab, "failed to fetch 11d new cc ev");
6831 return -EPROTO;
6832 }
6833
6834 spin_lock_bh(&ab->base_lock);
6835 memcpy(&ab->new_alpha2, &ev->new_alpha2, REG_ALPHA2_LEN);
6836 spin_unlock_bh(&ab->base_lock);
6837
6838 ath12k_dbg(ab, ATH12K_DBG_WMI, "wmi 11d new cc %c%c\n",
6839 ab->new_alpha2[0],
6840 ab->new_alpha2[1]);
6841
6842 kfree(tb);
6843
6844 for (i = 0; i < ab->num_radios; i++) {
6845 pdev = &ab->pdevs[i];
6846 ar = pdev->ar;
6847 ar->state_11d = ATH12K_11D_IDLE;
6848 ar->ah->regd_updated = false;
6849 complete(&ar->completed_11d_scan);
6850 }
6851
6852 queue_work(ab->workqueue, &ab->update_11d_work);
6853
6854 return 0;
6855 }
6856
6857 static void ath12k_wmi_htc_tx_complete(struct ath12k_base *ab,
6858 struct sk_buff *skb)
6859 {
6860 dev_kfree_skb(skb);
6861 }
6862
6863 static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *skb)
6864 {
6865 struct ath12k_reg_info *reg_info;
6866 struct ath12k *ar = NULL;
6867 u8 pdev_idx = 255;
6868 int ret;
6869
6870 reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
6871 if (!reg_info) {
6872 ret = -ENOMEM;
6873 goto fallback;
6874 }
6875
6876 ret = ath12k_pull_reg_chan_list_ext_update_ev(ab, skb, reg_info);
6877 if (ret) {
6878 ath12k_warn(ab, "failed to extract regulatory info from received event\n");
6879 goto mem_free;
6880 }
6881
6882 ret = ath12k_reg_validate_reg_info(ab, reg_info);
6883 if (ret == ATH12K_REG_STATUS_FALLBACK) {
6884 ath12k_warn(ab, "failed to validate reg info %d\n", ret);
6885 /* firmware has successfully switched to the new regd but the host
6886 * cannot continue, so free reg_info and fall back to the old regd
6887 */
6888 goto mem_free;
6889 } else if (ret == ATH12K_REG_STATUS_DROP) {
6890 /* reg info is valid but we will neither store it nor create
6891 * a new regd for it
6892 */
6893 ret = ATH12K_REG_STATUS_VALID;
6894 goto mem_free;
6895 }
6896
6897 /* free the old reg_info if it exists */
6898 pdev_idx = reg_info->phy_id;
6899 if (ab->reg_info[pdev_idx]) {
6900 ath12k_reg_reset_reg_info(ab->reg_info[pdev_idx]);
6901 kfree(ab->reg_info[pdev_idx]);
6902 }
6903 /* reg_info is valid, so store it for later use even if the
6904 * regd build below fails
6905 */
6906 ab->reg_info[pdev_idx] = reg_info;
6907
6908 ret = ath12k_reg_handle_chan_list(ab, reg_info, WMI_VDEV_TYPE_UNSPEC,
6909 IEEE80211_REG_UNSET_AP);
6910 if (ret) {
6911 ath12k_warn(ab, "failed to handle chan list %d\n", ret);
6912 goto fallback;
6913 }
6914
6915 goto out;
6916
6917 mem_free:
6918 ath12k_reg_reset_reg_info(reg_info);
6919 kfree(reg_info);
6920
6921 if (ret == ATH12K_REG_STATUS_VALID)
6922 goto out;
6923
6924 fallback:
6925 /* Fall back to the older regd (by sending the previous country
6926 * setting again) if fw has succeeded and we failed to process it
6927 * here. The regdomain should be uniform across driver and fw. Since
6928 * the fw has processed the command and sent a success status, we
6929 * expect this function to succeed as well. If it doesn't, CTRY needs
6930 * to be reverted at the fw and the old SCAN_CHAN_LIST cmd resent.
6931 */
6932 /* TODO: This is rare, but still should also be handled */
6933 WARN_ON(1);
6934
6935 out:
6936 /* In some error cases, even a valid pdev_idx might not be available */
6937 if (pdev_idx != 255)
6938 ar = ab->pdevs[pdev_idx].ar;
6939
6940 /* During the boot-time update, 'ar' might not be allocated,
6941 * so the completion cannot be marked at that point.
6942 * This boot-time update is handled in ath12k_mac_hw_register()
6943 * before registering the hardware.
6944 */
6945 if (ar)
6946 complete_all(&ar->regd_update_completed);
6947
6948 return ret;
6949 }
6950
6951 static int ath12k_wmi_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len,
6952 const void *ptr, void *data)
6953 {
6954 struct ath12k_wmi_rdy_parse *rdy_parse = data;
6955 struct wmi_ready_event fixed_param;
6956 struct ath12k_wmi_mac_addr_params *addr_list;
6957 struct ath12k_pdev *pdev;
6958 u32 num_mac_addr;
6959 int i;
6960
6961 switch (tag) {
6962 case WMI_TAG_READY_EVENT:
6963 memset(&fixed_param, 0, sizeof(fixed_param));
6964 memcpy(&fixed_param, (struct wmi_ready_event *)ptr,
6965 min_t(u16, sizeof(fixed_param), len));
6966 ab->wlan_init_status = le32_to_cpu(fixed_param.ready_event_min.status);
6967 rdy_parse->num_extra_mac_addr =
6968 le32_to_cpu(fixed_param.ready_event_min.num_extra_mac_addr);
6969
6970 ether_addr_copy(ab->mac_addr,
6971 fixed_param.ready_event_min.mac_addr.addr);
6972 ab->pktlog_defs_checksum = le32_to_cpu(fixed_param.pktlog_defs_checksum);
6973 ab->wmi_ready = true;
6974 break;
6975 case WMI_TAG_ARRAY_FIXED_STRUCT:
6976 addr_list = (struct ath12k_wmi_mac_addr_params *)ptr;
6977 num_mac_addr = rdy_parse->num_extra_mac_addr;
6978
6979 if (!(ab->num_radios > 1 && num_mac_addr >= ab->num_radios))
6980 break;
6981
6982 for (i = 0; i < ab->num_radios; i++) {
6983 pdev = &ab->pdevs[i];
6984 ether_addr_copy(pdev->mac_addr, addr_list[i].addr);
6985 }
6986 ab->pdevs_macaddr_valid = true;
6987 break;
6988 default:
6989 break;
6990 }
6991
6992 return 0;
6993 }
6994
6995 static int ath12k_ready_event(struct ath12k_base *ab, struct sk_buff *skb)
6996 {
6997 struct ath12k_wmi_rdy_parse rdy_parse = { };
6998 int ret;
6999
7000 ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
7001 ath12k_wmi_rdy_parse, &rdy_parse);
7002 if (ret) {
7003 ath12k_warn(ab, "failed to parse tlv %d\n", ret);
7004 return ret;
7005 }
7006
7007 complete(&ab->wmi_ab.unified_ready);
7008 return 0;
7009 }
7010
7011 static void
ath12k_peer_delete_resp_event(struct ath12k_base *ab, struct sk_buff *skb) 7012 { 7013 struct wmi_peer_delete_resp_event peer_del_resp; 7014 struct ath12k *ar; 7015 7016 if (ath12k_pull_peer_del_resp_ev(ab, skb, &peer_del_resp) != 0) { 7017 ath12k_warn(ab, "failed to extract peer delete resp"); 7018 return; 7019 } 7020 7021 rcu_read_lock(); 7022 ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(peer_del_resp.vdev_id)); 7023 if (!ar) { 7024 ath12k_warn(ab, "invalid vdev id in peer delete resp ev %d", 7025 peer_del_resp.vdev_id); 7026 rcu_read_unlock(); 7027 return; 7028 } 7029 7030 complete(&ar->peer_delete_done); 7031 rcu_read_unlock(); 7032 ath12k_dbg(ab, ATH12K_DBG_WMI, "peer delete resp for vdev id %d addr %pM\n", 7033 peer_del_resp.vdev_id, peer_del_resp.peer_macaddr.addr); 7034 } 7035 7036 static void ath12k_vdev_delete_resp_event(struct ath12k_base *ab, 7037 struct sk_buff *skb) 7038 { 7039 struct ath12k *ar; 7040 u32 vdev_id = 0; 7041 7042 if (ath12k_pull_vdev_del_resp_ev(ab, skb, &vdev_id) != 0) { 7043 ath12k_warn(ab, "failed to extract vdev delete resp"); 7044 return; 7045 } 7046 7047 rcu_read_lock(); 7048 ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id); 7049 if (!ar) { 7050 ath12k_warn(ab, "invalid vdev id in vdev delete resp ev %d", 7051 vdev_id); 7052 rcu_read_unlock(); 7053 return; 7054 } 7055 7056 complete(&ar->vdev_delete_done); 7057 7058 rcu_read_unlock(); 7059 7060 ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev delete resp for vdev id %d\n", 7061 vdev_id); 7062 } 7063 7064 static const char *ath12k_wmi_vdev_resp_print(u32 vdev_resp_status) 7065 { 7066 switch (vdev_resp_status) { 7067 case WMI_VDEV_START_RESPONSE_INVALID_VDEVID: 7068 return "invalid vdev id"; 7069 case WMI_VDEV_START_RESPONSE_NOT_SUPPORTED: 7070 return "not supported"; 7071 case WMI_VDEV_START_RESPONSE_DFS_VIOLATION: 7072 return "dfs violation"; 7073 case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN: 7074 return "invalid regdomain"; 7075 default: 7076 return "unknown"; 7077 } 7078 } 7079 7080 static void ath12k_vdev_start_resp_event(struct ath12k_base *ab, struct sk_buff *skb) 7081 { 7082 struct wmi_vdev_start_resp_event vdev_start_resp; 7083 struct ath12k *ar; 7084 u32 status; 7085 7086 if (ath12k_pull_vdev_start_resp_tlv(ab, skb, &vdev_start_resp) != 0) { 7087 ath12k_warn(ab, "failed to extract vdev start resp"); 7088 return; 7089 } 7090 7091 rcu_read_lock(); 7092 ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(vdev_start_resp.vdev_id)); 7093 if (!ar) { 7094 ath12k_warn(ab, "invalid vdev id in vdev start resp ev %d", 7095 vdev_start_resp.vdev_id); 7096 rcu_read_unlock(); 7097 return; 7098 } 7099 7100 ar->last_wmi_vdev_start_status = 0; 7101 7102 status = le32_to_cpu(vdev_start_resp.status); 7103 if (WARN_ON_ONCE(status)) { 7104 ath12k_warn(ab, "vdev start resp error status %d (%s)\n", 7105 status, ath12k_wmi_vdev_resp_print(status)); 7106 ar->last_wmi_vdev_start_status = status; 7107 } 7108 7109 ar->max_allowed_tx_power = (s8)le32_to_cpu(vdev_start_resp.max_allowed_tx_power); 7110 7111 complete(&ar->vdev_setup_done); 7112 7113 rcu_read_unlock(); 7114 7115 ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev start resp for vdev id %d", 7116 vdev_start_resp.vdev_id); 7117 } 7118 7119 static void ath12k_bcn_tx_status_event(struct ath12k_base *ab, struct sk_buff *skb) 7120 { 7121 struct ath12k_link_vif *arvif; 7122 struct ath12k *ar; 7123 u32 vdev_id, tx_status; 7124 7125 if (ath12k_pull_bcn_tx_status_ev(ab, skb, &vdev_id, &tx_status) != 0) { 7126 ath12k_warn(ab, "failed to extract bcn tx status"); 7127 return; 7128 } 7129 7130 
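/* scope-based RCU guard: rcu_read_lock() is taken here and released
 * automatically on every return path of this function, so no explicit
 * rcu_read_unlock() is needed below
 */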
guard(rcu)(); 7131 7132 arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_id); 7133 if (!arvif) { 7134 ath12k_warn(ab, "invalid vdev %u in bcn tx status\n", 7135 vdev_id); 7136 return; 7137 } 7138 7139 ar = arvif->ar; 7140 wiphy_work_queue(ath12k_ar_to_hw(ar)->wiphy, &arvif->bcn_tx_work); 7141 } 7142 7143 static void ath12k_vdev_stopped_event(struct ath12k_base *ab, struct sk_buff *skb) 7144 { 7145 struct ath12k *ar; 7146 u32 vdev_id = 0; 7147 7148 if (ath12k_pull_vdev_stopped_param_tlv(ab, skb, &vdev_id) != 0) { 7149 ath12k_warn(ab, "failed to extract vdev stopped event"); 7150 return; 7151 } 7152 7153 rcu_read_lock(); 7154 ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id); 7155 if (!ar) { 7156 ath12k_warn(ab, "invalid vdev id in vdev stopped ev %d", 7157 vdev_id); 7158 rcu_read_unlock(); 7159 return; 7160 } 7161 7162 complete(&ar->vdev_setup_done); 7163 7164 rcu_read_unlock(); 7165 7166 ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev stopped for vdev id %d", vdev_id); 7167 } 7168 7169 static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb) 7170 { 7171 struct ath12k_wmi_mgmt_rx_arg rx_ev = {}; 7172 struct ath12k *ar; 7173 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 7174 struct ieee80211_hdr *hdr; 7175 u16 fc; 7176 struct ieee80211_supported_band *sband; 7177 s32 noise_floor; 7178 7179 if (ath12k_pull_mgmt_rx_params_tlv(ab, skb, &rx_ev) != 0) { 7180 ath12k_warn(ab, "failed to extract mgmt rx event"); 7181 dev_kfree_skb(skb); 7182 return; 7183 } 7184 7185 memset(status, 0, sizeof(*status)); 7186 7187 ath12k_dbg(ab, ATH12K_DBG_MGMT, "mgmt rx event status %08x\n", 7188 rx_ev.status); 7189 7190 rcu_read_lock(); 7191 ar = ath12k_mac_get_ar_by_pdev_id(ab, rx_ev.pdev_id); 7192 7193 if (!ar) { 7194 ath12k_warn(ab, "invalid pdev_id %d in mgmt_rx_event\n", 7195 rx_ev.pdev_id); 7196 dev_kfree_skb(skb); 7197 goto exit; 7198 } 7199 7200 if ((test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) || 7201 (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT | 7202 WMI_RX_STATUS_ERR_KEY_CACHE_MISS | 7203 WMI_RX_STATUS_ERR_CRC))) { 7204 dev_kfree_skb(skb); 7205 goto exit; 7206 } 7207 7208 if (rx_ev.status & WMI_RX_STATUS_ERR_MIC) 7209 status->flag |= RX_FLAG_MMIC_ERROR; 7210 7211 if (rx_ev.chan_freq >= ATH12K_MIN_6GHZ_FREQ && 7212 rx_ev.chan_freq <= ATH12K_MAX_6GHZ_FREQ) { 7213 status->band = NL80211_BAND_6GHZ; 7214 status->freq = rx_ev.chan_freq; 7215 } else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) { 7216 status->band = NL80211_BAND_2GHZ; 7217 } else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH12K_MAX_5GHZ_CHAN) { 7218 status->band = NL80211_BAND_5GHZ; 7219 } else { 7220 /* Shouldn't happen unless list of advertised channels to 7221 * mac80211 has been changed. 
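 * A frequency outside the advertised bands therefore indicates a
 * driver or firmware inconsistency, so warn once and drop the frame.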
7222 */ 7223 WARN_ON_ONCE(1); 7224 dev_kfree_skb(skb); 7225 goto exit; 7226 } 7227 7228 if (rx_ev.phy_mode == MODE_11B && 7229 (status->band == NL80211_BAND_5GHZ || status->band == NL80211_BAND_6GHZ)) 7230 ath12k_dbg(ab, ATH12K_DBG_WMI, 7231 "wmi mgmt rx 11b (CCK) on 5/6GHz, band = %d\n", status->band); 7232 7233 sband = &ar->mac.sbands[status->band]; 7234 7235 if (status->band != NL80211_BAND_6GHZ) 7236 status->freq = ieee80211_channel_to_frequency(rx_ev.channel, 7237 status->band); 7238 7239 spin_lock_bh(&ar->data_lock); 7240 noise_floor = ath12k_pdev_get_noise_floor(ar); 7241 spin_unlock_bh(&ar->data_lock); 7242 7243 status->signal = rx_ev.snr + noise_floor; 7244 status->rate_idx = ath12k_mac_bitrate_to_idx(sband, rx_ev.rate / 100); 7245 7246 hdr = (struct ieee80211_hdr *)skb->data; 7247 fc = le16_to_cpu(hdr->frame_control); 7248 7249 /* Firmware is guaranteed to report all essential management frames via 7250 * WMI while it can deliver some extra via HTT. Since there can be 7251 * duplicates split the reporting wrt monitor/sniffing. 7252 */ 7253 status->flag |= RX_FLAG_SKIP_MONITOR; 7254 7255 /* In case of PMF, FW delivers decrypted frames with Protected Bit set 7256 * including group privacy action frames. 7257 */ 7258 if (ieee80211_has_protected(hdr->frame_control)) { 7259 status->flag |= RX_FLAG_DECRYPTED; 7260 7261 if (!ieee80211_is_robust_mgmt_frame(skb)) { 7262 status->flag |= RX_FLAG_IV_STRIPPED | 7263 RX_FLAG_MMIC_STRIPPED; 7264 hdr->frame_control = __cpu_to_le16(fc & 7265 ~IEEE80211_FCTL_PROTECTED); 7266 } 7267 } 7268 7269 if (ieee80211_is_beacon(hdr->frame_control)) 7270 ath12k_mac_handle_beacon(ar, skb); 7271 7272 ath12k_dbg(ab, ATH12K_DBG_MGMT, 7273 "event mgmt rx skb %p len %d ftype %02x stype %02x\n", 7274 skb, skb->len, 7275 fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE); 7276 7277 ath12k_dbg(ab, ATH12K_DBG_MGMT, 7278 "event mgmt rx freq %d band %d snr %d, rate_idx %d\n", 7279 status->freq, status->band, status->signal, 7280 status->rate_idx); 7281 7282 ieee80211_rx_ni(ath12k_ar_to_hw(ar), skb); 7283 7284 exit: 7285 rcu_read_unlock(); 7286 } 7287 7288 static void ath12k_mgmt_tx_compl_event(struct ath12k_base *ab, struct sk_buff *skb) 7289 { 7290 struct wmi_mgmt_tx_compl_event tx_compl_param = {}; 7291 struct ath12k *ar; 7292 7293 if (ath12k_pull_mgmt_tx_compl_param_tlv(ab, skb, &tx_compl_param) != 0) { 7294 ath12k_warn(ab, "failed to extract mgmt tx compl event"); 7295 return; 7296 } 7297 7298 rcu_read_lock(); 7299 ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(tx_compl_param.pdev_id)); 7300 if (!ar) { 7301 ath12k_warn(ab, "invalid pdev id %d in mgmt_tx_compl_event\n", 7302 tx_compl_param.pdev_id); 7303 goto exit; 7304 } 7305 7306 wmi_process_mgmt_tx_comp(ar, le32_to_cpu(tx_compl_param.desc_id), 7307 le32_to_cpu(tx_compl_param.status), 7308 le32_to_cpu(tx_compl_param.ack_rssi)); 7309 7310 ath12k_dbg(ab, ATH12K_DBG_MGMT, 7311 "mgmt tx compl ev pdev_id %d, desc_id %d, status %d", 7312 tx_compl_param.pdev_id, tx_compl_param.desc_id, 7313 tx_compl_param.status); 7314 7315 exit: 7316 rcu_read_unlock(); 7317 } 7318 7319 static struct ath12k *ath12k_get_ar_on_scan_state(struct ath12k_base *ab, 7320 u32 vdev_id, 7321 enum ath12k_scan_state state) 7322 { 7323 int i; 7324 struct ath12k_pdev *pdev; 7325 struct ath12k *ar; 7326 7327 for (i = 0; i < ab->num_radios; i++) { 7328 pdev = rcu_dereference(ab->pdevs_active[i]); 7329 if (pdev && pdev->ar) { 7330 ar = pdev->ar; 7331 7332 spin_lock_bh(&ar->data_lock); 7333 if (ar->scan.state == state && 7334 ar->scan.arvif && 7335 
ar->scan.arvif->vdev_id == vdev_id) {
7336 spin_unlock_bh(&ar->data_lock);
7337 return ar;
7338 }
7339 spin_unlock_bh(&ar->data_lock);
7340 }
7341 }
7342 return NULL;
7343 }
7344
7345 static void ath12k_scan_event(struct ath12k_base *ab, struct sk_buff *skb)
7346 {
7347 struct ath12k *ar;
7348 struct wmi_scan_event scan_ev = {};
7349
7350 if (ath12k_pull_scan_ev(ab, skb, &scan_ev) != 0) {
7351 ath12k_warn(ab, "failed to extract scan event");
7352 return;
7353 }
7354
7355 rcu_read_lock();
7356
7357 /* In case the scan was cancelled, e.g. during interface teardown,
7358 * the interface will not be found among the active interfaces.
7359 * In such scenarios, instead iterate over the active pdevs to
7360 * find the 'ar' whose scan state is ABORTING and whose aborting
7361 * scan's vdev id matches this event's info.
7362 */
7363 if (le32_to_cpu(scan_ev.event_type) == WMI_SCAN_EVENT_COMPLETED &&
7364 le32_to_cpu(scan_ev.reason) == WMI_SCAN_REASON_CANCELLED) {
7365 ar = ath12k_get_ar_on_scan_state(ab, le32_to_cpu(scan_ev.vdev_id),
7366 ATH12K_SCAN_ABORTING);
7367 if (!ar)
7368 ar = ath12k_get_ar_on_scan_state(ab, le32_to_cpu(scan_ev.vdev_id),
7369 ATH12K_SCAN_RUNNING);
7370 } else {
7371 ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(scan_ev.vdev_id));
7372 }
7373
7374 if (!ar) {
7375 ath12k_warn(ab, "Received scan event for unknown vdev");
7376 rcu_read_unlock();
7377 return;
7378 }
7379
7380 spin_lock_bh(&ar->data_lock);
7381
7382 ath12k_dbg(ab, ATH12K_DBG_WMI,
7383 "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
7384 ath12k_wmi_event_scan_type_str(le32_to_cpu(scan_ev.event_type),
7385 le32_to_cpu(scan_ev.reason)),
7386 le32_to_cpu(scan_ev.event_type),
7387 le32_to_cpu(scan_ev.reason),
7388 le32_to_cpu(scan_ev.channel_freq),
7389 le32_to_cpu(scan_ev.scan_req_id),
7390 le32_to_cpu(scan_ev.scan_id),
7391 le32_to_cpu(scan_ev.vdev_id),
7392 ath12k_scan_state_str(ar->scan.state), ar->scan.state);
7393
7394 switch (le32_to_cpu(scan_ev.event_type)) {
7395 case WMI_SCAN_EVENT_STARTED:
7396 ath12k_wmi_event_scan_started(ar);
7397 break;
7398 case WMI_SCAN_EVENT_COMPLETED:
7399 ath12k_wmi_event_scan_completed(ar);
7400 break;
7401 case WMI_SCAN_EVENT_BSS_CHANNEL:
7402 ath12k_wmi_event_scan_bss_chan(ar);
7403 break;
7404 case WMI_SCAN_EVENT_FOREIGN_CHAN:
7405 ath12k_wmi_event_scan_foreign_chan(ar, le32_to_cpu(scan_ev.channel_freq));
7406 break;
7407 case WMI_SCAN_EVENT_START_FAILED:
7408 ath12k_warn(ab, "received scan start failure event\n");
7409 ath12k_wmi_event_scan_start_failed(ar);
7410 break;
7411 case WMI_SCAN_EVENT_DEQUEUED:
7412 __ath12k_mac_scan_finish(ar);
7413 break;
7414 case WMI_SCAN_EVENT_PREEMPTED:
7415 case WMI_SCAN_EVENT_RESTARTED:
7416 case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
7417 default:
7418 break;
7419 }
7420
7421 spin_unlock_bh(&ar->data_lock);
7422
7423 rcu_read_unlock();
7424 }
7425
7426 static void ath12k_peer_sta_kickout_event(struct ath12k_base *ab, struct sk_buff *skb)
7427 {
7428 struct wmi_peer_sta_kickout_arg arg = {};
7429 struct ath12k_link_vif *arvif;
7430 struct ieee80211_sta *sta;
7431 struct ath12k_sta *ahsta;
7432 struct ath12k_link_sta *arsta;
7433 struct ath12k *ar;
7434
7435 if (ath12k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) {
7436 ath12k_warn(ab, "failed to extract peer sta kickout event");
7437 return;
7438 }
7439
7440 rcu_read_lock();
7441
7442 spin_lock_bh(&ab->base_lock);
7443
7444 arsta = ath12k_link_sta_find_by_addr(ab, arg.mac_addr);
7445
7446 if (!arsta) {
7447 ath12k_warn(ab, "arsta not found %pM\n",
7448 arg.mac_addr); 7449 goto exit; 7450 } 7451 7452 arvif = arsta->arvif; 7453 if (!arvif) { 7454 ath12k_warn(ab, "invalid arvif in peer sta kickout ev for STA %pM", 7455 arg.mac_addr); 7456 goto exit; 7457 } 7458 7459 ar = arvif->ar; 7460 ahsta = arsta->ahsta; 7461 sta = ath12k_ahsta_to_sta(ahsta); 7462 7463 ath12k_dbg(ab, ATH12K_DBG_WMI, 7464 "peer sta kickout event %pM reason: %d rssi: %d\n", 7465 arg.mac_addr, arg.reason, arg.rssi); 7466 7467 switch (arg.reason) { 7468 case WMI_PEER_STA_KICKOUT_REASON_INACTIVITY: 7469 if (arvif->ahvif->vif->type == NL80211_IFTYPE_STATION) { 7470 ath12k_mac_handle_beacon_miss(ar, arvif); 7471 break; 7472 } 7473 fallthrough; 7474 default: 7475 ieee80211_report_low_ack(sta, 10); 7476 } 7477 7478 exit: 7479 spin_unlock_bh(&ab->base_lock); 7480 rcu_read_unlock(); 7481 } 7482 7483 static void ath12k_roam_event(struct ath12k_base *ab, struct sk_buff *skb) 7484 { 7485 struct ath12k_link_vif *arvif; 7486 struct wmi_roam_event roam_ev = {}; 7487 struct ath12k *ar; 7488 u32 vdev_id; 7489 u8 roam_reason; 7490 7491 if (ath12k_pull_roam_ev(ab, skb, &roam_ev) != 0) { 7492 ath12k_warn(ab, "failed to extract roam event"); 7493 return; 7494 } 7495 7496 vdev_id = le32_to_cpu(roam_ev.vdev_id); 7497 roam_reason = u32_get_bits(le32_to_cpu(roam_ev.reason), 7498 WMI_ROAM_REASON_MASK); 7499 7500 ath12k_dbg(ab, ATH12K_DBG_WMI, 7501 "wmi roam event vdev %u reason %d rssi %d\n", 7502 vdev_id, roam_reason, roam_ev.rssi); 7503 7504 guard(rcu)(); 7505 arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_id); 7506 if (!arvif) { 7507 ath12k_warn(ab, "invalid vdev id in roam ev %d", vdev_id); 7508 return; 7509 } 7510 7511 ar = arvif->ar; 7512 7513 if (roam_reason >= WMI_ROAM_REASON_MAX) 7514 ath12k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n", 7515 roam_reason, vdev_id); 7516 7517 switch (roam_reason) { 7518 case WMI_ROAM_REASON_BEACON_MISS: 7519 ath12k_mac_handle_beacon_miss(ar, arvif); 7520 break; 7521 case WMI_ROAM_REASON_BETTER_AP: 7522 case WMI_ROAM_REASON_LOW_RSSI: 7523 case WMI_ROAM_REASON_SUITABLE_AP_FOUND: 7524 case WMI_ROAM_REASON_HO_FAILED: 7525 ath12k_warn(ab, "ignoring not implemented roam event reason %d on vdev %i\n", 7526 roam_reason, vdev_id); 7527 break; 7528 } 7529 } 7530 7531 static void ath12k_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb) 7532 { 7533 struct wmi_chan_info_event ch_info_ev = {}; 7534 struct ath12k *ar; 7535 struct survey_info *survey; 7536 int idx; 7537 /* HW channel counters frequency value in hertz */ 7538 u32 cc_freq_hz = ab->cc_freq_hz; 7539 7540 if (ath12k_pull_chan_info_ev(ab, skb, &ch_info_ev) != 0) { 7541 ath12k_warn(ab, "failed to extract chan info event"); 7542 return; 7543 } 7544 7545 ath12k_dbg(ab, ATH12K_DBG_WMI, 7546 "chan info vdev_id %d err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d mac_clk_mhz %d\n", 7547 ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq, 7548 ch_info_ev.cmd_flags, ch_info_ev.noise_floor, 7549 ch_info_ev.rx_clear_count, ch_info_ev.cycle_count, 7550 ch_info_ev.mac_clk_mhz); 7551 7552 if (le32_to_cpu(ch_info_ev.cmd_flags) == WMI_CHAN_INFO_END_RESP) { 7553 ath12k_dbg(ab, ATH12K_DBG_WMI, "chan info report completed\n"); 7554 return; 7555 } 7556 7557 rcu_read_lock(); 7558 ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(ch_info_ev.vdev_id)); 7559 if (!ar) { 7560 ath12k_warn(ab, "invalid vdev id in chan info ev %d", 7561 ch_info_ev.vdev_id); 7562 rcu_read_unlock(); 7563 return; 7564 } 7565 spin_lock_bh(&ar->data_lock); 7566 7567 switch (ar->scan.state) 
{
7568 case ATH12K_SCAN_IDLE:
7569 case ATH12K_SCAN_STARTING:
7570 ath12k_warn(ab, "received chan info event without a scan request, ignoring\n");
7571 goto exit;
7572 case ATH12K_SCAN_RUNNING:
7573 case ATH12K_SCAN_ABORTING:
7574 break;
7575 }
7576
7577 idx = freq_to_idx(ar, le32_to_cpu(ch_info_ev.freq));
7578 if (idx >= ARRAY_SIZE(ar->survey)) {
7579 ath12k_warn(ab, "chan info: invalid frequency %d (idx %d out of bounds)\n",
7580 ch_info_ev.freq, idx);
7581 goto exit;
7582 }
7583
7584 /* If FW provides the MAC clock frequency in MHz, override the
7585 * initialized HW channel counters frequency value.
7586 */
7587 if (ch_info_ev.mac_clk_mhz)
7588 cc_freq_hz = (le32_to_cpu(ch_info_ev.mac_clk_mhz) * 1000);
7589
7590 if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_START_RESP) {
7591 survey = &ar->survey[idx];
7592 memset(survey, 0, sizeof(*survey));
7593 survey->noise = le32_to_cpu(ch_info_ev.noise_floor);
7594 survey->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME |
7595 SURVEY_INFO_TIME_BUSY;
7596 survey->time = div_u64(le32_to_cpu(ch_info_ev.cycle_count), cc_freq_hz);
7597 survey->time_busy = div_u64(le32_to_cpu(ch_info_ev.rx_clear_count),
7598 cc_freq_hz);
7599 }
7600 exit:
7601 spin_unlock_bh(&ar->data_lock);
7602 rcu_read_unlock();
7603 }
7604
7605 static void
7606 ath12k_pdev_bss_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
7607 {
7608 struct wmi_pdev_bss_chan_info_event bss_ch_info_ev = {};
7609 struct survey_info *survey;
7610 struct ath12k *ar;
7611 u32 cc_freq_hz = ab->cc_freq_hz;
7612 u64 busy, total, tx, rx, rx_bss;
7613 int idx;
7614
7615 if (ath12k_pull_pdev_bss_chan_info_ev(ab, skb, &bss_ch_info_ev) != 0) {
7616 ath12k_warn(ab, "failed to extract pdev bss chan info event");
7617 return;
7618 }
7619
7620 busy = (u64)(le32_to_cpu(bss_ch_info_ev.rx_clear_count_high)) << 32 |
7621 le32_to_cpu(bss_ch_info_ev.rx_clear_count_low);
7622
7623 total = (u64)(le32_to_cpu(bss_ch_info_ev.cycle_count_high)) << 32 |
7624 le32_to_cpu(bss_ch_info_ev.cycle_count_low);
7625
7626 tx = (u64)(le32_to_cpu(bss_ch_info_ev.tx_cycle_count_high)) << 32 |
7627 le32_to_cpu(bss_ch_info_ev.tx_cycle_count_low);
7628
7629 rx = (u64)(le32_to_cpu(bss_ch_info_ev.rx_cycle_count_high)) << 32 |
7630 le32_to_cpu(bss_ch_info_ev.rx_cycle_count_low);
7631
7632 rx_bss = (u64)(le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_high)) << 32 |
7633 le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_low);
7634
7635 ath12k_dbg(ab, ATH12K_DBG_WMI,
7636 "pdev bss chan info:\n pdev_id: %d freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
7637 bss_ch_info_ev.pdev_id, bss_ch_info_ev.freq,
7638 bss_ch_info_ev.noise_floor, busy, total,
7639 tx, rx, rx_bss);
7640
7641 rcu_read_lock();
7642 ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(bss_ch_info_ev.pdev_id));
7643
7644 if (!ar) {
7645 ath12k_warn(ab, "invalid pdev id %d in bss_chan_info event\n",
7646 bss_ch_info_ev.pdev_id);
7647 rcu_read_unlock();
7648 return;
7649 }
7650
7651 spin_lock_bh(&ar->data_lock);
7652 idx = freq_to_idx(ar, le32_to_cpu(bss_ch_info_ev.freq));
7653 if (idx >= ARRAY_SIZE(ar->survey)) {
7654 ath12k_warn(ab, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
7655 bss_ch_info_ev.freq, idx);
7656 goto exit;
7657 }
7658
7659 survey = &ar->survey[idx];
7660
7661 survey->noise = le32_to_cpu(bss_ch_info_ev.noise_floor);
7662 survey->time = div_u64(total, cc_freq_hz);
7663 survey->time_busy = div_u64(busy, cc_freq_hz);
7664 survey->time_rx = div_u64(rx_bss, cc_freq_hz);
7665 survey->time_tx = div_u64(tx, cc_freq_hz);
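/* The busy/total/tx/rx_bss cycle counts above tick at cc_freq_hz, so
 * dividing by that rate converts them into the survey times reported
 * to cfg80211.
 */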
7666 survey->filled |= (SURVEY_INFO_NOISE_DBM | 7667 SURVEY_INFO_TIME | 7668 SURVEY_INFO_TIME_BUSY | 7669 SURVEY_INFO_TIME_RX | 7670 SURVEY_INFO_TIME_TX); 7671 exit: 7672 spin_unlock_bh(&ar->data_lock); 7673 complete(&ar->bss_survey_done); 7674 7675 rcu_read_unlock(); 7676 } 7677 7678 static void ath12k_vdev_install_key_compl_event(struct ath12k_base *ab, 7679 struct sk_buff *skb) 7680 { 7681 struct wmi_vdev_install_key_complete_arg install_key_compl = {}; 7682 struct ath12k *ar; 7683 7684 if (ath12k_pull_vdev_install_key_compl_ev(ab, skb, &install_key_compl) != 0) { 7685 ath12k_warn(ab, "failed to extract install key compl event"); 7686 return; 7687 } 7688 7689 ath12k_dbg(ab, ATH12K_DBG_WMI, 7690 "vdev install key ev idx %d flags %08x macaddr %pM status %d\n", 7691 install_key_compl.key_idx, install_key_compl.key_flags, 7692 install_key_compl.macaddr, install_key_compl.status); 7693 7694 rcu_read_lock(); 7695 ar = ath12k_mac_get_ar_by_vdev_id(ab, install_key_compl.vdev_id); 7696 if (!ar) { 7697 ath12k_warn(ab, "invalid vdev id in install key compl ev %d", 7698 install_key_compl.vdev_id); 7699 rcu_read_unlock(); 7700 return; 7701 } 7702 7703 ar->install_key_status = 0; 7704 7705 if (install_key_compl.status != WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS) { 7706 ath12k_warn(ab, "install key failed for %pM status %d\n", 7707 install_key_compl.macaddr, install_key_compl.status); 7708 ar->install_key_status = install_key_compl.status; 7709 } 7710 7711 complete(&ar->install_key_done); 7712 rcu_read_unlock(); 7713 } 7714 7715 static int ath12k_wmi_tlv_services_parser(struct ath12k_base *ab, 7716 u16 tag, u16 len, 7717 const void *ptr, 7718 void *data) 7719 { 7720 const struct wmi_service_available_event *ev; 7721 u16 wmi_ext2_service_words; 7722 __le32 *wmi_ext2_service_bitmap; 7723 int i, j; 7724 u16 expected_len; 7725 7726 expected_len = WMI_SERVICE_SEGMENT_BM_SIZE32 * sizeof(u32); 7727 if (len < expected_len) { 7728 ath12k_warn(ab, "invalid length %d for the WMI services available tag 0x%x\n", 7729 len, tag); 7730 return -EINVAL; 7731 } 7732 7733 switch (tag) { 7734 case WMI_TAG_SERVICE_AVAILABLE_EVENT: 7735 ev = (struct wmi_service_available_event *)ptr; 7736 for (i = 0, j = WMI_MAX_SERVICE; 7737 i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE; 7738 i++) { 7739 do { 7740 if (le32_to_cpu(ev->wmi_service_segment_bitmap[i]) & 7741 BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32)) 7742 set_bit(j, ab->wmi_ab.svc_map); 7743 } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32); 7744 } 7745 7746 ath12k_dbg(ab, ATH12K_DBG_WMI, 7747 "wmi_ext_service_bitmap 0x%x 0x%x 0x%x 0x%x", 7748 ev->wmi_service_segment_bitmap[0], 7749 ev->wmi_service_segment_bitmap[1], 7750 ev->wmi_service_segment_bitmap[2], 7751 ev->wmi_service_segment_bitmap[3]); 7752 break; 7753 case WMI_TAG_ARRAY_UINT32: 7754 wmi_ext2_service_bitmap = (__le32 *)ptr; 7755 wmi_ext2_service_words = len / sizeof(u32); 7756 for (i = 0, j = WMI_MAX_EXT_SERVICE; 7757 i < wmi_ext2_service_words && j < WMI_MAX_EXT2_SERVICE; 7758 i++) { 7759 do { 7760 if (__le32_to_cpu(wmi_ext2_service_bitmap[i]) & 7761 BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32)) 7762 set_bit(j, ab->wmi_ab.svc_map); 7763 } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32); 7764 ath12k_dbg(ab, ATH12K_DBG_WMI, 7765 "wmi_ext2_service bitmap 0x%08x\n", 7766 __le32_to_cpu(wmi_ext2_service_bitmap[i])); 7767 } 7768 7769 break; 7770 } 7771 return 0; 7772 } 7773 7774 static int ath12k_service_available_event(struct ath12k_base *ab, struct sk_buff *skb) 7775 { 7776 int ret; 7777 7778 ret = 
ath12k_wmi_tlv_iter(ab, skb->data, skb->len, 7779 ath12k_wmi_tlv_services_parser, 7780 NULL); 7781 return ret; 7782 } 7783 7784 static void ath12k_peer_assoc_conf_event(struct ath12k_base *ab, struct sk_buff *skb) 7785 { 7786 struct wmi_peer_assoc_conf_arg peer_assoc_conf = {}; 7787 struct ath12k *ar; 7788 7789 if (ath12k_pull_peer_assoc_conf_ev(ab, skb, &peer_assoc_conf) != 0) { 7790 ath12k_warn(ab, "failed to extract peer assoc conf event"); 7791 return; 7792 } 7793 7794 ath12k_dbg(ab, ATH12K_DBG_WMI, 7795 "peer assoc conf ev vdev id %d macaddr %pM\n", 7796 peer_assoc_conf.vdev_id, peer_assoc_conf.macaddr); 7797 7798 rcu_read_lock(); 7799 ar = ath12k_mac_get_ar_by_vdev_id(ab, peer_assoc_conf.vdev_id); 7800 7801 if (!ar) { 7802 ath12k_warn(ab, "invalid vdev id in peer assoc conf ev %d", 7803 peer_assoc_conf.vdev_id); 7804 rcu_read_unlock(); 7805 return; 7806 } 7807 7808 complete(&ar->peer_assoc_done); 7809 rcu_read_unlock(); 7810 } 7811 7812 static void 7813 ath12k_wmi_fw_vdev_stats_dump(struct ath12k *ar, 7814 struct ath12k_fw_stats *fw_stats, 7815 char *buf, u32 *length) 7816 { 7817 const struct ath12k_fw_stats_vdev *vdev; 7818 u32 buf_len = ATH12K_FW_STATS_BUF_SIZE; 7819 struct ath12k_link_vif *arvif; 7820 u32 len = *length; 7821 u8 *vif_macaddr; 7822 int i; 7823 7824 len += scnprintf(buf + len, buf_len - len, "\n"); 7825 len += scnprintf(buf + len, buf_len - len, "%30s\n", 7826 "ath12k VDEV stats"); 7827 len += scnprintf(buf + len, buf_len - len, "%30s\n\n", 7828 "================="); 7829 7830 list_for_each_entry(vdev, &fw_stats->vdevs, list) { 7831 arvif = ath12k_mac_get_arvif(ar, vdev->vdev_id); 7832 if (!arvif) 7833 continue; 7834 vif_macaddr = arvif->ahvif->vif->addr; 7835 7836 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7837 "VDEV ID", vdev->vdev_id); 7838 len += scnprintf(buf + len, buf_len - len, "%30s %pM\n", 7839 "VDEV MAC address", vif_macaddr); 7840 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7841 "beacon snr", vdev->beacon_snr); 7842 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7843 "data snr", vdev->data_snr); 7844 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7845 "num rx frames", vdev->num_rx_frames); 7846 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7847 "num rts fail", vdev->num_rts_fail); 7848 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7849 "num rts success", vdev->num_rts_success); 7850 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7851 "num rx err", vdev->num_rx_err); 7852 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7853 "num rx discard", vdev->num_rx_discard); 7854 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7855 "num tx not acked", vdev->num_tx_not_acked); 7856 7857 for (i = 0 ; i < WLAN_MAX_AC; i++) 7858 len += scnprintf(buf + len, buf_len - len, 7859 "%25s [%02d] %u\n", 7860 "num tx frames", i, 7861 vdev->num_tx_frames[i]); 7862 7863 for (i = 0 ; i < WLAN_MAX_AC; i++) 7864 len += scnprintf(buf + len, buf_len - len, 7865 "%25s [%02d] %u\n", 7866 "num tx frames retries", i, 7867 vdev->num_tx_frames_retries[i]); 7868 7869 for (i = 0 ; i < WLAN_MAX_AC; i++) 7870 len += scnprintf(buf + len, buf_len - len, 7871 "%25s [%02d] %u\n", 7872 "num tx frames failures", i, 7873 vdev->num_tx_frames_failures[i]); 7874 7875 for (i = 0 ; i < MAX_TX_RATE_VALUES; i++) 7876 len += scnprintf(buf + len, buf_len - len, 7877 "%25s [%02d] 0x%08x\n", 7878 "tx rate history", i, 7879 vdev->tx_rate_history[i]); 7880 for (i = 0 ; i < MAX_TX_RATE_VALUES; i++) 7881 len += scnprintf(buf + len, 
buf_len - len,
7882 "%25s [%02d] %u\n",
7883 "beacon rssi history", i,
7884 vdev->beacon_rssi_history[i]);
7885
7886 len += scnprintf(buf + len, buf_len - len, "\n");
7887 *length = len;
7888 }
7889 }
7890
7891 static void
7892 ath12k_wmi_fw_bcn_stats_dump(struct ath12k *ar,
7893 struct ath12k_fw_stats *fw_stats,
7894 char *buf, u32 *length)
7895 {
7896 const struct ath12k_fw_stats_bcn *bcn;
7897 u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
7898 struct ath12k_link_vif *arvif;
7899 u32 len = *length;
7900 size_t num_bcn;
7901
7902 num_bcn = list_count_nodes(&fw_stats->bcn);
7903
7904 len += scnprintf(buf + len, buf_len - len, "\n");
7905 len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
7906 "ath12k Beacon stats", num_bcn);
7907 len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
7908 "===================");
7909
7910 list_for_each_entry(bcn, &fw_stats->bcn, list) {
7911 arvif = ath12k_mac_get_arvif(ar, bcn->vdev_id);
7912 if (!arvif)
7913 continue;
7914 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7915 "VDEV ID", bcn->vdev_id);
7916 len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
7917 "VDEV MAC address", arvif->ahvif->vif->addr);
7918 len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
7919 "================");
7920 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7921 "Num of beacon tx success", bcn->tx_bcn_succ_cnt);
7922 len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
7923 "Num of beacon tx failures", bcn->tx_bcn_outage_cnt);
7924
7925 len += scnprintf(buf + len, buf_len - len, "\n");
7926 *length = len;
7927 }
7928 }
7929
7930 static void
7931 ath12k_wmi_fw_pdev_base_stats_dump(const struct ath12k_fw_stats_pdev *pdev,
7932 char *buf, u32 *length, u64 fw_soc_drop_cnt)
7933 {
7934 u32 len = *length;
7935 u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
7936
7937 len += scnprintf(buf + len, buf_len - len, "\n");
7938 len += scnprintf(buf + len, buf_len - len, "%30s\n",
7939 "ath12k PDEV stats");
7940 len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
7941 "=================");
7942
7943 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7944 "Channel noise floor", pdev->ch_noise_floor);
7945 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7946 "Channel TX power", pdev->chan_tx_power);
7947 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7948 "TX frame count", pdev->tx_frame_count);
7949 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7950 "RX frame count", pdev->rx_frame_count);
7951 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7952 "RX clear count", pdev->rx_clear_count);
7953 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7954 "Cycle count", pdev->cycle_count);
7955 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
7956 "PHY error count", pdev->phy_err_count);
7957 len += scnprintf(buf + len, buf_len - len, "%30s %10llu\n",
7958 "soc drop count", fw_soc_drop_cnt);
7959
7960 *length = len;
7961 }
7962
7963 static void
7964 ath12k_wmi_fw_pdev_tx_stats_dump(const struct ath12k_fw_stats_pdev *pdev,
7965 char *buf, u32 *length)
7966 {
7967 u32 len = *length;
7968 u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
7969
7970 len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
7971 "ath12k PDEV TX stats");
7972 len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
7973 "====================");
7974
7975 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7976 "HTT cookies queued", pdev->comp_queued);
7977 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
7978 "HTT cookies disp.",
pdev->comp_delivered); 7979 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7980 "MSDU queued", pdev->msdu_enqued); 7981 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7982 "MPDU queued", pdev->mpdu_enqued); 7983 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7984 "MSDUs dropped", pdev->wmm_drop); 7985 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7986 "Local enqued", pdev->local_enqued); 7987 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7988 "Local freed", pdev->local_freed); 7989 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7990 "HW queued", pdev->hw_queued); 7991 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7992 "PPDUs reaped", pdev->hw_reaped); 7993 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7994 "Num underruns", pdev->underrun); 7995 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7996 "PPDUs cleaned", pdev->tx_abort); 7997 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7998 "MPDUs requeued", pdev->mpdus_requed); 7999 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 8000 "Excessive retries", pdev->tx_ko); 8001 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 8002 "HW rate", pdev->data_rc); 8003 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 8004 "Sched self triggers", pdev->self_triggers); 8005 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 8006 "Dropped due to SW retries", 8007 pdev->sw_retry_failure); 8008 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 8009 "Illegal rate phy errors", 8010 pdev->illgl_rate_phy_err); 8011 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 8012 "PDEV continuous xretry", pdev->pdev_cont_xretry); 8013 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 8014 "TX timeout", pdev->pdev_tx_timeout); 8015 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 8016 "PDEV resets", pdev->pdev_resets); 8017 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 8018 "Stateless TIDs alloc failures", 8019 pdev->stateless_tid_alloc_failure); 8020 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 8021 "PHY underrun", pdev->phy_underrun); 8022 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 8023 "MPDU is more than txop limit", pdev->txop_ovf); 8024 *length = len; 8025 } 8026 8027 static void 8028 ath12k_wmi_fw_pdev_rx_stats_dump(const struct ath12k_fw_stats_pdev *pdev, 8029 char *buf, u32 *length) 8030 { 8031 u32 len = *length; 8032 u32 buf_len = ATH12K_FW_STATS_BUF_SIZE; 8033 8034 len += scnprintf(buf + len, buf_len - len, "\n%30s\n", 8035 "ath12k PDEV RX stats"); 8036 len += scnprintf(buf + len, buf_len - len, "%30s\n\n", 8037 "===================="); 8038 8039 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8040 "Mid PPDU route change", 8041 pdev->mid_ppdu_route_change); 8042 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8043 "Tot. 
number of statuses", pdev->status_rcvd);
8044 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8045 "Extra frags on ring 0", pdev->r0_frags);
8046 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8047 "Extra frags on ring 1", pdev->r1_frags);
8048 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8049 "Extra frags on ring 2", pdev->r2_frags);
8050 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8051 "Extra frags on ring 3", pdev->r3_frags);
8052 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8053 "MSDUs delivered to HTT", pdev->htt_msdus);
8054 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8055 "MPDUs delivered to HTT", pdev->htt_mpdus);
8056 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8057 "MSDUs delivered to stack", pdev->loc_msdus);
8058 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8059 "MPDUs delivered to stack", pdev->loc_mpdus);
8060 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8061 "Oversized A-MSDUs", pdev->oversize_amsdu);
8062 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8063 "PHY errors", pdev->phy_errs);
8064 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8065 "PHY error drops", pdev->phy_err_drop);
8066 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8067 "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
8068 *length = len;
8069 }
8070
8071 static void
8072 ath12k_wmi_fw_pdev_stats_dump(struct ath12k *ar,
8073 struct ath12k_fw_stats *fw_stats,
8074 char *buf, u32 *length)
8075 {
8076 const struct ath12k_fw_stats_pdev *pdev;
8077 u32 len = *length;
8078
8079 pdev = list_first_entry_or_null(&fw_stats->pdevs,
8080 struct ath12k_fw_stats_pdev, list);
8081 if (!pdev) {
8082 ath12k_warn(ar->ab, "failed to get pdev stats\n");
8083 return;
8084 }
8085
8086 ath12k_wmi_fw_pdev_base_stats_dump(pdev, buf, &len,
8087 ar->ab->fw_soc_drop_count);
8088 ath12k_wmi_fw_pdev_tx_stats_dump(pdev, buf, &len);
8089 ath12k_wmi_fw_pdev_rx_stats_dump(pdev, buf, &len);
8090
8091 *length = len;
8092 }
8093
8094 void ath12k_wmi_fw_stats_dump(struct ath12k *ar,
8095 struct ath12k_fw_stats *fw_stats,
8096 u32 stats_id, char *buf)
8097 {
8098 u32 len = 0;
8099 u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
8100
8101 spin_lock_bh(&ar->data_lock);
8102
8103 switch (stats_id) {
8104 case WMI_REQUEST_VDEV_STAT:
8105 ath12k_wmi_fw_vdev_stats_dump(ar, fw_stats, buf, &len);
8106 break;
8107 case WMI_REQUEST_BCN_STAT:
8108 ath12k_wmi_fw_bcn_stats_dump(ar, fw_stats, buf, &len);
8109 break;
8110 case WMI_REQUEST_PDEV_STAT:
8111 ath12k_wmi_fw_pdev_stats_dump(ar, fw_stats, buf, &len);
8112 break;
8113 default:
8114 break;
8115 }
8116
8117 spin_unlock_bh(&ar->data_lock);
8118
8119 if (len >= buf_len)
8120 buf[len - 1] = 0;
8121 else
8122 buf[len] = 0;
8123 }
8124
8125 static void
8126 ath12k_wmi_pull_vdev_stats(const struct wmi_vdev_stats_params *src,
8127 struct ath12k_fw_stats_vdev *dst)
8128 {
8129 int i;
8130
8131 dst->vdev_id = le32_to_cpu(src->vdev_id);
8132 dst->beacon_snr = le32_to_cpu(src->beacon_snr);
8133 dst->data_snr = le32_to_cpu(src->data_snr);
8134 dst->num_rx_frames = le32_to_cpu(src->num_rx_frames);
8135 dst->num_rts_fail = le32_to_cpu(src->num_rts_fail);
8136 dst->num_rts_success = le32_to_cpu(src->num_rts_success);
8137 dst->num_rx_err = le32_to_cpu(src->num_rx_err);
8138 dst->num_rx_discard = le32_to_cpu(src->num_rx_discard);
8139 dst->num_tx_not_acked = le32_to_cpu(src->num_tx_not_acked);
8140
8141 for (i = 0; i < WLAN_MAX_AC; i++)
8142 dst->num_tx_frames[i] =
8143
le32_to_cpu(src->num_tx_frames[i]); 8144 8145 for (i = 0; i < WLAN_MAX_AC; i++) 8146 dst->num_tx_frames_retries[i] = 8147 le32_to_cpu(src->num_tx_frames_retries[i]); 8148 8149 for (i = 0; i < WLAN_MAX_AC; i++) 8150 dst->num_tx_frames_failures[i] = 8151 le32_to_cpu(src->num_tx_frames_failures[i]); 8152 8153 for (i = 0; i < MAX_TX_RATE_VALUES; i++) 8154 dst->tx_rate_history[i] = 8155 le32_to_cpu(src->tx_rate_history[i]); 8156 8157 for (i = 0; i < MAX_TX_RATE_VALUES; i++) 8158 dst->beacon_rssi_history[i] = 8159 le32_to_cpu(src->beacon_rssi_history[i]); 8160 } 8161 8162 static void 8163 ath12k_wmi_pull_bcn_stats(const struct ath12k_wmi_bcn_stats_params *src, 8164 struct ath12k_fw_stats_bcn *dst) 8165 { 8166 dst->vdev_id = le32_to_cpu(src->vdev_id); 8167 dst->tx_bcn_succ_cnt = le32_to_cpu(src->tx_bcn_succ_cnt); 8168 dst->tx_bcn_outage_cnt = le32_to_cpu(src->tx_bcn_outage_cnt); 8169 } 8170 8171 static void 8172 ath12k_wmi_pull_pdev_stats_base(const struct ath12k_wmi_pdev_base_stats_params *src, 8173 struct ath12k_fw_stats_pdev *dst) 8174 { 8175 dst->ch_noise_floor = a_sle32_to_cpu(src->chan_nf); 8176 dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count); 8177 dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count); 8178 dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count); 8179 dst->cycle_count = __le32_to_cpu(src->cycle_count); 8180 dst->phy_err_count = __le32_to_cpu(src->phy_err_count); 8181 dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr); 8182 } 8183 8184 static void 8185 ath12k_wmi_pull_pdev_stats_tx(const struct ath12k_wmi_pdev_tx_stats_params *src, 8186 struct ath12k_fw_stats_pdev *dst) 8187 { 8188 dst->comp_queued = a_sle32_to_cpu(src->comp_queued); 8189 dst->comp_delivered = a_sle32_to_cpu(src->comp_delivered); 8190 dst->msdu_enqued = a_sle32_to_cpu(src->msdu_enqued); 8191 dst->mpdu_enqued = a_sle32_to_cpu(src->mpdu_enqued); 8192 dst->wmm_drop = a_sle32_to_cpu(src->wmm_drop); 8193 dst->local_enqued = a_sle32_to_cpu(src->local_enqued); 8194 dst->local_freed = a_sle32_to_cpu(src->local_freed); 8195 dst->hw_queued = a_sle32_to_cpu(src->hw_queued); 8196 dst->hw_reaped = a_sle32_to_cpu(src->hw_reaped); 8197 dst->underrun = a_sle32_to_cpu(src->underrun); 8198 dst->tx_abort = a_sle32_to_cpu(src->tx_abort); 8199 dst->mpdus_requed = a_sle32_to_cpu(src->mpdus_requed); 8200 dst->tx_ko = __le32_to_cpu(src->tx_ko); 8201 dst->data_rc = __le32_to_cpu(src->data_rc); 8202 dst->self_triggers = __le32_to_cpu(src->self_triggers); 8203 dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure); 8204 dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err); 8205 dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry); 8206 dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout); 8207 dst->pdev_resets = __le32_to_cpu(src->pdev_resets); 8208 dst->stateless_tid_alloc_failure = 8209 __le32_to_cpu(src->stateless_tid_alloc_failure); 8210 dst->phy_underrun = __le32_to_cpu(src->phy_underrun); 8211 dst->txop_ovf = __le32_to_cpu(src->txop_ovf); 8212 } 8213 8214 static void 8215 ath12k_wmi_pull_pdev_stats_rx(const struct ath12k_wmi_pdev_rx_stats_params *src, 8216 struct ath12k_fw_stats_pdev *dst) 8217 { 8218 dst->mid_ppdu_route_change = 8219 a_sle32_to_cpu(src->mid_ppdu_route_change); 8220 dst->status_rcvd = a_sle32_to_cpu(src->status_rcvd); 8221 dst->r0_frags = a_sle32_to_cpu(src->r0_frags); 8222 dst->r1_frags = a_sle32_to_cpu(src->r1_frags); 8223 dst->r2_frags = a_sle32_to_cpu(src->r2_frags); 8224 dst->r3_frags = a_sle32_to_cpu(src->r3_frags); 8225 dst->htt_msdus = 
a_sle32_to_cpu(src->htt_msdus); 8226 dst->htt_mpdus = a_sle32_to_cpu(src->htt_mpdus); 8227 dst->loc_msdus = a_sle32_to_cpu(src->loc_msdus); 8228 dst->loc_mpdus = a_sle32_to_cpu(src->loc_mpdus); 8229 dst->oversize_amsdu = a_sle32_to_cpu(src->oversize_amsdu); 8230 dst->phy_errs = a_sle32_to_cpu(src->phy_errs); 8231 dst->phy_err_drop = a_sle32_to_cpu(src->phy_err_drop); 8232 dst->mpdu_errs = a_sle32_to_cpu(src->mpdu_errs); 8233 } 8234 8235 static int ath12k_wmi_tlv_fw_stats_data_parse(struct ath12k_base *ab, 8236 struct wmi_tlv_fw_stats_parse *parse, 8237 const void *ptr, 8238 u16 len) 8239 { 8240 const struct wmi_stats_event *ev = parse->ev; 8241 struct ath12k_fw_stats *stats = parse->stats; 8242 struct ath12k *ar; 8243 struct ath12k_link_vif *arvif; 8244 struct ieee80211_sta *sta; 8245 struct ath12k_sta *ahsta; 8246 struct ath12k_link_sta *arsta; 8247 int i, ret = 0; 8248 const void *data = ptr; 8249 8250 if (!ev) { 8251 ath12k_warn(ab, "failed to fetch update stats ev"); 8252 return -EPROTO; 8253 } 8254 8255 if (!stats) 8256 return -EINVAL; 8257 8258 rcu_read_lock(); 8259 8260 stats->pdev_id = le32_to_cpu(ev->pdev_id); 8261 ar = ath12k_mac_get_ar_by_pdev_id(ab, stats->pdev_id); 8262 if (!ar) { 8263 ath12k_warn(ab, "invalid pdev id %d in update stats event\n", 8264 le32_to_cpu(ev->pdev_id)); 8265 ret = -EPROTO; 8266 goto exit; 8267 } 8268 8269 for (i = 0; i < le32_to_cpu(ev->num_vdev_stats); i++) { 8270 const struct wmi_vdev_stats_params *src; 8271 struct ath12k_fw_stats_vdev *dst; 8272 8273 src = data; 8274 if (len < sizeof(*src)) { 8275 ret = -EPROTO; 8276 goto exit; 8277 } 8278 8279 arvif = ath12k_mac_get_arvif(ar, le32_to_cpu(src->vdev_id)); 8280 if (arvif) { 8281 sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar), 8282 arvif->bssid, 8283 NULL); 8284 if (sta) { 8285 ahsta = ath12k_sta_to_ahsta(sta); 8286 arsta = &ahsta->deflink; 8287 arsta->rssi_beacon = le32_to_cpu(src->beacon_snr); 8288 ath12k_dbg(ab, ATH12K_DBG_WMI, 8289 "wmi stats vdev id %d snr %d\n", 8290 src->vdev_id, src->beacon_snr); 8291 } else { 8292 ath12k_dbg(ab, ATH12K_DBG_WMI, 8293 "not found station bssid %pM for vdev stat\n", 8294 arvif->bssid); 8295 } 8296 } 8297 8298 data += sizeof(*src); 8299 len -= sizeof(*src); 8300 dst = kzalloc(sizeof(*dst), GFP_ATOMIC); 8301 if (!dst) 8302 continue; 8303 ath12k_wmi_pull_vdev_stats(src, dst); 8304 stats->stats_id = WMI_REQUEST_VDEV_STAT; 8305 list_add_tail(&dst->list, &stats->vdevs); 8306 } 8307 for (i = 0; i < le32_to_cpu(ev->num_bcn_stats); i++) { 8308 const struct ath12k_wmi_bcn_stats_params *src; 8309 struct ath12k_fw_stats_bcn *dst; 8310 8311 src = data; 8312 if (len < sizeof(*src)) { 8313 ret = -EPROTO; 8314 goto exit; 8315 } 8316 8317 data += sizeof(*src); 8318 len -= sizeof(*src); 8319 dst = kzalloc(sizeof(*dst), GFP_ATOMIC); 8320 if (!dst) 8321 continue; 8322 ath12k_wmi_pull_bcn_stats(src, dst); 8323 stats->stats_id = WMI_REQUEST_BCN_STAT; 8324 list_add_tail(&dst->list, &stats->bcn); 8325 } 8326 for (i = 0; i < le32_to_cpu(ev->num_pdev_stats); i++) { 8327 const struct ath12k_wmi_pdev_stats_params *src; 8328 struct ath12k_fw_stats_pdev *dst; 8329 8330 src = data; 8331 if (len < sizeof(*src)) { 8332 ret = -EPROTO; 8333 goto exit; 8334 } 8335 8336 stats->stats_id = WMI_REQUEST_PDEV_STAT; 8337 8338 data += sizeof(*src); 8339 len -= sizeof(*src); 8340 8341 dst = kzalloc(sizeof(*dst), GFP_ATOMIC); 8342 if (!dst) 8343 continue; 8344 8345 ath12k_wmi_pull_pdev_stats_base(&src->base, dst); 8346 ath12k_wmi_pull_pdev_stats_tx(&src->tx, dst); 8347 
ath12k_wmi_pull_pdev_stats_rx(&src->rx, dst); 8348 list_add_tail(&dst->list, &stats->pdevs); 8349 } 8350 8351 exit: 8352 rcu_read_unlock(); 8353 return ret; 8354 } 8355 8356 static int ath12k_wmi_tlv_rssi_chain_parse(struct ath12k_base *ab, 8357 u16 tag, u16 len, 8358 const void *ptr, void *data) 8359 { 8360 const struct wmi_rssi_stat_params *stats_rssi = ptr; 8361 struct wmi_tlv_fw_stats_parse *parse = data; 8362 const struct wmi_stats_event *ev = parse->ev; 8363 struct ath12k_fw_stats *stats = parse->stats; 8364 struct ath12k_link_vif *arvif; 8365 struct ath12k_link_sta *arsta; 8366 struct ieee80211_sta *sta; 8367 struct ath12k_sta *ahsta; 8368 struct ath12k *ar; 8369 int vdev_id; 8370 int j; 8371 8372 if (!ev) { 8373 ath12k_warn(ab, "failed to fetch update stats ev"); 8374 return -EPROTO; 8375 } 8376 8377 if (tag != WMI_TAG_RSSI_STATS) 8378 return -EPROTO; 8379 8380 if (!stats) 8381 return -EINVAL; 8382 8383 stats->pdev_id = le32_to_cpu(ev->pdev_id); 8384 vdev_id = le32_to_cpu(stats_rssi->vdev_id); 8385 guard(rcu)(); 8386 ar = ath12k_mac_get_ar_by_pdev_id(ab, stats->pdev_id); 8387 if (!ar) { 8388 ath12k_warn(ab, "invalid pdev id %d in rssi chain parse\n", 8389 stats->pdev_id); 8390 return -EPROTO; 8391 } 8392 8393 arvif = ath12k_mac_get_arvif(ar, vdev_id); 8394 if (!arvif) { 8395 ath12k_warn(ab, "not found vif for vdev id %d\n", vdev_id); 8396 return -EPROTO; 8397 } 8398 8399 ath12k_dbg(ab, ATH12K_DBG_WMI, 8400 "stats bssid %pM vif %p\n", 8401 arvif->bssid, arvif->ahvif->vif); 8402 8403 sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar), 8404 arvif->bssid, 8405 NULL); 8406 if (!sta) { 8407 ath12k_dbg(ab, ATH12K_DBG_WMI, 8408 "not found station of bssid %pM for rssi chain\n", 8409 arvif->bssid); 8410 return -EPROTO; 8411 } 8412 8413 ahsta = ath12k_sta_to_ahsta(sta); 8414 arsta = &ahsta->deflink; 8415 8416 BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) > 8417 ARRAY_SIZE(stats_rssi->rssi_avg_beacon)); 8418 8419 for (j = 0; j < ARRAY_SIZE(arsta->chain_signal); j++) 8420 arsta->chain_signal[j] = le32_to_cpu(stats_rssi->rssi_avg_beacon[j]); 8421 8422 stats->stats_id = WMI_REQUEST_RSSI_PER_CHAIN_STAT; 8423 8424 return 0; 8425 } 8426 8427 static int ath12k_wmi_tlv_fw_stats_parse(struct ath12k_base *ab, 8428 u16 tag, u16 len, 8429 const void *ptr, void *data) 8430 { 8431 struct wmi_tlv_fw_stats_parse *parse = data; 8432 int ret = 0; 8433 8434 switch (tag) { 8435 case WMI_TAG_STATS_EVENT: 8436 parse->ev = ptr; 8437 break; 8438 case WMI_TAG_ARRAY_BYTE: 8439 ret = ath12k_wmi_tlv_fw_stats_data_parse(ab, parse, ptr, len); 8440 break; 8441 case WMI_TAG_PER_CHAIN_RSSI_STATS: 8442 parse->rssi = ptr; 8443 if (le32_to_cpu(parse->ev->stats_id) & WMI_REQUEST_RSSI_PER_CHAIN_STAT) 8444 parse->rssi_num = le32_to_cpu(parse->rssi->num_per_chain_rssi); 8445 break; 8446 case WMI_TAG_ARRAY_STRUCT: 8447 if (parse->rssi_num && !parse->chain_rssi_done) { 8448 ret = ath12k_wmi_tlv_iter(ab, ptr, len, 8449 ath12k_wmi_tlv_rssi_chain_parse, 8450 parse); 8451 if (ret) 8452 return ret; 8453 8454 parse->chain_rssi_done = true; 8455 } 8456 break; 8457 default: 8458 break; 8459 } 8460 return ret; 8461 } 8462 8463 static int ath12k_wmi_pull_fw_stats(struct ath12k_base *ab, struct sk_buff *skb, 8464 struct ath12k_fw_stats *stats) 8465 { 8466 struct wmi_tlv_fw_stats_parse parse = {}; 8467 8468 stats->stats_id = 0; 8469 parse.stats = stats; 8470 8471 return ath12k_wmi_tlv_iter(ab, skb->data, skb->len, 8472 ath12k_wmi_tlv_fw_stats_parse, 8473 &parse); 8474 } 8475 8476 static void ath12k_wmi_fw_stats_process(struct ath12k *ar, 8477 
struct ath12k_fw_stats *stats)
8478 {
8479 struct ath12k_base *ab = ar->ab;
8480 struct ath12k_pdev *pdev;
8481 bool is_end = true;
8482 size_t total_vdevs_started = 0;
8483 int i;
8484
8485 if (stats->stats_id == WMI_REQUEST_VDEV_STAT) {
8486 if (list_empty(&stats->vdevs)) {
8487 ath12k_warn(ab, "empty vdev stats");
8488 return;
8489 }
8490 /* FW sends stats for all active VDEVs irrespective of PDEV,
8491 * hence wait until stats for all started VDEVs are received.
8492 */
8493 rcu_read_lock();
8494 for (i = 0; i < ab->num_radios; i++) {
8495 pdev = rcu_dereference(ab->pdevs_active[i]);
8496 if (pdev && pdev->ar)
8497 total_vdevs_started += pdev->ar->num_started_vdevs;
8498 }
8499 rcu_read_unlock();
8500
8501 if (total_vdevs_started)
8502 is_end = ((++ar->fw_stats.num_vdev_recvd) ==
8503 total_vdevs_started);
8504
8505 list_splice_tail_init(&stats->vdevs,
8506 &ar->fw_stats.vdevs);
8507
8508 if (is_end)
8509 complete(&ar->fw_stats_done);
8510
8511 return;
8512 }
8513
8514 if (stats->stats_id == WMI_REQUEST_BCN_STAT) {
8515 if (list_empty(&stats->bcn)) {
8516 ath12k_warn(ab, "empty beacon stats");
8517 return;
8518 }
8519
8520 list_splice_tail_init(&stats->bcn,
8521 &ar->fw_stats.bcn);
8522 complete(&ar->fw_stats_done);
8523 }
8524 }
8525
8526 static void ath12k_update_stats_event(struct ath12k_base *ab, struct sk_buff *skb)
8527 {
8528 struct ath12k_fw_stats stats = {};
8529 struct ath12k *ar;
8530 int ret;
8531
8532 INIT_LIST_HEAD(&stats.pdevs);
8533 INIT_LIST_HEAD(&stats.vdevs);
8534 INIT_LIST_HEAD(&stats.bcn);
8535
8536 ret = ath12k_wmi_pull_fw_stats(ab, skb, &stats);
8537 if (ret) {
8538 ath12k_warn(ab, "failed to pull fw stats: %d\n", ret);
8539 goto free;
8540 }
8541
8542 ath12k_dbg(ab, ATH12K_DBG_WMI, "event update stats");
8543
8544 rcu_read_lock();
8545 ar = ath12k_mac_get_ar_by_pdev_id(ab, stats.pdev_id);
8546 if (!ar) {
8547 rcu_read_unlock();
8548 ath12k_warn(ab, "failed to get ar for pdev_id %d\n",
8549 stats.pdev_id);
8550 goto free;
8551 }
8552
8553 spin_lock_bh(&ar->data_lock);
8554
8555 /* Handle WMI_REQUEST_PDEV_STAT status update */
8556 if (stats.stats_id == WMI_REQUEST_PDEV_STAT) {
8557 list_splice_tail_init(&stats.pdevs, &ar->fw_stats.pdevs);
8558 complete(&ar->fw_stats_done);
8559 goto complete;
8560 }
8561
8562 /* Handle WMI_REQUEST_RSSI_PER_CHAIN_STAT status update */
8563 if (stats.stats_id == WMI_REQUEST_RSSI_PER_CHAIN_STAT) {
8564 complete(&ar->fw_stats_done);
8565 goto complete;
8566 }
8567
8568 /* Handle WMI_REQUEST_VDEV_STAT and WMI_REQUEST_BCN_STAT updates. */
8569 ath12k_wmi_fw_stats_process(ar, &stats);
8570
8571 complete:
8572 complete(&ar->fw_stats_complete);
8573 spin_unlock_bh(&ar->data_lock);
8574 rcu_read_unlock();
8575
8576 /* Since the stats' pdev, vdev and beacon lists are spliced and reinitialised
8577 * at this point, there is no need to free the individual lists.
8578 */
8579 return;
8580
8581 free:
8582 ath12k_fw_stats_free(&stats);
8583 }
8584
8585 /* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the scanned frequency
8586 * is not part of the BDF CTL (Conformance Test Limits) table entries.
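* The handler below only parses and logs this condition; the actual
* power limiting is done by firmware.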
*/
8588 static void ath12k_pdev_ctl_failsafe_check_event(struct ath12k_base *ab,
8589 struct sk_buff *skb)
8590 {
8591 const void **tb;
8592 const struct wmi_pdev_ctl_failsafe_chk_event *ev;
8593 int ret;
8594
8595 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8596 if (IS_ERR(tb)) {
8597 ret = PTR_ERR(tb);
8598 ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
8599 return;
8600 }
8601
8602 ev = tb[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT];
8603 if (!ev) {
8604 ath12k_warn(ab, "failed to fetch pdev ctl failsafe check ev");
8605 kfree(tb);
8606 return;
8607 }
8608
8609 ath12k_dbg(ab, ATH12K_DBG_WMI,
8610 "pdev ctl failsafe check ev status %d\n",
8611 ev->ctl_failsafe_status);
8612
8613 /* If ctl_failsafe_status is set to 1, FW will max out the transmit power
8614 * to 10 dBm, else the CTL power entry in the BDF is picked up.
8615 */
8616 if (ev->ctl_failsafe_status != 0)
8617 ath12k_warn(ab, "pdev ctl failsafe failure status %d",
8618 ev->ctl_failsafe_status);
8619
8620 kfree(tb);
8621 }
8622
8623 static void
8624 ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
8625 const struct ath12k_wmi_pdev_csa_event *ev,
8626 const u32 *vdev_ids)
8627 {
8628 u32 current_switch_count = le32_to_cpu(ev->current_switch_count);
8629 u32 num_vdevs = le32_to_cpu(ev->num_vdevs);
8630 struct ieee80211_bss_conf *conf;
8631 struct ath12k_link_vif *arvif;
8632 struct ath12k_vif *ahvif;
8633 int i;
8634
8635 rcu_read_lock();
8636 for (i = 0; i < num_vdevs; i++) {
8637 arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_ids[i]);
8638
8639 if (!arvif) {
8640 ath12k_warn(ab, "Recvd csa status for unknown vdev %d",
8641 vdev_ids[i]);
8642 continue;
8643 }
8644 ahvif = arvif->ahvif;
8645
8646 if (arvif->link_id >= IEEE80211_MLD_MAX_NUM_LINKS) {
8647 ath12k_warn(ab, "Invalid CSA switch count event link id: %d\n",
8648 arvif->link_id);
8649 continue;
8650 }
8651
8652 conf = rcu_dereference(ahvif->vif->link_conf[arvif->link_id]);
8653 if (!conf) {
8654 ath12k_warn(ab, "unable to access bss link conf in process csa for vif %pM link %u\n",
8655 ahvif->vif->addr, arvif->link_id);
8656 continue;
8657 }
8658
8659 if (!arvif->is_up || !conf->csa_active)
8660 continue;
8661
8662 /* Finish CSA when counter reaches zero */
8663 if (!current_switch_count) {
8664 ieee80211_csa_finish(ahvif->vif, arvif->link_id);
8665 arvif->current_cntdown_counter = 0;
8666 } else if (current_switch_count > 1) {
8667 /* If the count in the event is not what we expect, don't update the
8668 * mac80211 count.
Since on beacon Tx failure the
8669 * firmware does not decrement the count, so this event can carry the
8670 * previous count value again.
8671 */
8672 if (current_switch_count != arvif->current_cntdown_counter)
8673 continue;
8674
8675 arvif->current_cntdown_counter =
8676 ieee80211_beacon_update_cntdwn(ahvif->vif,
8677 arvif->link_id);
8678 }
8679 }
8680 rcu_read_unlock();
8681 }
8682
8683 static void
8684 ath12k_wmi_pdev_csa_switch_count_status_event(struct ath12k_base *ab,
8685 struct sk_buff *skb)
8686 {
8687 const void **tb;
8688 const struct ath12k_wmi_pdev_csa_event *ev;
8689 const u32 *vdev_ids;
8690 int ret;
8691
8692 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8693 if (IS_ERR(tb)) {
8694 ret = PTR_ERR(tb);
8695 ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
8696 return;
8697 }
8698
8699 ev = tb[WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT];
8700 vdev_ids = tb[WMI_TAG_ARRAY_UINT32];
8701
8702 if (!ev || !vdev_ids) {
8703 ath12k_warn(ab, "failed to fetch pdev csa switch count ev");
8704 kfree(tb);
8705 return;
8706 }
8707
8708 ath12k_dbg(ab, ATH12K_DBG_WMI,
8709 "pdev csa switch count %d for pdev %d, num_vdevs %d",
8710 ev->current_switch_count, ev->pdev_id,
8711 ev->num_vdevs);
8712
8713 ath12k_wmi_process_csa_switch_count_event(ab, ev, vdev_ids);
8714
8715 kfree(tb);
8716 }
8717
8718 static void
8719 ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff *skb)
8720 {
8721 const void **tb;
8722 struct ath12k_mac_get_any_chanctx_conf_arg arg;
8723 const struct ath12k_wmi_pdev_radar_event *ev;
8724 struct ath12k *ar;
8725 int ret;
8726
8727 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8728 if (IS_ERR(tb)) {
8729 ret = PTR_ERR(tb);
8730 ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
8731 return;
8732 }
8733
8734 ev = tb[WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT];
8735
8736 if (!ev) {
8737 ath12k_warn(ab, "failed to fetch pdev dfs radar detected ev");
8738 kfree(tb);
8739 return;
8740 }
8741
8742 ath12k_dbg(ab, ATH12K_DBG_WMI,
8743 "pdev dfs radar detected on pdev %d, detection mode %d, chan freq %d, chan_width %d, detector id %d, seg id %d, timestamp %d, chirp %d, freq offset %d, sidx %d",
8744 ev->pdev_id, ev->detection_mode, ev->chan_freq, ev->chan_width,
8745 ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp,
8746 ev->freq_offset, ev->sidx);
8747
8748 rcu_read_lock();
8749
8750 ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev->pdev_id));
8751
8752 if (!ar) {
8753 ath12k_warn(ab, "radar detected in invalid pdev %d\n",
8754 ev->pdev_id);
8755 goto exit;
8756 }
8757
8758 arg.ar = ar;
8759 arg.chanctx_conf = NULL;
8760 ieee80211_iter_chan_contexts_atomic(ath12k_ar_to_hw(ar),
8761 ath12k_mac_get_any_chanctx_conf_iter, &arg);
8762 if (!arg.chanctx_conf) {
8763 ath12k_warn(ab, "failed to find valid chanctx_conf in radar detected event\n");
8764 goto exit;
8765 }
8766
8767 ath12k_dbg(ar->ab, ATH12K_DBG_REG, "DFS Radar Detected in pdev %d\n",
8768 ev->pdev_id);
8769
8770 if (ar->dfs_block_radar_events)
8771 ath12k_info(ab, "DFS Radar detected, but ignored as requested\n");
8772 else
8773 ieee80211_radar_detected(ath12k_ar_to_hw(ar), arg.chanctx_conf);
8774
8775 exit:
8776 rcu_read_unlock();
8777
8778 kfree(tb);
8779 }
8780
8781 static void ath12k_tm_wmi_event_segmented(struct ath12k_base *ab, u32 cmd_id,
8782 struct sk_buff *skb)
8783 {
8784 const struct ath12k_wmi_ftm_event *ev;
8785 const void **tb;
8786 int ret;
8787 u16 length;
8788
8789 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8790
8791 if
(IS_ERR(tb)) { 8792 ret = PTR_ERR(tb); 8793 ath12k_warn(ab, "failed to parse ftm event tlv: %d\n", ret); 8794 return; 8795 } 8796 8797 ev = tb[WMI_TAG_ARRAY_BYTE]; 8798 if (!ev) { 8799 ath12k_warn(ab, "failed to fetch ftm msg\n"); 8800 kfree(tb); 8801 return; 8802 } 8803 8804 length = skb->len - TLV_HDR_SIZE; 8805 ath12k_tm_process_event(ab, cmd_id, ev, length); 8806 kfree(tb); 8807 tb = NULL; 8808 } 8809 8810 static void 8811 ath12k_wmi_pdev_temperature_event(struct ath12k_base *ab, 8812 struct sk_buff *skb) 8813 { 8814 struct ath12k *ar; 8815 struct wmi_pdev_temperature_event ev = {}; 8816 8817 if (ath12k_pull_pdev_temp_ev(ab, skb, &ev) != 0) { 8818 ath12k_warn(ab, "failed to extract pdev temperature event"); 8819 return; 8820 } 8821 8822 ath12k_dbg(ab, ATH12K_DBG_WMI, 8823 "pdev temperature ev temp %d pdev_id %d\n", ev.temp, ev.pdev_id); 8824 8825 rcu_read_lock(); 8826 8827 ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev.pdev_id)); 8828 if (!ar) { 8829 ath12k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev.pdev_id); 8830 goto exit; 8831 } 8832 8833 exit: 8834 rcu_read_unlock(); 8835 } 8836 8837 static void ath12k_fils_discovery_event(struct ath12k_base *ab, 8838 struct sk_buff *skb) 8839 { 8840 const void **tb; 8841 const struct wmi_fils_discovery_event *ev; 8842 int ret; 8843 8844 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 8845 if (IS_ERR(tb)) { 8846 ret = PTR_ERR(tb); 8847 ath12k_warn(ab, 8848 "failed to parse FILS discovery event tlv %d\n", 8849 ret); 8850 return; 8851 } 8852 8853 ev = tb[WMI_TAG_HOST_SWFDA_EVENT]; 8854 if (!ev) { 8855 ath12k_warn(ab, "failed to fetch FILS discovery event\n"); 8856 kfree(tb); 8857 return; 8858 } 8859 8860 ath12k_warn(ab, 8861 "FILS discovery frame expected from host for vdev_id: %u, transmission scheduled at %u, next TBTT: %u\n", 8862 ev->vdev_id, ev->fils_tt, ev->tbtt); 8863 8864 kfree(tb); 8865 } 8866 8867 static void ath12k_probe_resp_tx_status_event(struct ath12k_base *ab, 8868 struct sk_buff *skb) 8869 { 8870 const void **tb; 8871 const struct wmi_probe_resp_tx_status_event *ev; 8872 int ret; 8873 8874 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 8875 if (IS_ERR(tb)) { 8876 ret = PTR_ERR(tb); 8877 ath12k_warn(ab, 8878 "failed to parse probe response transmission status event tlv: %d\n", 8879 ret); 8880 return; 8881 } 8882 8883 ev = tb[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT]; 8884 if (!ev) { 8885 ath12k_warn(ab, 8886 "failed to fetch probe response transmission status event"); 8887 kfree(tb); 8888 return; 8889 } 8890 8891 if (ev->tx_status) 8892 ath12k_warn(ab, 8893 "Probe response transmission failed for vdev_id %u, status %u\n", 8894 ev->vdev_id, ev->tx_status); 8895 8896 kfree(tb); 8897 } 8898 8899 static int ath12k_wmi_p2p_noa_event(struct ath12k_base *ab, 8900 struct sk_buff *skb) 8901 { 8902 const void **tb; 8903 const struct wmi_p2p_noa_event *ev; 8904 const struct ath12k_wmi_p2p_noa_info *noa; 8905 struct ath12k *ar; 8906 int ret, vdev_id; 8907 8908 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 8909 if (IS_ERR(tb)) { 8910 ret = PTR_ERR(tb); 8911 ath12k_warn(ab, "failed to parse P2P NoA TLV: %d\n", ret); 8912 return ret; 8913 } 8914 8915 ev = tb[WMI_TAG_P2P_NOA_EVENT]; 8916 noa = tb[WMI_TAG_P2P_NOA_INFO]; 8917 8918 if (!ev || !noa) { 8919 ret = -EPROTO; 8920 goto out; 8921 } 8922 8923 vdev_id = __le32_to_cpu(ev->vdev_id); 8924 8925 ath12k_dbg(ab, ATH12K_DBG_WMI, 8926 "wmi tlv p2p noa vdev_id %i descriptors %u\n", 8927 vdev_id, le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_DESC_NUM)); 8928 8929 
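/* Look up the vdev under RCU and hand the NoA (Notice of Absence)
 * descriptors over to the P2P code, which updates this vdev's NoA
 * attribute.
 */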
rcu_read_lock(); 8930 ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id); 8931 if (!ar) { 8932 ath12k_warn(ab, "invalid vdev id %d in P2P NoA event\n", 8933 vdev_id); 8934 ret = -EINVAL; 8935 goto unlock; 8936 } 8937 8938 ath12k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa); 8939 8940 ret = 0; 8941 8942 unlock: 8943 rcu_read_unlock(); 8944 out: 8945 kfree(tb); 8946 return ret; 8947 } 8948 8949 static void ath12k_rfkill_state_change_event(struct ath12k_base *ab, 8950 struct sk_buff *skb) 8951 { 8952 const struct wmi_rfkill_state_change_event *ev; 8953 const void **tb; 8954 int ret; 8955 8956 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 8957 if (IS_ERR(tb)) { 8958 ret = PTR_ERR(tb); 8959 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 8960 return; 8961 } 8962 8963 ev = tb[WMI_TAG_RFKILL_EVENT]; 8964 if (!ev) { 8965 kfree(tb); 8966 return; 8967 } 8968 8969 ath12k_dbg(ab, ATH12K_DBG_MAC, 8970 "wmi tlv rfkill state change gpio %d type %d radio_state %d\n", 8971 le32_to_cpu(ev->gpio_pin_num), 8972 le32_to_cpu(ev->int_type), 8973 le32_to_cpu(ev->radio_state)); 8974 8975 spin_lock_bh(&ab->base_lock); 8976 ab->rfkill_radio_on = (ev->radio_state == cpu_to_le32(WMI_RFKILL_RADIO_STATE_ON)); 8977 spin_unlock_bh(&ab->base_lock); 8978 8979 queue_work(ab->workqueue, &ab->rfkill_work); 8980 kfree(tb); 8981 } 8982 8983 static void 8984 ath12k_wmi_diag_event(struct ath12k_base *ab, struct sk_buff *skb) 8985 { 8986 trace_ath12k_wmi_diag(ab, skb->data, skb->len); 8987 } 8988 8989 static void ath12k_wmi_twt_enable_event(struct ath12k_base *ab, 8990 struct sk_buff *skb) 8991 { 8992 const void **tb; 8993 const struct wmi_twt_enable_event *ev; 8994 int ret; 8995 8996 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 8997 if (IS_ERR(tb)) { 8998 ret = PTR_ERR(tb); 8999 ath12k_warn(ab, "failed to parse wmi twt enable status event tlv: %d\n", 9000 ret); 9001 return; 9002 } 9003 9004 ev = tb[WMI_TAG_TWT_ENABLE_COMPLETE_EVENT]; 9005 if (!ev) { 9006 ath12k_warn(ab, "failed to fetch twt enable wmi event\n"); 9007 goto exit; 9008 } 9009 9010 ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt enable event pdev id %u status %u\n", 9011 le32_to_cpu(ev->pdev_id), 9012 le32_to_cpu(ev->status)); 9013 9014 exit: 9015 kfree(tb); 9016 } 9017 9018 static void ath12k_wmi_twt_disable_event(struct ath12k_base *ab, 9019 struct sk_buff *skb) 9020 { 9021 const void **tb; 9022 const struct wmi_twt_disable_event *ev; 9023 int ret; 9024 9025 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 9026 if (IS_ERR(tb)) { 9027 ret = PTR_ERR(tb); 9028 ath12k_warn(ab, "failed to parse wmi twt disable status event tlv: %d\n", 9029 ret); 9030 return; 9031 } 9032 9033 ev = tb[WMI_TAG_TWT_DISABLE_COMPLETE_EVENT]; 9034 if (!ev) { 9035 ath12k_warn(ab, "failed to fetch twt disable wmi event\n"); 9036 goto exit; 9037 } 9038 9039 ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt disable event pdev id %d status %u\n", 9040 le32_to_cpu(ev->pdev_id), 9041 le32_to_cpu(ev->status)); 9042 9043 exit: 9044 kfree(tb); 9045 } 9046 9047 static int ath12k_wmi_wow_wakeup_host_parse(struct ath12k_base *ab, 9048 u16 tag, u16 len, 9049 const void *ptr, void *data) 9050 { 9051 const struct wmi_wow_ev_pg_fault_param *pf_param; 9052 const struct wmi_wow_ev_param *param; 9053 struct wmi_wow_ev_arg *arg = data; 9054 int pf_len; 9055 9056 switch (tag) { 9057 case WMI_TAG_WOW_EVENT_INFO: 9058 param = ptr; 9059 arg->wake_reason = le32_to_cpu(param->wake_reason); 9060 ath12k_dbg(ab, ATH12K_DBG_WMI, "wow wakeup host reason %d %s\n", 9061 arg->wake_reason, wow_reason(arg->wake_reason)); 9062 
static int ath12k_wmi_wow_wakeup_host_parse(struct ath12k_base *ab,
					    u16 tag, u16 len,
					    const void *ptr, void *data)
{
	const struct wmi_wow_ev_pg_fault_param *pf_param;
	const struct wmi_wow_ev_param *param;
	struct wmi_wow_ev_arg *arg = data;
	int pf_len;

	switch (tag) {
	case WMI_TAG_WOW_EVENT_INFO:
		param = ptr;
		arg->wake_reason = le32_to_cpu(param->wake_reason);
		ath12k_dbg(ab, ATH12K_DBG_WMI, "wow wakeup host reason %d %s\n",
			   arg->wake_reason, wow_reason(arg->wake_reason));
		break;

	case WMI_TAG_ARRAY_BYTE:
		if (arg && arg->wake_reason == WOW_REASON_PAGE_FAULT) {
			pf_param = ptr;
			pf_len = le32_to_cpu(pf_param->len);
			if (pf_len > len - sizeof(pf_len) ||
			    pf_len < 0) {
				ath12k_warn(ab, "invalid wow reason page fault buffer len %d\n",
					    pf_len);
				return -EINVAL;
			}
			ath12k_dbg(ab, ATH12K_DBG_WMI, "wow_reason_page_fault len %d\n",
				   pf_len);
			ath12k_dbg_dump(ab, ATH12K_DBG_WMI,
					"wow_reason_page_fault packet present",
					"wow_pg_fault ",
					pf_param->data,
					pf_len);
		}
		break;
	default:
		break;
	}

	return 0;
}

static void ath12k_wmi_event_wow_wakeup_host(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct wmi_wow_ev_arg arg = { };
	int ret;

	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath12k_wmi_wow_wakeup_host_parse,
				  &arg);
	if (ret) {
		ath12k_warn(ab, "failed to parse wmi wow wakeup host event tlv: %d\n",
			    ret);
		return;
	}

	complete(&ab->wow.wakeup_completed);
}
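
/* WMI_GTK_OFFLOAD_STATUS_EVENTID handler: caches the firmware-maintained GTK
 * replay counter on the link vif and forwards it to mac80211. The counter is
 * byte-swapped to big endian before the notify call, since the supplicant
 * expects it in that byte order.
 */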
static void ath12k_wmi_gtk_offload_status_event(struct ath12k_base *ab,
						struct sk_buff *skb)
{
	const struct wmi_gtk_offload_status_event *ev;
	struct ath12k_link_vif *arvif;
	__be64 replay_ctr_be;
	u64 replay_ctr;
	const void **tb;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return;
	}

	ev = tb[WMI_TAG_GTK_OFFLOAD_STATUS_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch gtk offload status ev");
		kfree(tb);
		return;
	}

	rcu_read_lock();
	arvif = ath12k_mac_get_arvif_by_vdev_id(ab, le32_to_cpu(ev->vdev_id));
	if (!arvif) {
		rcu_read_unlock();
		ath12k_warn(ab, "failed to get arvif for vdev_id:%d\n",
			    le32_to_cpu(ev->vdev_id));
		kfree(tb);
		return;
	}

	replay_ctr = le64_to_cpu(ev->replay_ctr);
	arvif->rekey_data.replay_ctr = replay_ctr;
	ath12k_dbg(ab, ATH12K_DBG_WMI, "wmi gtk offload event refresh_cnt %d replay_ctr %llu\n",
		   le32_to_cpu(ev->refresh_cnt), replay_ctr);

	/* supplicant expects big-endian replay counter */
	replay_ctr_be = cpu_to_be64(replay_ctr);

	ieee80211_gtk_rekey_notify(arvif->ahvif->vif, arvif->bssid,
				   (void *)&replay_ctr_be, GFP_ATOMIC);

	rcu_read_unlock();

	kfree(tb);
}

static void ath12k_wmi_event_mlo_setup_complete(struct ath12k_base *ab,
						struct sk_buff *skb)
{
	const struct wmi_mlo_setup_complete_event *ev;
	struct ath12k *ar = NULL;
	struct ath12k_pdev *pdev;
	const void **tb;
	int ret, i;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse mlo setup complete event tlv: %d\n",
			    ret);
		return;
	}

	ev = tb[WMI_TAG_MLO_SETUP_COMPLETE_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch mlo setup complete event\n");
		kfree(tb);
		return;
	}

	if (le32_to_cpu(ev->pdev_id) > ab->num_radios)
		goto skip_lookup;

	for (i = 0; i < ab->num_radios; i++) {
		pdev = &ab->pdevs[i];
		if (pdev && pdev->pdev_id == le32_to_cpu(ev->pdev_id)) {
			ar = pdev->ar;
			break;
		}
	}

skip_lookup:
	if (!ar) {
		ath12k_warn(ab, "invalid pdev_id %d status %u in setup complete event\n",
			    ev->pdev_id, ev->status);
		goto out;
	}

	ar->mlo_setup_status = le32_to_cpu(ev->status);
	complete(&ar->mlo_setup_done);

out:
	kfree(tb);
}

static void ath12k_wmi_event_teardown_complete(struct ath12k_base *ab,
					       struct sk_buff *skb)
{
	const struct wmi_mlo_teardown_complete_event *ev;
	const void **tb;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse teardown complete event tlv: %d\n", ret);
		return;
	}

	ev = tb[WMI_TAG_MLO_TEARDOWN_COMPLETE];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch teardown complete event\n");
		kfree(tb);
		return;
	}

	kfree(tb);
}

#ifdef CONFIG_ATH12K_DEBUGFS
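/* TPC stats arrive as a multi-event HALPHY control-path transfer. The fixed
 * param of each event carries an event_count used for sequencing; the
 * sub-TLVs describe which array (reg power, rates array 1/2, ctl table) the
 * accompanying byte/int16 payload belongs to. The helpers below stage those
 * payloads into ar->debug.tpc_stats for the debugfs reader.
 */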
static int ath12k_wmi_tpc_stats_copy_buffer(struct ath12k_base *ab,
					    const void *ptr, u16 tag, u16 len,
					    struct wmi_tpc_stats_arg *tpc_stats)
{
	u32 len1, len2, len3, len4;
	s16 *dst_ptr;
	s8 *dst_ptr_ctl;

	len1 = le32_to_cpu(tpc_stats->max_reg_allowed_power.tpc_reg_pwr.reg_array_len);
	len2 = le32_to_cpu(tpc_stats->rates_array1.tpc_rates_array.rate_array_len);
	len3 = le32_to_cpu(tpc_stats->rates_array2.tpc_rates_array.rate_array_len);
	len4 = le32_to_cpu(tpc_stats->ctl_array.tpc_ctl_pwr.ctl_array_len);

	switch (tpc_stats->event_count) {
	case ATH12K_TPC_STATS_CONFIG_REG_PWR_EVENT:
		if (len1 > len)
			return -ENOBUFS;

		if (tpc_stats->tlvs_rcvd & WMI_TPC_REG_PWR_ALLOWED) {
			dst_ptr = tpc_stats->max_reg_allowed_power.reg_pwr_array;
			memcpy(dst_ptr, ptr, len1);
		}
		break;
	case ATH12K_TPC_STATS_RATES_EVENT1:
		if (len2 > len)
			return -ENOBUFS;

		if (tpc_stats->tlvs_rcvd & WMI_TPC_RATES_ARRAY1) {
			dst_ptr = tpc_stats->rates_array1.rate_array;
			memcpy(dst_ptr, ptr, len2);
		}
		break;
	case ATH12K_TPC_STATS_RATES_EVENT2:
		if (len3 > len)
			return -ENOBUFS;

		if (tpc_stats->tlvs_rcvd & WMI_TPC_RATES_ARRAY2) {
			dst_ptr = tpc_stats->rates_array2.rate_array;
			memcpy(dst_ptr, ptr, len3);
		}
		break;
	case ATH12K_TPC_STATS_CTL_TABLE_EVENT:
		if (len4 > len)
			return -ENOBUFS;

		if (tpc_stats->tlvs_rcvd & WMI_TPC_CTL_PWR_ARRAY) {
			dst_ptr_ctl = tpc_stats->ctl_array.ctl_pwr_table;
			memcpy(dst_ptr_ctl, ptr, len4);
		}
		break;
	}
	return 0;
}

static int ath12k_tpc_get_reg_pwr(struct ath12k_base *ab,
				  struct wmi_tpc_stats_arg *tpc_stats,
				  struct wmi_max_reg_power_fixed_params *ev)
{
	struct wmi_max_reg_power_allowed_arg *reg_pwr;
	u32 total_size;

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "Received reg power array type %d length %d for tpc stats\n",
		   ev->reg_power_type, ev->reg_array_len);

	switch (le32_to_cpu(ev->reg_power_type)) {
	case TPC_STATS_REG_PWR_ALLOWED_TYPE:
		reg_pwr = &tpc_stats->max_reg_allowed_power;
		break;
	default:
		return -EINVAL;
	}

	/* Each entry is 2 bytes, hence multiplying the product of the indices by 2 */
	total_size = le32_to_cpu(ev->d1) * le32_to_cpu(ev->d2) *
		     le32_to_cpu(ev->d3) * le32_to_cpu(ev->d4) * 2;
	if (le32_to_cpu(ev->reg_array_len) != total_size) {
		ath12k_warn(ab,
			    "Total size and reg_array_len do not match for tpc stats\n");
		return -EINVAL;
	}

	memcpy(&reg_pwr->tpc_reg_pwr, ev,
	       sizeof(struct wmi_max_reg_power_fixed_params));

	reg_pwr->reg_pwr_array = kzalloc(le32_to_cpu(reg_pwr->tpc_reg_pwr.reg_array_len),
					 GFP_ATOMIC);
	if (!reg_pwr->reg_pwr_array)
		return -ENOMEM;

	tpc_stats->tlvs_rcvd |= WMI_TPC_REG_PWR_ALLOWED;

	return 0;
}

static int ath12k_tpc_get_rate_array(struct ath12k_base *ab,
				     struct wmi_tpc_stats_arg *tpc_stats,
				     struct wmi_tpc_rates_array_fixed_params *ev)
{
	struct wmi_tpc_rates_array_arg *rates_array;
	u32 flag = 0, rate_array_len;

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "Received rates array type %d length %d for tpc stats\n",
		   ev->rate_array_type, ev->rate_array_len);

	switch (le32_to_cpu(ev->rate_array_type)) {
	case ATH12K_TPC_STATS_RATES_ARRAY1:
		rates_array = &tpc_stats->rates_array1;
		flag = WMI_TPC_RATES_ARRAY1;
		break;
	case ATH12K_TPC_STATS_RATES_ARRAY2:
		rates_array = &tpc_stats->rates_array2;
		flag = WMI_TPC_RATES_ARRAY2;
		break;
	default:
		ath12k_warn(ab,
			    "Received invalid type of rates array for tpc stats\n");
		return -EINVAL;
	}
	memcpy(&rates_array->tpc_rates_array, ev,
	       sizeof(struct wmi_tpc_rates_array_fixed_params));
	rate_array_len = le32_to_cpu(rates_array->tpc_rates_array.rate_array_len);
	rates_array->rate_array = kzalloc(rate_array_len, GFP_ATOMIC);
	if (!rates_array->rate_array)
		return -ENOMEM;

	tpc_stats->tlvs_rcvd |= flag;
	return 0;
}

static int ath12k_tpc_get_ctl_pwr_tbl(struct ath12k_base *ab,
				      struct wmi_tpc_stats_arg *tpc_stats,
				      struct wmi_tpc_ctl_pwr_fixed_params *ev)
{
	struct wmi_tpc_ctl_pwr_table_arg *ctl_array;
	u32 total_size, ctl_array_len, flag = 0;

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "Received ctl array type %d length %d for tpc stats\n",
		   ev->ctl_array_type, ev->ctl_array_len);

	switch (le32_to_cpu(ev->ctl_array_type)) {
	case ATH12K_TPC_STATS_CTL_ARRAY:
		ctl_array = &tpc_stats->ctl_array;
		flag = WMI_TPC_CTL_PWR_ARRAY;
		break;
	default:
		ath12k_warn(ab,
			    "Received invalid type of ctl pwr table for tpc stats\n");
		return -EINVAL;
	}

	total_size = le32_to_cpu(ev->d1) * le32_to_cpu(ev->d2) *
		     le32_to_cpu(ev->d3) * le32_to_cpu(ev->d4);
	if (le32_to_cpu(ev->ctl_array_len) != total_size) {
		ath12k_warn(ab,
			    "Total size and ctl_array_len do not match for tpc stats\n");
		return -EINVAL;
	}

	memcpy(&ctl_array->tpc_ctl_pwr, ev, sizeof(struct wmi_tpc_ctl_pwr_fixed_params));
	ctl_array_len = le32_to_cpu(ctl_array->tpc_ctl_pwr.ctl_array_len);
	ctl_array->ctl_pwr_table = kzalloc(ctl_array_len, GFP_ATOMIC);
	if (!ctl_array->ctl_pwr_table)
		return -ENOMEM;

	tpc_stats->tlvs_rcvd |= flag;
	return 0;
}

static int ath12k_wmi_tpc_stats_subtlv_parser(struct ath12k_base *ab,
					      u16 tag, u16 len,
					      const void *ptr, void *data)
{
	struct wmi_tpc_rates_array_fixed_params *tpc_rates_array;
	struct wmi_max_reg_power_fixed_params *tpc_reg_pwr;
	struct wmi_tpc_ctl_pwr_fixed_params *tpc_ctl_pwr;
	struct wmi_tpc_stats_arg *tpc_stats = data;
	struct wmi_tpc_config_params *tpc_config;
	int ret = 0;

	if (!tpc_stats) {
		ath12k_warn(ab, "tpc stats memory unavailable\n");
		return -EINVAL;
	}

	switch (tag) {
	case WMI_TAG_TPC_STATS_CONFIG_EVENT:
		tpc_config = (struct wmi_tpc_config_params *)ptr;
		memcpy(&tpc_stats->tpc_config, tpc_config,
		       sizeof(struct wmi_tpc_config_params));
		break;
	case WMI_TAG_TPC_STATS_REG_PWR_ALLOWED:
		tpc_reg_pwr = (struct wmi_max_reg_power_fixed_params *)ptr;
		ret = ath12k_tpc_get_reg_pwr(ab, tpc_stats, tpc_reg_pwr);
		break;
	case WMI_TAG_TPC_STATS_RATES_ARRAY:
		tpc_rates_array = (struct wmi_tpc_rates_array_fixed_params *)ptr;
		ret = ath12k_tpc_get_rate_array(ab, tpc_stats, tpc_rates_array);
		break;
	case WMI_TAG_TPC_STATS_CTL_PWR_TABLE_EVENT:
		tpc_ctl_pwr = (struct wmi_tpc_ctl_pwr_fixed_params *)ptr;
		ret = ath12k_tpc_get_ctl_pwr_tbl(ab, tpc_stats, tpc_ctl_pwr);
		break;
	default:
		ath12k_warn(ab,
			    "Received invalid tag for tpc stats in subtlvs\n");
		return -EINVAL;
	}
	return ret;
}

static int ath12k_wmi_tpc_stats_event_parser(struct ath12k_base *ab,
					     u16 tag, u16 len,
					     const void *ptr, void *data)
{
	struct wmi_tpc_stats_arg *tpc_stats = (struct wmi_tpc_stats_arg *)data;
	int ret;

	switch (tag) {
	case WMI_TAG_HALPHY_CTRL_PATH_EVENT_FIXED_PARAM:
		ret = 0;
		/* Fixed param is already processed */
		break;
	case WMI_TAG_ARRAY_STRUCT:
		/* len 0 is expected for array of struct when there
		 * is no content of that type to pack inside that tlv
		 */
		if (len == 0)
			return 0;
		ret = ath12k_wmi_tlv_iter(ab, ptr, len,
					  ath12k_wmi_tpc_stats_subtlv_parser,
					  tpc_stats);
		break;
	case WMI_TAG_ARRAY_INT16:
		if (len == 0)
			return 0;
		ret = ath12k_wmi_tpc_stats_copy_buffer(ab, ptr,
						       WMI_TAG_ARRAY_INT16,
						       len, tpc_stats);
		break;
	case WMI_TAG_ARRAY_BYTE:
		if (len == 0)
			return 0;
		ret = ath12k_wmi_tpc_stats_copy_buffer(ab, ptr,
						       WMI_TAG_ARRAY_BYTE,
						       len, tpc_stats);
		break;
	default:
		ath12k_warn(ab, "Received invalid tag for tpc stats\n");
		ret = -EINVAL;
		break;
	}
	return ret;
}
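
/* Free all staged TPC stats buffers and clear the debugfs pointer. Must be
 * called with ar->data_lock held (see the lockdep assert); used both when a
 * stale or unsolicited stats event arrives and when parsing fails midway.
 */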
void ath12k_wmi_free_tpc_stats_mem(struct ath12k *ar)
{
	struct wmi_tpc_stats_arg *tpc_stats = ar->debug.tpc_stats;

	lockdep_assert_held(&ar->data_lock);
	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "tpc stats mem free\n");
	if (tpc_stats) {
		kfree(tpc_stats->max_reg_allowed_power.reg_pwr_array);
		kfree(tpc_stats->rates_array1.rate_array);
		kfree(tpc_stats->rates_array2.rate_array);
		kfree(tpc_stats->ctl_array.ctl_pwr_table);
		kfree(tpc_stats);
		ar->debug.tpc_stats = NULL;
	}
}

static void ath12k_wmi_process_tpc_stats(struct ath12k_base *ab,
					 struct sk_buff *skb)
{
	struct ath12k_wmi_pdev_tpc_stats_event_fixed_params *fixed_param;
	struct wmi_tpc_stats_arg *tpc_stats;
	const struct wmi_tlv *tlv;
	void *ptr = skb->data;
	struct ath12k *ar;
	u16 tlv_tag;
	u32 event_count;
	int ret;

	if (!skb->data) {
		ath12k_warn(ab, "No data present in tpc stats event\n");
		return;
	}

	if (skb->len < (sizeof(*fixed_param) + TLV_HDR_SIZE)) {
		ath12k_warn(ab, "TPC stats event size invalid\n");
		return;
	}

	tlv = (struct wmi_tlv *)ptr;
	tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
	ptr += sizeof(*tlv);

	if (tlv_tag != WMI_TAG_HALPHY_CTRL_PATH_EVENT_FIXED_PARAM) {
		ath12k_warn(ab, "TPC stats without fixed param tlv at start\n");
		return;
	}

	fixed_param = (struct ath12k_wmi_pdev_tpc_stats_event_fixed_params *)ptr;
	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(fixed_param->pdev_id) + 1);
	if (!ar) {
		ath12k_warn(ab, "Failed to get ar for tpc stats\n");
		rcu_read_unlock();
		return;
	}
	spin_lock_bh(&ar->data_lock);
	if (!ar->debug.tpc_request) {
		/* Event was received either without a request or after the
		 * timeout; if memory is already allocated, free it.
		 */
		if (ar->debug.tpc_stats) {
			ath12k_warn(ab, "Freeing memory for tpc_stats\n");
			ath12k_wmi_free_tpc_stats_mem(ar);
		}
		goto unlock;
	}

	event_count = le32_to_cpu(fixed_param->event_count);
	if (event_count == 0) {
		if (ar->debug.tpc_stats) {
			ath12k_warn(ab,
				    "Invalid tpc memory present\n");
			goto unlock;
		}
		ar->debug.tpc_stats =
			kzalloc(sizeof(struct wmi_tpc_stats_arg),
				GFP_ATOMIC);
		if (!ar->debug.tpc_stats) {
			ath12k_warn(ab,
				    "Failed to allocate memory for tpc stats\n");
			goto unlock;
		}
	}

	tpc_stats = ar->debug.tpc_stats;
	if (!tpc_stats) {
		ath12k_warn(ab, "tpc stats memory unavailable\n");
		goto unlock;
	}

	if (event_count != 0) {
		if (event_count != tpc_stats->event_count + 1) {
			ath12k_warn(ab,
				    "Invalid tpc event received\n");
			goto unlock;
		}
	}
	tpc_stats->pdev_id = le32_to_cpu(fixed_param->pdev_id);
	tpc_stats->end_of_event = le32_to_cpu(fixed_param->end_of_event);
	tpc_stats->event_count = le32_to_cpu(fixed_param->event_count);
	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "tpc stats event_count %d\n",
		   tpc_stats->event_count);
	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath12k_wmi_tpc_stats_event_parser,
				  tpc_stats);
	if (ret) {
		ath12k_wmi_free_tpc_stats_mem(ar);
		ath12k_warn(ab, "failed to parse tpc_stats tlv: %d\n", ret);
		goto unlock;
	}

	if (tpc_stats->end_of_event)
		complete(&ar->debug.tpc_complete);

unlock:
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();
}
#else
static void ath12k_wmi_process_tpc_stats(struct ath12k_base *ab,
					 struct sk_buff *skb)
{
}
#endif
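
/* Sub-TLV worker for the RSSI dBm conversion event. Firmware reports one
 * 32-bit noise-floor word per chain, with the four 20 MHz subband values
 * packed as bytes. For example (illustrative values), a word of 0xA0A1A2A3
 * unpacks so that byte 0 (0xA3, i.e. -93 dBm as s8) is the primary 20 MHz
 * subband for that chain. The minimum noise floor across the active chains
 * and segments is cached for later RSSI correction.
 */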
static int
ath12k_wmi_rssi_dbm_conv_info_evt_subtlv_parser(struct ath12k_base *ab,
						u16 tag, u16 len,
						const void *ptr, void *data)
{
	const struct ath12k_wmi_rssi_dbm_conv_temp_info_params *temp_info;
	const struct ath12k_wmi_rssi_dbm_conv_info_params *param_info;
	struct ath12k_wmi_rssi_dbm_conv_info_arg *rssi_info = data;
	struct ath12k_wmi_rssi_dbm_conv_param_arg param_arg;
	s32 nf_hw_dbm[ATH12K_MAX_NUM_NF_HW_DBM];
	u8 num_20mhz_segments;
	s8 min_nf, *nf_ptr;
	int i, j;

	switch (tag) {
	case WMI_TAG_RSSI_DBM_CONVERSION_PARAMS_INFO:
		if (len < sizeof(*param_info)) {
			ath12k_warn(ab,
				    "RSSI dbm conv subtlv 0x%x invalid len %d rcvd",
				    tag, len);
			return -EINVAL;
		}

		param_info = ptr;

		param_arg.curr_bw = le32_to_cpu(param_info->curr_bw);
		param_arg.curr_rx_chainmask = le32_to_cpu(param_info->curr_rx_chainmask);

		/* The received array packs the per-20MHz-subband values of
		 * each chain into one 32-bit word. Unpack it into a 2D
		 * byte-array indexed by chain and subband.
		 */
		nf_ptr = &param_arg.nf_hw_dbm[0][0];

		for (i = 0; i < ATH12K_MAX_NUM_NF_HW_DBM; i++) {
			nf_hw_dbm[i] = a_sle32_to_cpu(param_info->nf_hw_dbm[i]);

			for (j = 0; j < 4; j++) {
				*nf_ptr = (nf_hw_dbm[i] >> (j * 8)) & 0xFF;
				nf_ptr++;
			}
		}

		switch (param_arg.curr_bw) {
		case WMI_CHAN_WIDTH_20:
			num_20mhz_segments = 1;
			break;
		case WMI_CHAN_WIDTH_40:
			num_20mhz_segments = 2;
			break;
		case WMI_CHAN_WIDTH_80:
			num_20mhz_segments = 4;
			break;
		case WMI_CHAN_WIDTH_160:
			num_20mhz_segments = 8;
			break;
		case WMI_CHAN_WIDTH_320:
			num_20mhz_segments = 16;
			break;
		default:
			ath12k_warn(ab, "Invalid current bandwidth %d in RSSI dbm event",
				    param_arg.curr_bw);
			/* In the error case, still consider the primary 20 MHz
			 * segment since that is better than dropping the
			 * whole event.
			 */
			num_20mhz_segments = 1;
		}

		min_nf = ATH12K_DEFAULT_NOISE_FLOOR;

		for (i = 0; i < ATH12K_MAX_NUM_ANTENNA; i++) {
			if (!(param_arg.curr_rx_chainmask & BIT(i)))
				continue;

			for (j = 0; j < num_20mhz_segments; j++) {
				if (param_arg.nf_hw_dbm[i][j] < min_nf)
					min_nf = param_arg.nf_hw_dbm[i][j];
			}
		}

		rssi_info->min_nf_dbm = min_nf;
		rssi_info->nf_dbm_present = true;
		break;
	case WMI_TAG_RSSI_DBM_CONVERSION_TEMP_OFFSET_INFO:
		if (len < sizeof(*temp_info)) {
			ath12k_warn(ab,
				    "RSSI dbm conv subtlv 0x%x invalid len %d rcvd",
				    tag, len);
			return -EINVAL;
		}

		temp_info = ptr;
		rssi_info->temp_offset = a_sle32_to_cpu(temp_info->offset);
		rssi_info->temp_offset_present = true;
		break;
	default:
		ath12k_dbg(ab, ATH12K_DBG_WMI,
			   "Unknown subtlv 0x%x in RSSI dbm conversion event\n", tag);
	}

	return 0;
}

static int
ath12k_wmi_rssi_dbm_conv_info_event_parser(struct ath12k_base *ab,
					   u16 tag, u16 len,
					   const void *ptr, void *data)
{
	int ret = 0;

	switch (tag) {
	case WMI_TAG_RSSI_DBM_CONVERSION_PARAMS_INFO_FIXED_PARAM:
		/* Fixed param is already processed */
		break;
	case WMI_TAG_ARRAY_STRUCT:
		/* len 0 is expected for array of struct when there
		 * is no content of that type inside that tlv
		 */
		if (len == 0)
			return 0;

		ret = ath12k_wmi_tlv_iter(ab, ptr, len,
					  ath12k_wmi_rssi_dbm_conv_info_evt_subtlv_parser,
					  data);
		break;
	default:
		ath12k_dbg(ab, ATH12K_DBG_WMI,
			   "Received invalid tag 0x%x for RSSI dbm conv info event\n",
			   tag);
		break;
	}

	return ret;
}

static int
ath12k_wmi_rssi_dbm_conv_info_process_fixed_param(struct ath12k_base *ab, u8 *ptr,
						  size_t len, u32 *pdev_id)
{
	struct ath12k_wmi_rssi_dbm_conv_info_fixed_params *fixed_param;
	const struct wmi_tlv *tlv;
	u16 tlv_tag;

	if (len < (sizeof(*fixed_param) + TLV_HDR_SIZE)) {
		ath12k_warn(ab, "invalid RSSI dbm conv event size %zu\n", len);
		return -EINVAL;
	}

	tlv = (struct wmi_tlv *)ptr;
	tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
	ptr += sizeof(*tlv);

	if (tlv_tag != WMI_TAG_RSSI_DBM_CONVERSION_PARAMS_INFO_FIXED_PARAM) {
		ath12k_warn(ab, "RSSI dbm conv event received without fixed param tlv\n");
		return -EINVAL;
	}

	fixed_param = (struct ath12k_wmi_rssi_dbm_conv_info_fixed_params *)ptr;
	*pdev_id = le32_to_cpu(fixed_param->pdev_id);

	return 0;
}
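
/* Fold the parsed conversion parameters into the pdev RSSI state: the
 * effective noise floor used for RSSI-to-dBm conversion is the cached
 * minimum hardware noise floor plus the temperature-drift offset.
 */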
static void
ath12k_wmi_update_rssi_offsets(struct ath12k *ar,
			       struct ath12k_wmi_rssi_dbm_conv_info_arg *rssi_info)
{
	struct ath12k_pdev_rssi_offsets *info = &ar->rssi_info;

	lockdep_assert_held(&ar->data_lock);

	if (rssi_info->temp_offset_present)
		info->temp_offset = rssi_info->temp_offset;

	if (rssi_info->nf_dbm_present)
		info->min_nf_dbm = rssi_info->min_nf_dbm;

	info->noise_floor = info->min_nf_dbm + info->temp_offset;
}

static void
ath12k_wmi_rssi_dbm_conversion_params_info_event(struct ath12k_base *ab,
						 struct sk_buff *skb)
{
	struct ath12k_wmi_rssi_dbm_conv_info_arg rssi_info = {};
	struct ath12k *ar;
	s32 noise_floor;
	u32 pdev_id;
	int ret;

	ret = ath12k_wmi_rssi_dbm_conv_info_process_fixed_param(ab, skb->data, skb->len,
								&pdev_id);
	if (ret) {
		ath12k_warn(ab, "failed to parse fixed param in RSSI dbm conv event: %d\n",
			    ret);
		return;
	}

	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
	/* If pdev is not active, ignore the event */
	if (!ar)
		goto out_unlock;

	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath12k_wmi_rssi_dbm_conv_info_event_parser,
				  &rssi_info);
	if (ret) {
		ath12k_warn(ab, "unable to parse RSSI dbm conversion event\n");
		goto out_unlock;
	}

	spin_lock_bh(&ar->data_lock);
	ath12k_wmi_update_rssi_offsets(ar, &rssi_info);
	noise_floor = ath12k_pdev_get_noise_floor(ar);
	spin_unlock_bh(&ar->data_lock);

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "RSSI noise floor updated, new value is %d dbm\n", noise_floor);
out_unlock:
	rcu_read_unlock();
}
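
/* Top-level WMI event dispatcher. Every handler here is expected to copy
 * whatever it needs out of the skb: the skb is freed on exit, with the one
 * exception of WMI_MGMT_RX_EVENTID, whose handler takes over ownership.
 */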
static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct wmi_cmd_hdr *cmd_hdr;
	enum wmi_tlv_event_id id;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	id = le32_get_bits(cmd_hdr->cmd_id, WMI_CMD_HDR_CMD_ID);

	if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
		goto out;

	switch (id) {
	/* Process all the WMI events here */
	case WMI_SERVICE_READY_EVENTID:
		ath12k_service_ready_event(ab, skb);
		break;
	case WMI_SERVICE_READY_EXT_EVENTID:
		ath12k_service_ready_ext_event(ab, skb);
		break;
	case WMI_SERVICE_READY_EXT2_EVENTID:
		ath12k_service_ready_ext2_event(ab, skb);
		break;
	case WMI_REG_CHAN_LIST_CC_EXT_EVENTID:
		ath12k_reg_chan_list_event(ab, skb);
		break;
	case WMI_READY_EVENTID:
		ath12k_ready_event(ab, skb);
		break;
	case WMI_PEER_DELETE_RESP_EVENTID:
		ath12k_peer_delete_resp_event(ab, skb);
		break;
	case WMI_VDEV_START_RESP_EVENTID:
		ath12k_vdev_start_resp_event(ab, skb);
		break;
	case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID:
		ath12k_bcn_tx_status_event(ab, skb);
		break;
	case WMI_VDEV_STOPPED_EVENTID:
		ath12k_vdev_stopped_event(ab, skb);
		break;
	case WMI_MGMT_RX_EVENTID:
		ath12k_mgmt_rx_event(ab, skb);
		/* mgmt_rx_event() owns the skb now! */
		return;
	case WMI_MGMT_TX_COMPLETION_EVENTID:
		ath12k_mgmt_tx_compl_event(ab, skb);
		break;
	case WMI_SCAN_EVENTID:
		ath12k_scan_event(ab, skb);
		break;
	case WMI_PEER_STA_KICKOUT_EVENTID:
		ath12k_peer_sta_kickout_event(ab, skb);
		break;
	case WMI_ROAM_EVENTID:
		ath12k_roam_event(ab, skb);
		break;
	case WMI_CHAN_INFO_EVENTID:
		ath12k_chan_info_event(ab, skb);
		break;
	case WMI_PDEV_BSS_CHAN_INFO_EVENTID:
		ath12k_pdev_bss_chan_info_event(ab, skb);
		break;
	case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
		ath12k_vdev_install_key_compl_event(ab, skb);
		break;
	case WMI_SERVICE_AVAILABLE_EVENTID:
		ath12k_service_available_event(ab, skb);
		break;
	case WMI_PEER_ASSOC_CONF_EVENTID:
		ath12k_peer_assoc_conf_event(ab, skb);
		break;
	case WMI_UPDATE_STATS_EVENTID:
		ath12k_update_stats_event(ab, skb);
		break;
	case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID:
		ath12k_pdev_ctl_failsafe_check_event(ab, skb);
		break;
	case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID:
		ath12k_wmi_pdev_csa_switch_count_status_event(ab, skb);
		break;
	case WMI_PDEV_TEMPERATURE_EVENTID:
		ath12k_wmi_pdev_temperature_event(ab, skb);
		break;
	case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID:
		ath12k_wmi_pdev_dma_ring_buf_release_event(ab, skb);
		break;
	case WMI_HOST_FILS_DISCOVERY_EVENTID:
		ath12k_fils_discovery_event(ab, skb);
		break;
	case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID:
		ath12k_probe_resp_tx_status_event(ab, skb);
		break;
	case WMI_RFKILL_STATE_CHANGE_EVENTID:
		ath12k_rfkill_state_change_event(ab, skb);
		break;
	case WMI_TWT_ENABLE_EVENTID:
		ath12k_wmi_twt_enable_event(ab, skb);
		break;
	case WMI_TWT_DISABLE_EVENTID:
		ath12k_wmi_twt_disable_event(ab, skb);
		break;
	case WMI_P2P_NOA_EVENTID:
		ath12k_wmi_p2p_noa_event(ab, skb);
		break;
	case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID:
		ath12k_wmi_pdev_dfs_radar_detected_event(ab, skb);
		break;
	case WMI_VDEV_DELETE_RESP_EVENTID:
		ath12k_vdev_delete_resp_event(ab, skb);
		break;
	case WMI_DIAG_EVENTID:
		ath12k_wmi_diag_event(ab, skb);
		break;
	case WMI_WOW_WAKEUP_HOST_EVENTID:
		ath12k_wmi_event_wow_wakeup_host(ab, skb);
		break;
	case WMI_GTK_OFFLOAD_STATUS_EVENTID:
		ath12k_wmi_gtk_offload_status_event(ab, skb);
		break;
	case WMI_MLO_SETUP_COMPLETE_EVENTID:
		ath12k_wmi_event_mlo_setup_complete(ab, skb);
		break;
	case WMI_MLO_TEARDOWN_COMPLETE_EVENTID:
		ath12k_wmi_event_teardown_complete(ab, skb);
		break;
	case WMI_HALPHY_STATS_CTRL_PATH_EVENTID:
		ath12k_wmi_process_tpc_stats(ab, skb);
		break;
	case WMI_11D_NEW_COUNTRY_EVENTID:
		ath12k_reg_11d_new_cc_event(ab, skb);
		break;
	case WMI_PDEV_RSSI_DBM_CONVERSION_PARAMS_INFO_EVENTID:
		ath12k_wmi_rssi_dbm_conversion_params_info_event(ab, skb);
		break;
	case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID:
		ath12k_wmi_obss_color_collision_event(ab, skb);
		break;
	/* add Unsupported events (rare) here */
	case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
	case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
	case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
		ath12k_dbg(ab, ATH12K_DBG_WMI,
			   "ignoring unsupported event 0x%x\n", id);
		break;
	/* add Unsupported events (frequent) here */
	case WMI_PDEV_GET_HALPHY_CAL_STATUS_EVENTID:
	case WMI_MGMT_RX_FW_CONSUMED_EVENTID:
		/* debug log might flood, hence silently ignore (no-op) */
		break;
	case WMI_PDEV_UTF_EVENTID:
		if (test_bit(ATH12K_FLAG_FTM_SEGMENTED, &ab->dev_flags))
			ath12k_tm_wmi_event_segmented(ab, id, skb);
		else
			ath12k_tm_wmi_event_unsegmented(ab, id, skb);
		break;
	default:
		ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
		break;
	}

out:
	dev_kfree_skb(skb);
}
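
/* Connect one WMI control endpoint per pdev over HTC. pdev 0 uses the
 * primary WMI control service; additional pdevs map to the MAC1/MAC2
 * control services.
 */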
static int ath12k_connect_pdev_htc_service(struct ath12k_base *ab,
					   u32 pdev_idx)
{
	int status;
	static const u32 svc_id[] = {
		ATH12K_HTC_SVC_ID_WMI_CONTROL,
		ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1,
		ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2
	};
	struct ath12k_htc_svc_conn_req conn_req = {};
	struct ath12k_htc_svc_conn_resp conn_resp = {};

	/* these fields are the same for all service endpoints */
	conn_req.ep_ops.ep_tx_complete = ath12k_wmi_htc_tx_complete;
	conn_req.ep_ops.ep_rx_complete = ath12k_wmi_op_rx;
	conn_req.ep_ops.ep_tx_credits = ath12k_wmi_op_ep_tx_credits;

	/* connect to control service */
	conn_req.service_id = svc_id[pdev_idx];

	status = ath12k_htc_connect_service(&ab->htc, &conn_req, &conn_resp);
	if (status) {
		ath12k_warn(ab, "failed to connect to WMI CONTROL service status: %d\n",
			    status);
		return status;
	}

	ab->wmi_ab.wmi_endpoint_id[pdev_idx] = conn_resp.eid;
	ab->wmi_ab.wmi[pdev_idx].eid = conn_resp.eid;
	ab->wmi_ab.max_msg_len[pdev_idx] = conn_resp.max_msg_len;

	return 0;
}

static int
ath12k_wmi_send_unit_test_cmd(struct ath12k *ar,
			      struct wmi_unit_test_cmd ut_cmd,
			      u32 *test_args)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_unit_test_cmd *cmd;
	struct sk_buff *skb;
	struct wmi_tlv *tlv;
	void *ptr;
	u32 *ut_cmd_args;
	int buf_len, arg_len;
	int ret;
	int i;

	arg_len = sizeof(u32) * le32_to_cpu(ut_cmd.num_args);
	buf_len = sizeof(ut_cmd) + arg_len + TLV_HDR_SIZE;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_unit_test_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_UNIT_TEST_CMD,
						 sizeof(ut_cmd));

	cmd->vdev_id = ut_cmd.vdev_id;
	cmd->module_id = ut_cmd.module_id;
	cmd->num_args = ut_cmd.num_args;
	cmd->diag_token = ut_cmd.diag_token;

	ptr = skb->data + sizeof(ut_cmd);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, arg_len);

	ptr += TLV_HDR_SIZE;

	ut_cmd_args = ptr;
	for (i = 0; i < le32_to_cpu(ut_cmd.num_args); i++)
		ut_cmd_args[i] = test_args[i];

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI unit test : module %d vdev %d n_args %d token %d\n",
		   cmd->module_id, cmd->vdev_id, cmd->num_args,
		   cmd->diag_token);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_UNIT_TEST_CMDID);

	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_UNIT_TEST CMD :%d\n",
			    ret);
		dev_kfree_skb(skb);
	}

	return ret;
}
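
/* Debug helper: inject a simulated radar pulse via the DFS unit-test module.
 * It picks the first started AP link vif on the radio and fires a unit-test
 * command with all radar parameters zeroed (a valid "any" pulse for
 * simulation). Typical, purely illustrative use from a debugfs hook:
 *
 *	if (ath12k_wmi_simulate_radar(ar))
 *		ath12k_warn(ar->ab, "radar simulation failed\n");
 */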
int ath12k_wmi_simulate_radar(struct ath12k *ar)
{
	struct ath12k_link_vif *arvif;
	u32 dfs_args[DFS_MAX_TEST_ARGS];
	struct wmi_unit_test_cmd wmi_ut;
	bool arvif_found = false;

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->is_started && arvif->ahvif->vdev_type == WMI_VDEV_TYPE_AP) {
			arvif_found = true;
			break;
		}
	}

	if (!arvif_found)
		return -EINVAL;

	dfs_args[DFS_TEST_CMDID] = 0;
	dfs_args[DFS_TEST_PDEV_ID] = ar->pdev->pdev_id;
	/* Currently we could pass segment_id (b0 - b1), chirp (b2) and
	 * freq offset (b3 - b10) to the unit test. For simulation
	 * purposes these can be set to 0, which is valid.
	 */
	dfs_args[DFS_TEST_RADAR_PARAM] = 0;

	wmi_ut.vdev_id = cpu_to_le32(arvif->vdev_id);
	wmi_ut.module_id = cpu_to_le32(DFS_UNIT_TEST_MODULE);
	wmi_ut.num_args = cpu_to_le32(DFS_MAX_TEST_ARGS);
	wmi_ut.diag_token = cpu_to_le32(DFS_UNIT_TEST_TOKEN);

	ath12k_dbg(ar->ab, ATH12K_DBG_REG, "Triggering Radar Simulation\n");

	return ath12k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args);
}
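
/* Request HALPHY control-path TPC stats for this pdev. The command carries
 * the fixed params plus three (mostly empty) filter arrays; only the pdev_id
 * filter is populated here. Completion is reported asynchronously through
 * WMI_HALPHY_STATS_CTRL_PATH_EVENTID (see ath12k_wmi_process_tpc_stats()).
 */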
int ath12k_wmi_send_tpc_stats_request(struct ath12k *ar,
				      enum wmi_halphy_ctrl_path_stats_id tpc_stats_type)
{
	struct wmi_request_halphy_ctrl_path_stats_cmd_fixed_params *cmd;
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct sk_buff *skb;
	struct wmi_tlv *tlv;
	__le32 *pdev_id;
	u32 buf_len;
	void *ptr;
	int ret;

	buf_len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(u32) + TLV_HDR_SIZE + TLV_HDR_SIZE;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
	if (!skb)
		return -ENOMEM;
	cmd = (struct wmi_request_halphy_ctrl_path_stats_cmd_fixed_params *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HALPHY_CTRL_PATH_CMD_FIXED_PARAM,
						 sizeof(*cmd));

	cmd->stats_id_mask = cpu_to_le32(WMI_REQ_CTRL_PATH_PDEV_TX_STAT);
	cmd->action = cpu_to_le32(WMI_REQUEST_CTRL_PATH_STAT_GET);
	cmd->subid = cpu_to_le32(tpc_stats_type);

	ptr = skb->data + sizeof(*cmd);

	/* The below TLV arrays optionally follow this fixed param TLV structure
	 * 1. ARRAY_UINT32 pdev_ids[]
	 *    If this array is present and non-zero length, stats should only
	 *    be provided from the pdevs identified in the array.
	 * 2. ARRAY_UINT32 vdev_ids[]
	 *    If this array is present and non-zero length, stats should only
	 *    be provided from the vdevs identified in the array.
	 * 3. ath12k_wmi_mac_addr_params peer_macaddr[];
	 *    If this array is present and non-zero length, stats should only
	 *    be provided from the peers with the MAC addresses specified
	 *    in the array
	 */
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, sizeof(u32));
	ptr += TLV_HDR_SIZE;

	pdev_id = ptr;
	*pdev_id = cpu_to_le32(ath12k_mac_get_target_pdev_id(ar));
	ptr += sizeof(*pdev_id);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
	ptr += TLV_HDR_SIZE;

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, 0);
	ptr += TLV_HDR_SIZE;

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_REQUEST_HALPHY_CTRL_PATH_STATS_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to submit WMI_REQUEST_HALPHY_CTRL_PATH_STATS_CMDID\n");
		dev_kfree_skb(skb);
		return ret;
	}
	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI get TPC STATS sent on pdev %d\n",
		   ar->pdev->pdev_id);

	return ret;
}

int ath12k_wmi_connect(struct ath12k_base *ab)
{
	u32 i;
	u8 wmi_ep_count;

	wmi_ep_count = ab->htc.wmi_ep_count;
	if (wmi_ep_count > ab->hw_params->max_radios)
		return -EINVAL;

	for (i = 0; i < wmi_ep_count; i++)
		ath12k_connect_pdev_htc_service(ab, i);

	return 0;
}

static void ath12k_wmi_pdev_detach(struct ath12k_base *ab, u8 pdev_id)
{
	if (WARN_ON(pdev_id >= MAX_RADIOS))
		return;

	/* TODO: Deinit any pdev specific wmi resource */
}

int ath12k_wmi_pdev_attach(struct ath12k_base *ab,
			   u8 pdev_id)
{
	struct ath12k_wmi_pdev *wmi_handle;

	if (pdev_id >= ab->hw_params->max_radios)
		return -EINVAL;

	wmi_handle = &ab->wmi_ab.wmi[pdev_id];

	wmi_handle->wmi_ab = &ab->wmi_ab;

	ab->wmi_ab.ab = ab;
	/* TODO: Init remaining resource specific to pdev */

	return 0;
}

int ath12k_wmi_attach(struct ath12k_base *ab)
{
	int ret;

	ret = ath12k_wmi_pdev_attach(ab, 0);
	if (ret)
		return ret;

	ab->wmi_ab.ab = ab;
	ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;

	/* It's overwritten when service_ext_ready is handled */
	if (ab->hw_params->single_pdev_only)
		ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE;

	/* TODO: Init remaining wmi soc resources required */
	init_completion(&ab->wmi_ab.service_ready);
	init_completion(&ab->wmi_ab.unified_ready);

	return 0;
}

void ath12k_wmi_detach(struct ath12k_base *ab)
{
	int i;

	/* TODO: Deinit wmi resource specific to SOC as required */

	for (i = 0; i < ab->htc.wmi_ep_count; i++)
		ath12k_wmi_pdev_detach(ab, i);

	ath12k_wmi_free_dbring_caps(ab);
}
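
/* Program the firmware HW data filter for a vdev; on disable, every filter
 * mode bit is set so the whole filter set is torn down in one command.
 */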
int ath12k_wmi_hw_data_filter_cmd(struct ath12k *ar, struct wmi_hw_data_filter_arg *arg)
{
	struct wmi_hw_data_filter_cmd *cmd;
	struct sk_buff *skb;
	int len;

	len = sizeof(*cmd);
	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_hw_data_filter_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HW_DATA_FILTER_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	cmd->enable = cpu_to_le32(arg->enable ? 1 : 0);

	/* Set all modes in case of disable */
	if (arg->enable)
		cmd->hw_filter_bitmap = cpu_to_le32(arg->hw_filter_bitmap);
	else
		cmd->hw_filter_bitmap = cpu_to_le32((u32)~0U);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi hw data filter enable %d filter_bitmap 0x%x\n",
		   arg->enable, arg->hw_filter_bitmap);

	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_HW_DATA_FILTER_CMDID);
}

int ath12k_wmi_wow_host_wakeup_ind(struct ath12k *ar)
{
	struct wmi_wow_host_wakeup_cmd *cmd;
	struct sk_buff *skb;
	size_t len;

	len = sizeof(*cmd);
	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_wow_host_wakeup_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD,
						 sizeof(*cmd));

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow host wakeup ind\n");

	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID);
}

int ath12k_wmi_wow_enable(struct ath12k *ar)
{
	struct wmi_wow_enable_cmd *cmd;
	struct sk_buff *skb;
	int len;

	len = sizeof(*cmd);
	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_wow_enable_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ENABLE_CMD,
						 sizeof(*cmd));

	cmd->enable = cpu_to_le32(1);
	cmd->pause_iface_config = cpu_to_le32(WOW_IFACE_PAUSE_ENABLED);
	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow enable\n");

	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_CMDID);
}

int ath12k_wmi_wow_add_wakeup_event(struct ath12k *ar, u32 vdev_id,
				    enum wmi_wow_wakeup_event event,
				    u32 enable)
{
	struct wmi_wow_add_del_event_cmd *cmd;
	struct sk_buff *skb;
	size_t len;

	len = sizeof(*cmd);
	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_wow_add_del_event_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_DEL_EVT_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->is_add = cpu_to_le32(enable);
	cmd->event_bitmap = cpu_to_le32((1 << event));

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
		   wow_wakeup_event(event), enable, vdev_id);

	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID);
}
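
/* Install a WoW bitmap wakeup pattern: 'pattern' holds the match bytes,
 * 'mask' selects which of those bytes must match, and 'pattern_offset' is
 * where matching starts in the frame. Illustrative use with hypothetical
 * values, waking on a specific transmitter address in the 802.11 header:
 *
 *	static const u8 pat[ETH_ALEN] = { 0x00, 0x03, 0x7f, 0x01, 0x02, 0x03 };
 *	static const u8 msk[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
 *
 *	ret = ath12k_wmi_wow_add_pattern(ar, vdev_id, 0, pat, msk,
 *					 ETH_ALEN, 10);
 */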
int ath12k_wmi_wow_add_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id,
			       const u8 *pattern, const u8 *mask,
			       int pattern_len, int pattern_offset)
{
	struct wmi_wow_add_pattern_cmd *cmd;
	struct wmi_wow_bitmap_pattern_params *bitmap;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	size_t len;

	len = sizeof(*cmd) +
	      sizeof(*tlv) +			/* array struct */
	      sizeof(*bitmap) +			/* bitmap */
	      sizeof(*tlv) +			/* empty ipv4 sync */
	      sizeof(*tlv) +			/* empty ipv6 sync */
	      sizeof(*tlv) +			/* empty magic */
	      sizeof(*tlv) +			/* empty info timeout */
	      sizeof(*tlv) + sizeof(u32);	/* ratelimit interval */

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	/* cmd */
	ptr = skb->data;
	cmd = ptr;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_PATTERN_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->pattern_id = cpu_to_le32(pattern_id);
	cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN);

	ptr += sizeof(*cmd);

	/* bitmap */
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, sizeof(*bitmap));

	ptr += sizeof(*tlv);

	bitmap = ptr;
	bitmap->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_BITMAP_PATTERN_T,
						    sizeof(*bitmap));
	memcpy(bitmap->patternbuf, pattern, pattern_len);
	memcpy(bitmap->bitmaskbuf, mask, pattern_len);
	bitmap->pattern_offset = cpu_to_le32(pattern_offset);
	bitmap->pattern_len = cpu_to_le32(pattern_len);
	bitmap->bitmask_len = cpu_to_le32(pattern_len);
	bitmap->pattern_id = cpu_to_le32(pattern_id);

	ptr += sizeof(*bitmap);

	/* ipv4 sync */
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);

	ptr += sizeof(*tlv);

	/* ipv6 sync */
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);

	ptr += sizeof(*tlv);

	/* magic */
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);

	ptr += sizeof(*tlv);

	/* pattern info timeout */
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);

	ptr += sizeof(*tlv);

	/* ratelimit interval */
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, sizeof(u32));

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d pattern_offset %d pattern_len %d\n",
		   vdev_id, pattern_id, pattern_offset, pattern_len);

	ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow pattern: ",
			bitmap->patternbuf, pattern_len);
	ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow bitmask: ",
			bitmap->bitmaskbuf, pattern_len);

	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ADD_WAKE_PATTERN_CMDID);
}

int ath12k_wmi_wow_del_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id)
{
	struct wmi_wow_del_pattern_cmd *cmd;
	struct sk_buff *skb;
	size_t len;

	len = sizeof(*cmd);
	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_wow_del_pattern_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_DEL_PATTERN_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->pattern_id = cpu_to_le32(pattern_id);
	cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
		   vdev_id, pattern_id);

	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_DEL_WAKE_PATTERN_CMDID);
}
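
/* Build the NLO (PNO) start command: a fixed param TLV followed by an array
 * of nlo_configured_params (one per matched SSID) and a uint32 channel list.
 * Dwell times come from the max values only, since current firmware does not
 * support a min-max range.
 */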
static struct sk_buff *
ath12k_wmi_op_gen_config_pno_start(struct ath12k *ar, u32 vdev_id,
				   struct wmi_pno_scan_req_arg *pno)
{
	struct nlo_configured_params *nlo_list;
	size_t len, nlo_list_len, channel_list_len;
	struct wmi_wow_nlo_config_cmd *cmd;
	__le32 *channel_list;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	u32 i;

	len = sizeof(*cmd) +
	      sizeof(*tlv) +
	      /* TLV place holder for array of structures
	       * nlo_configured_params(nlo_list)
	       */
	      sizeof(*tlv);
	      /* TLV place holder for array of uint32 channel_list */

	channel_list_len = sizeof(u32) * pno->a_networks[0].channel_count;
	len += channel_list_len;

	nlo_list_len = sizeof(*nlo_list) * pno->uc_networks_count;
	len += nlo_list_len;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = skb->data;
	cmd = ptr;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(pno->vdev_id);
	cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN);

	/* current FW does not support min-max range for dwell time */
	cmd->active_dwell_time = cpu_to_le32(pno->active_max_time);
	cmd->passive_dwell_time = cpu_to_le32(pno->passive_max_time);

	if (pno->do_passive_scan)
		cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SCAN_PASSIVE);

	cmd->fast_scan_period = cpu_to_le32(pno->fast_scan_period);
	cmd->slow_scan_period = cpu_to_le32(pno->slow_scan_period);
	cmd->fast_scan_max_cycles = cpu_to_le32(pno->fast_scan_max_cycles);
	cmd->delay_start_time = cpu_to_le32(pno->delay_start_time);

	if (pno->enable_pno_scan_randomization) {
		cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
					  WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ);
		ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
		ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
	}

	ptr += sizeof(*cmd);

	/* nlo_configured_params(nlo_list) */
	cmd->no_of_ssids = cpu_to_le32(pno->uc_networks_count);
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, nlo_list_len);

	ptr += sizeof(*tlv);
	nlo_list = ptr;
	for (i = 0; i < pno->uc_networks_count; i++) {
		tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
		tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_BYTE,
						     sizeof(*nlo_list));

		nlo_list[i].ssid.valid = cpu_to_le32(1);
		nlo_list[i].ssid.ssid.ssid_len =
			cpu_to_le32(pno->a_networks[i].ssid.ssid_len);
		memcpy(nlo_list[i].ssid.ssid.ssid,
		       pno->a_networks[i].ssid.ssid,
		       le32_to_cpu(nlo_list[i].ssid.ssid.ssid_len));

		if (pno->a_networks[i].rssi_threshold &&
		    pno->a_networks[i].rssi_threshold > -300) {
			nlo_list[i].rssi_cond.valid = cpu_to_le32(1);
			nlo_list[i].rssi_cond.rssi =
				cpu_to_le32(pno->a_networks[i].rssi_threshold);
		}

		nlo_list[i].bcast_nw_type.valid = cpu_to_le32(1);
		nlo_list[i].bcast_nw_type.bcast_nw_type =
			cpu_to_le32(pno->a_networks[i].bcast_nw_type);
	}

	ptr += nlo_list_len;
	cmd->num_of_channels = cpu_to_le32(pno->a_networks[0].channel_count);
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, channel_list_len);
	ptr += sizeof(*tlv);
	channel_list = ptr;

	for (i = 0; i < pno->a_networks[0].channel_count; i++)
		channel_list[i] = cpu_to_le32(pno->a_networks[0].channels[i]);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n",
		   vdev_id);

	return skb;
}

static struct sk_buff *ath12k_wmi_op_gen_config_pno_stop(struct ath12k *ar,
							 u32 vdev_id)
{
	struct wmi_wow_nlo_config_cmd *cmd;
	struct sk_buff *skb;
	size_t len;

	len = sizeof(*cmd);
	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	cmd = (struct wmi_wow_nlo_config_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, len);

	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_STOP);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi tlv stop pno config vdev_id %d\n", vdev_id);
	return skb;
}

int ath12k_wmi_wow_config_pno(struct ath12k *ar, u32 vdev_id,
			      struct wmi_pno_scan_req_arg *pno_scan)
{
	struct sk_buff *skb;

	if (pno_scan->enable)
		skb = ath12k_wmi_op_gen_config_pno_start(ar, vdev_id, pno_scan);
	else
		skb = ath12k_wmi_op_gen_config_pno_stop(ar, vdev_id);

	if (IS_ERR_OR_NULL(skb))
		return -ENOMEM;

	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID);
}
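
/* Fill NS offload tuples into the command buffer. The first array is capped
 * at WMI_MAX_NS_OFFLOADS entries, so callers with more IPv6 addresses make a
 * second pass with 'ext' set to emit the remaining tuples into the extended
 * array (see ath12k_wmi_arp_ns_offload()).
 */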
static void ath12k_wmi_fill_ns_offload(struct ath12k *ar,
				       struct wmi_arp_ns_offload_arg *offload,
				       void **ptr,
				       bool enable,
				       bool ext)
{
	struct wmi_ns_offload_params *ns;
	struct wmi_tlv *tlv;
	void *buf_ptr = *ptr;
	u32 ns_cnt, ns_ext_tuples;
	int i, max_offloads;

	ns_cnt = offload->ipv6_count;

	tlv = buf_ptr;

	if (ext) {
		ns_ext_tuples = offload->ipv6_count - WMI_MAX_NS_OFFLOADS;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
						 ns_ext_tuples * sizeof(*ns));
		i = WMI_MAX_NS_OFFLOADS;
		max_offloads = offload->ipv6_count;
	} else {
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
						 WMI_MAX_NS_OFFLOADS * sizeof(*ns));
		i = 0;
		max_offloads = WMI_MAX_NS_OFFLOADS;
	}

	buf_ptr += sizeof(*tlv);

	for (; i < max_offloads; i++) {
		ns = buf_ptr;
		ns->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NS_OFFLOAD_TUPLE,
							sizeof(*ns));

		if (enable) {
			if (i < ns_cnt)
				ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_VALID);

			memcpy(ns->target_ipaddr[0], offload->ipv6_addr[i], 16);
			memcpy(ns->solicitation_ipaddr, offload->self_ipv6_addr[i], 16);

			if (offload->ipv6_type[i])
				ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_IS_IPV6_ANYCAST);

			memcpy(ns->target_mac.addr, offload->mac_addr, ETH_ALEN);

			if (!is_zero_ether_addr(ns->target_mac.addr))
				ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_MAC_VALID);

			ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
				   "wmi index %d ns_solicited %pI6 target %pI6",
				   i, ns->solicitation_ipaddr,
				   ns->target_ipaddr[0]);
		}

		buf_ptr += sizeof(*ns);
	}

	*ptr = buf_ptr;
}

static void ath12k_wmi_fill_arp_offload(struct ath12k *ar,
					struct wmi_arp_ns_offload_arg *offload,
					void **ptr,
					bool enable)
{
	struct wmi_arp_offload_params *arp;
	struct wmi_tlv *tlv;
	void *buf_ptr = *ptr;
	int i;

	/* fill arp tuple */
	tlv = buf_ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
					 WMI_MAX_ARP_OFFLOADS * sizeof(*arp));
	buf_ptr += sizeof(*tlv);

	for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) {
		arp = buf_ptr;
		arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARP_OFFLOAD_TUPLE,
							 sizeof(*arp));

		if (enable && i < offload->ipv4_count) {
			/* Copy the target ip addr and flags */
			arp->flags = cpu_to_le32(WMI_ARPOL_FLAGS_VALID);
			memcpy(arp->target_ipaddr, offload->ipv4_addr[i], 4);

			ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi arp offload address %pI4",
				   arp->target_ipaddr);
		}

		buf_ptr += sizeof(*arp);
	}

	*ptr = buf_ptr;
}

int ath12k_wmi_arp_ns_offload(struct ath12k *ar,
			      struct ath12k_link_vif *arvif,
			      struct wmi_arp_ns_offload_arg *offload,
			      bool enable)
{
	struct wmi_set_arp_ns_offload_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *buf_ptr;
	size_t len;
	u8 ns_cnt, ns_ext_tuples = 0;

	ns_cnt = offload->ipv6_count;

	len = sizeof(*cmd) +
	      sizeof(*tlv) +
	      WMI_MAX_NS_OFFLOADS * sizeof(struct wmi_ns_offload_params) +
	      sizeof(*tlv) +
	      WMI_MAX_ARP_OFFLOADS * sizeof(struct wmi_arp_offload_params);

	if (ns_cnt > WMI_MAX_NS_OFFLOADS) {
		ns_ext_tuples = ns_cnt - WMI_MAX_NS_OFFLOADS;
		len += sizeof(*tlv) +
		       ns_ext_tuples * sizeof(struct wmi_ns_offload_params);
	}

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	buf_ptr = skb->data;
	cmd = buf_ptr;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_ARP_NS_OFFLOAD_CMD,
						 sizeof(*cmd));
	cmd->flags = cpu_to_le32(0);
	cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
	cmd->num_ns_ext_tuples = cpu_to_le32(ns_ext_tuples);

	buf_ptr += sizeof(*cmd);

	ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, false);
	ath12k_wmi_fill_arp_offload(ar, offload, &buf_ptr, enable);

	if (ns_ext_tuples)
		ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, true);

	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_SET_ARP_NS_OFFLOAD_CMDID);
}

int ath12k_wmi_gtk_rekey_offload(struct ath12k *ar,
				 struct ath12k_link_vif *arvif, bool enable)
{
	struct ath12k_rekey_data *rekey_data = &arvif->rekey_data;
	struct wmi_gtk_rekey_offload_cmd *cmd;
	struct sk_buff *skb;
	__le64 replay_ctr;
	int len;

	len = sizeof(*cmd);
	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(arvif->vdev_id);

	if (enable) {
		cmd->flags = cpu_to_le32(GTK_OFFLOAD_ENABLE_OPCODE);

		/* the length in rekey_data and cmd is equal */
		memcpy(cmd->kck, rekey_data->kck, sizeof(cmd->kck));
		memcpy(cmd->kek, rekey_data->kek, sizeof(cmd->kek));

		replay_ctr = cpu_to_le64(rekey_data->replay_ctr);
		memcpy(cmd->replay_ctr, &replay_ctr,
		       sizeof(replay_ctr));
	} else {
		cmd->flags = cpu_to_le32(GTK_OFFLOAD_DISABLE_OPCODE);
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "offload gtk rekey vdev: %d %d\n",
		   arvif->vdev_id, enable);
	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
}

int ath12k_wmi_gtk_rekey_getinfo(struct ath12k *ar,
				 struct ath12k_link_vif *arvif)
{
	struct wmi_gtk_rekey_offload_cmd *cmd;
	struct sk_buff *skb;
	int len;

	len = sizeof(*cmd);
	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
	cmd->flags = cpu_to_le32(GTK_OFFLOAD_REQUEST_STATUS_OPCODE);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "get gtk rekey vdev_id: %d\n",
		   arvif->vdev_id);
	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
}
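
/* Configure firmware-offloaded station keepalive. The ARP response TLV is
 * always appended; its address fields are only meaningful for the
 * unsolicited-ARP-response and gratuitous-ARP-request methods. Illustrative
 * use with hypothetical values for a 30 s gratuitous ARP keepalive (the
 * IPv4/MAC fields must also be filled for this method):
 *
 *	struct wmi_sta_keepalive_arg arg = {
 *		.vdev_id = arvif->vdev_id,
 *		.enabled = 1,
 *		.method = WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST,
 *		.interval = 30,
 *	};
 *
 *	ret = ath12k_wmi_sta_keepalive(ar, &arg);
 */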
int ath12k_wmi_sta_keepalive(struct ath12k *ar,
			     const struct wmi_sta_keepalive_arg *arg)
{
	struct wmi_sta_keepalive_arp_resp_params *arp;
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_sta_keepalive_cmd *cmd;
	struct sk_buff *skb;
	size_t len;

	len = sizeof(*cmd) + sizeof(*arp);
	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_sta_keepalive_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALIVE_CMD, sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	cmd->enabled = cpu_to_le32(arg->enabled);
	cmd->interval = cpu_to_le32(arg->interval);
	cmd->method = cpu_to_le32(arg->method);

	arp = (struct wmi_sta_keepalive_arp_resp_params *)(cmd + 1);
	arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALVE_ARP_RESPONSE,
						 sizeof(*arp));
	if (arg->method == WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE ||
	    arg->method == WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST) {
		arp->src_ip4_addr = cpu_to_le32(arg->src_ip4_addr);
		arp->dest_ip4_addr = cpu_to_le32(arg->dest_ip4_addr);
		ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi sta keepalive vdev %d enabled %d method %d interval %d\n",
		   arg->vdev_id, arg->enabled, arg->method, arg->interval);

	return ath12k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID);
}
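
/* Send WMI_MLO_SETUP_CMDID for this pdev with the list of partner link IDs
 * in a trailing uint32 array TLV. Completion is reported via
 * WMI_MLO_SETUP_COMPLETE_EVENTID (see ath12k_wmi_event_mlo_setup_complete()).
 */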
int ath12k_wmi_mlo_ready(struct ath12k *ar)
{
	struct wmi_mlo_ready_cmd *cmd;
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);
	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_mlo_ready_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_READY_CMD,
						 sizeof(*cmd));
	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_READY_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_MLO_READY_CMDID command: %d\n",
			    ret);
		dev_kfree_skb(skb);
		return ret;
	}

	return 0;
}

int ath12k_wmi_mlo_teardown(struct ath12k *ar)
{
	struct wmi_mlo_teardown_cmd *cmd;
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);
	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_mlo_teardown_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_TEARDOWN_CMD,
						 sizeof(*cmd));
	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
	cmd->reason_code = cpu_to_le32(WMI_MLO_TEARDOWN_SSR_REASON);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_TEARDOWN_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI MLO teardown command: %d\n",
			    ret);
		dev_kfree_skb(skb);
		return ret;
	}

	return 0;
}

bool ath12k_wmi_supports_6ghz_cc_ext(struct ath12k *ar)
{
	return test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT,
			ar->ab->wmi_ab.svc_map) && ar->supports_6ghz;
}
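/* The TPC power command consists of the fixed command struct followed
 * by one WMI_TAG_ARRAY_STRUCT TLV holding a wmi_vdev_ch_power_params
 * entry (channel centre frequency plus allowed TX power) for each of
 * param->num_pwr_levels power levels.
 */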
int ath12k_wmi_send_vdev_set_tpc_power(struct ath12k *ar,
				       u32 vdev_id,
				       struct ath12k_reg_tpc_power_info *param)
{
	struct wmi_vdev_set_tpc_power_cmd *cmd;
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_ch_power_params *ch;
	int i, ret, len, array_len;
	struct sk_buff *skb;
	struct wmi_tlv *tlv;
	u8 *ptr;

	array_len = sizeof(*ch) * param->num_pwr_levels;
	len = sizeof(*cmd) + TLV_HDR_SIZE + array_len;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	ptr = skb->data;

	cmd = (struct wmi_vdev_set_tpc_power_cmd *)ptr;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_TPC_POWER_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->psd_power = cpu_to_le32(param->is_psd_power);
	cmd->eirp_power = cpu_to_le32(param->eirp_power);
	cmd->power_type_6ghz = cpu_to_le32(param->ap_power_type);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "tpc vdev id %d is psd power %d eirp power %d 6 ghz power type %d\n",
		   vdev_id, param->is_psd_power, param->eirp_power, param->ap_power_type);

	ptr += sizeof(*cmd);
	tlv = (struct wmi_tlv *)ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, array_len);

	ptr += TLV_HDR_SIZE;
	ch = (struct wmi_vdev_ch_power_params *)ptr;

	for (i = 0; i < param->num_pwr_levels; i++, ch++) {
		ch->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_CH_POWER_INFO,
							sizeof(*ch));
		ch->chan_cfreq = cpu_to_le32(param->chan_power_info[i].chan_cfreq);
		ch->tx_power = cpu_to_le32(param->chan_power_info[i].tx_power);

		ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "tpc chan freq %d TX power %d\n",
			   param->chan_power_info[i].chan_cfreq,
			   param->chan_power_info[i].tx_power);
	}

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_TPC_POWER_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_VDEV_SET_TPC_POWER_CMDID: %d\n",
			    ret);
		dev_kfree_skb(skb);
		return ret;
	}

	return 0;
}

static int
ath12k_wmi_fill_disallowed_bmap(struct ath12k_base *ab,
				struct wmi_disallowed_mlo_mode_bitmap_params *dislw_bmap,
				struct wmi_mlo_link_set_active_arg *arg)
{
	struct wmi_ml_disallow_mode_bmap_arg *dislw_bmap_arg;
	u8 i;

	if (arg->num_disallow_mode_comb >
	    ARRAY_SIZE(arg->disallow_bmap)) {
		ath12k_warn(ab, "invalid num_disallow_mode_comb: %d",
			    arg->num_disallow_mode_comb);
		return -EINVAL;
	}

	dislw_bmap_arg = &arg->disallow_bmap[0];
	for (i = 0; i < arg->num_disallow_mode_comb; i++) {
		dislw_bmap->tlv_header =
			ath12k_wmi_tlv_cmd_hdr(0, sizeof(*dislw_bmap));
		dislw_bmap->disallowed_mode_bitmap =
			cpu_to_le32(dislw_bmap_arg->disallowed_mode);
		dislw_bmap->ieee_link_id_comb =
			le32_encode_bits(dislw_bmap_arg->ieee_link_id[0],
					 WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_1) |
			le32_encode_bits(dislw_bmap_arg->ieee_link_id[1],
					 WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_2) |
			le32_encode_bits(dislw_bmap_arg->ieee_link_id[2],
					 WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_3) |
			le32_encode_bits(dislw_bmap_arg->ieee_link_id[3],
					 WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_4);

		ath12k_dbg(ab, ATH12K_DBG_WMI,
			   "entry %d disallowed_mode %d ieee_link_id_comb 0x%x",
			   i, dislw_bmap_arg->disallowed_mode,
			   dislw_bmap_arg->ieee_link_id_comb);
		dislw_bmap++;
		dislw_bmap_arg++;
	}

	return 0;
}
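/* The link-set-active command is the fixed command struct followed by
 * six array TLVs in a fixed order:
 *
 *   1. link number parameters        (WMI_TAG_ARRAY_STRUCT)
 *   2. active vdev bitmap            (WMI_TAG_ARRAY_UINT32)
 *   3. inactive vdev bitmap          (WMI_TAG_ARRAY_UINT32)
 *   4. ieee link id bitmap           (WMI_TAG_ARRAY_UINT32)
 *   5. ieee link id bitmap2          (WMI_TAG_ARRAY_UINT32)
 *   6. disallowed mode combinations  (WMI_TAG_ARRAY_STRUCT)
 *
 * Which arrays carry data depends on arg->force_mode; arrays that are
 * unused for a given mode are still emitted as zero-length TLVs so the
 * overall layout stays fixed.
 */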
int ath12k_wmi_send_mlo_link_set_active_cmd(struct ath12k_base *ab,
					    struct wmi_mlo_link_set_active_arg *arg)
{
	struct wmi_disallowed_mlo_mode_bitmap_params *disallowed_mode_bmap;
	struct wmi_mlo_set_active_link_number_params *link_num_param;
	u32 num_link_num_param = 0, num_vdev_bitmap = 0;
	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
	struct wmi_mlo_link_set_active_cmd *cmd;
	u32 num_inactive_vdev_bitmap = 0;
	u32 num_disallow_mode_comb = 0;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	__le32 *vdev_bitmap;
	void *buf_ptr;
	int i, ret;
	u32 len;

	if (!arg->num_vdev_bitmap && !arg->num_link_entry) {
		ath12k_warn(ab, "Invalid num_vdev_bitmap and num_link_entry");
		return -EINVAL;
	}

	switch (arg->force_mode) {
	case WMI_MLO_LINK_FORCE_MODE_ACTIVE_LINK_NUM:
	case WMI_MLO_LINK_FORCE_MODE_INACTIVE_LINK_NUM:
		num_link_num_param = arg->num_link_entry;
		fallthrough;
	case WMI_MLO_LINK_FORCE_MODE_ACTIVE:
	case WMI_MLO_LINK_FORCE_MODE_INACTIVE:
	case WMI_MLO_LINK_FORCE_MODE_NO_FORCE:
		num_vdev_bitmap = arg->num_vdev_bitmap;
		break;
	case WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE:
		num_vdev_bitmap = arg->num_vdev_bitmap;
		num_inactive_vdev_bitmap = arg->num_inactive_vdev_bitmap;
		break;
	default:
		ath12k_warn(ab, "Invalid force mode: %u", arg->force_mode);
		return -EINVAL;
	}

	num_disallow_mode_comb = arg->num_disallow_mode_comb;
	len = sizeof(*cmd) +
	      TLV_HDR_SIZE + sizeof(*link_num_param) * num_link_num_param +
	      TLV_HDR_SIZE + sizeof(*vdev_bitmap) * num_vdev_bitmap +
	      TLV_HDR_SIZE + TLV_HDR_SIZE + TLV_HDR_SIZE +
	      TLV_HDR_SIZE + sizeof(*disallowed_mode_bmap) * num_disallow_mode_comb;
	if (arg->force_mode == WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE)
		len += sizeof(*vdev_bitmap) * num_inactive_vdev_bitmap;

	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_mlo_link_set_active_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_LINK_SET_ACTIVE_CMD,
						 sizeof(*cmd));
	cmd->force_mode = cpu_to_le32(arg->force_mode);
	cmd->reason = cpu_to_le32(arg->reason);
	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "mode %d reason %d num_link_num_param %d num_vdev_bitmap %d inactive %d num_disallow_mode_comb %d",
		   arg->force_mode, arg->reason, num_link_num_param,
		   num_vdev_bitmap, num_inactive_vdev_bitmap,
		   num_disallow_mode_comb);

	buf_ptr = skb->data + sizeof(*cmd);
	tlv = buf_ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
					 sizeof(*link_num_param) * num_link_num_param);
	buf_ptr += TLV_HDR_SIZE;

	if (num_link_num_param) {
		cmd->ctrl_flags =
			le32_encode_bits(arg->ctrl_flags.dync_force_link_num ? 1 : 0,
					 CRTL_F_DYNC_FORCE_LINK_NUM);

		link_num_param = buf_ptr;
		for (i = 0; i < num_link_num_param; i++) {
			link_num_param->tlv_header =
				ath12k_wmi_tlv_cmd_hdr(0, sizeof(*link_num_param));
			link_num_param->num_of_link =
				cpu_to_le32(arg->link_num[i].num_of_link);
			link_num_param->vdev_type =
				cpu_to_le32(arg->link_num[i].vdev_type);
			link_num_param->vdev_subtype =
				cpu_to_le32(arg->link_num[i].vdev_subtype);
			link_num_param->home_freq =
				cpu_to_le32(arg->link_num[i].home_freq);
			ath12k_dbg(ab, ATH12K_DBG_WMI,
				   "entry %d num_of_link %d vdev type %d subtype %d freq %d control_flags %d",
				   i, arg->link_num[i].num_of_link,
				   arg->link_num[i].vdev_type,
				   arg->link_num[i].vdev_subtype,
				   arg->link_num[i].home_freq,
				   __le32_to_cpu(cmd->ctrl_flags));
			link_num_param++;
		}

		buf_ptr += sizeof(*link_num_param) * num_link_num_param;
	}

	tlv = buf_ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32,
					 sizeof(*vdev_bitmap) * num_vdev_bitmap);
	buf_ptr += TLV_HDR_SIZE;

	if (num_vdev_bitmap) {
		vdev_bitmap = buf_ptr;
		for (i = 0; i < num_vdev_bitmap; i++) {
			vdev_bitmap[i] = cpu_to_le32(arg->vdev_bitmap[i]);
			ath12k_dbg(ab, ATH12K_DBG_WMI, "entry %d vdev_id_bitmap 0x%x",
				   i, arg->vdev_bitmap[i]);
		}

		buf_ptr += sizeof(*vdev_bitmap) * num_vdev_bitmap;
	}

	if (arg->force_mode == WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE) {
		tlv = buf_ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32,
						 sizeof(*vdev_bitmap) *
						 num_inactive_vdev_bitmap);
		buf_ptr += TLV_HDR_SIZE;

		if (num_inactive_vdev_bitmap) {
			vdev_bitmap = buf_ptr;
			for (i = 0; i < num_inactive_vdev_bitmap; i++) {
				vdev_bitmap[i] =
					cpu_to_le32(arg->inactive_vdev_bitmap[i]);
				ath12k_dbg(ab, ATH12K_DBG_WMI,
					   "entry %d inactive_vdev_id_bitmap 0x%x",
					   i, arg->inactive_vdev_bitmap[i]);
			}

			buf_ptr += sizeof(*vdev_bitmap) * num_inactive_vdev_bitmap;
		}
	} else {
		/* add empty vdev bitmap2 tlv */
		tlv = buf_ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
		buf_ptr += TLV_HDR_SIZE;
	}
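	/* Neither ieee link id bitmap is used by the driver yet; their
	 * zero-length TLV headers below keep the command's TLV stream
	 * complete for the firmware.
	 */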
	/* add empty ieee_link_id_bitmap tlv */
	tlv = buf_ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
	buf_ptr += TLV_HDR_SIZE;

	/* add empty ieee_link_id_bitmap2 tlv */
	tlv = buf_ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
	buf_ptr += TLV_HDR_SIZE;

	tlv = buf_ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
					 sizeof(*disallowed_mode_bmap) *
					 arg->num_disallow_mode_comb);
	buf_ptr += TLV_HDR_SIZE;

	ret = ath12k_wmi_fill_disallowed_bmap(ab, buf_ptr, arg);
	if (ret)
		goto free_skb;

	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_MLO_LINK_SET_ACTIVE_CMDID);
	if (ret) {
		ath12k_warn(ab,
			    "failed to send WMI_MLO_LINK_SET_ACTIVE_CMDID: %d\n", ret);
		goto free_skb;
	}

	ath12k_dbg(ab, ATH12K_DBG_WMI, "WMI mlo link set active cmd");

	return ret;

free_skb:
	dev_kfree_skb(skb);
	return ret;
}