// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#include <linux/skbuff.h>
#include <linux/ctype.h>
#include <net/mac80211.h>
#include <net/cfg80211.h>
#include <linux/completion.h>
#include <linux/if_ether.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/uuid.h>
#include <linux/time.h>
#include <linux/of.h>
#include "core.h"
#include "debugfs.h"
#include "debug.h"
#include "mac.h"
#include "hw.h"
#include "peer.h"
#include "p2p.h"
#include "testmode.h"

/* Parse state for WMI_SERVICE_READY_EVENTID: remembers whether the
 * service bitmap (WMI_TAG_ARRAY_UINT32) TLV has been consumed, since
 * only the first such array in the event carries the bitmap.
 */
struct ath12k_wmi_svc_ready_parse {
	bool wmi_svc_bitmap_done;
};

/* Parse state for firmware stats TLVs: fixed event header, destination
 * stats container and the optional per-chain RSSI TLV array.
 */
struct wmi_tlv_fw_stats_parse {
	const struct wmi_stats_event *ev;
	struct ath12k_fw_stats *stats;
	const struct wmi_per_chain_rssi_stat_params *rssi;
	int rssi_num;
	bool chain_rssi_done;
};

/* DMA ring capability entries gathered while walking a service ready
 * (ext/ext2) event.
 */
struct ath12k_wmi_dma_ring_caps_parse {
	struct ath12k_wmi_dma_ring_caps_params *dma_ring_caps;
	u32 n_dma_ring_caps;
};

/* Host-order copy of the fixed parameters of the service ready ext
 * event (concurrency/fw config bits, HE caps, PPE thresholds, counts).
 */
struct ath12k_wmi_service_ext_arg {
	u32 default_conc_scan_config_bits;
	u32 default_fw_config_bits;
	struct ath12k_wmi_ppe_threshold_arg ppet;
	u32 he_cap_info;
	u32 mpdu_density;
	u32 max_bssid_rx_filters;
	u32 num_hw_modes;
	u32 num_phy;
};

/* Aggregated parse state for WMI_SERVICE_READY_EXT_EVENTID. Pointers
 * reference TLVs inside the received skb; the *_done flags mark which
 * TLV groups have already been handled so repeated arrays are not
 * re-processed.
 */
struct ath12k_wmi_svc_rdy_ext_parse {
	struct ath12k_wmi_service_ext_arg arg;
	const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps;
	const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
	u32 n_hw_mode_caps;
	u32 tot_phy_id;
	struct ath12k_wmi_hw_mode_cap_params pref_hw_mode_caps;
	struct ath12k_wmi_mac_phy_caps_params *mac_phy_caps;
	u32 n_mac_phy_caps;
	const struct ath12k_wmi_soc_hal_reg_caps_params *soc_hal_reg_caps;
	const struct ath12k_wmi_hal_reg_caps_ext_params *ext_hal_reg_caps;
	u32 n_ext_hal_reg_caps;
	struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
	bool hw_mode_done;
	bool mac_phy_done;
	bool ext_hal_reg_done;
	bool mac_phy_chainmask_combo_done;
	bool mac_phy_chainmask_cap_done;
	bool oem_dma_ring_cap_done;
	bool dma_ring_cap_done;
};

/* Host-order copy of the fixed parameters of the service ready ext2
 * event (EHT capabilities, MSDU queue limits, etc.).
 */
struct ath12k_wmi_svc_rdy_ext2_arg {
	u32 reg_db_version;
	u32 hw_min_max_tx_power_2ghz;
	u32 hw_min_max_tx_power_5ghz;
	u32 chwidth_num_peer_caps;
	u32 preamble_puncture_bw;
	u32 max_user_per_ppdu_ofdma;
	u32 max_user_per_ppdu_mumimo;
	u32 target_cap_flags;
	u32 eht_cap_mac_info[WMI_MAX_EHTCAP_MAC_SIZE];
	u32 max_num_linkview_peers;
	u32 max_num_msduq_supported_per_tid;
	u32 default_num_msduq_supported_per_tid;
};

/* Aggregated parse state for WMI_SERVICE_READY_EXT2_EVENTID; the
 * *_done flags track which optional TLV groups were already seen.
 */
struct ath12k_wmi_svc_rdy_ext2_parse {
	struct ath12k_wmi_svc_rdy_ext2_arg arg;
	struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
	bool dma_ring_cap_done;
	bool spectral_bin_scaling_done;
	bool mac_phy_caps_ext_done;
	bool hal_reg_caps_ext2_done;
	bool scan_radio_caps_ext2_done;
	bool twt_caps_done;
	bool htt_msdu_idx_to_qtype_map_done;
	bool dbs_or_sbs_cap_ext_done;
};

/* Parse state for WMI_READY_EVENTID: count of extra MAC address TLVs. */
struct ath12k_wmi_rdy_parse {
	u32 num_extra_mac_addr;
};

/* Collected TLVs of a DMA buffer release event: fixed params plus the
 * buffer-entry and metadata arrays and their element counts.
 */
struct ath12k_wmi_dma_buf_release_arg {
	struct ath12k_wmi_dma_buf_release_fixed_params fixed;
	const struct ath12k_wmi_dma_buf_release_entry_params *buf_entry;
	const struct ath12k_wmi_dma_buf_release_meta_data_params *meta_data;
	u32 num_buf_entry;
	u32 num_meta;
	bool buf_entry_done;
	bool meta_data_done;
};

/* Per-tag minimum length used by the TLV iterator to reject short TLVs. */
struct ath12k_wmi_tlv_policy {
	size_t min_len;
};

/* Parse state for a management rx event: fixed header plus frame body. */
struct wmi_tlv_mgmt_rx_parse {
	const struct ath12k_wmi_mgmt_rx_params *fixed;
	const u8 *frame_buf;
	bool frame_buf_done;
};

/* Minimum acceptable payload length per TLV tag; entries with
 * min_len == 0 (or tags absent from the table) are not length-checked.
 */
static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = {
	[WMI_TAG_ARRAY_BYTE] = { .min_len = 0 },
	[WMI_TAG_ARRAY_UINT32] = { .min_len = 0 },
	[WMI_TAG_SERVICE_READY_EVENT] = {
		.min_len = sizeof(struct wmi_service_ready_event) },
	[WMI_TAG_SERVICE_READY_EXT_EVENT] = {
		.min_len = sizeof(struct wmi_service_ready_ext_event) },
	[WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS] = {
		.min_len = sizeof(struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params) },
	[WMI_TAG_SOC_HAL_REG_CAPABILITIES] = {
		.min_len = sizeof(struct ath12k_wmi_soc_hal_reg_caps_params) },
	[WMI_TAG_VDEV_START_RESPONSE_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_start_resp_event) },
	[WMI_TAG_PEER_DELETE_RESP_EVENT] = {
		.min_len = sizeof(struct wmi_peer_delete_resp_event) },
	[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT] = {
		.min_len = sizeof(struct wmi_bcn_tx_status_event) },
	[WMI_TAG_VDEV_STOPPED_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_stopped_event) },
	[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT] = {
		.min_len = sizeof(struct wmi_reg_chan_list_cc_ext_event) },
	[WMI_TAG_MGMT_RX_HDR] = {
		.min_len = sizeof(struct ath12k_wmi_mgmt_rx_params) },
	[WMI_TAG_MGMT_TX_COMPL_EVENT] = {
		.min_len = sizeof(struct wmi_mgmt_tx_compl_event) },
	[WMI_TAG_SCAN_EVENT] = {
		.min_len = sizeof(struct wmi_scan_event) },
	[WMI_TAG_PEER_STA_KICKOUT_EVENT] = {
		.min_len = sizeof(struct wmi_peer_sta_kickout_event) },
	[WMI_TAG_ROAM_EVENT] = {
		.min_len = sizeof(struct wmi_roam_event) },
	[WMI_TAG_CHAN_INFO_EVENT] = {
		.min_len = sizeof(struct wmi_chan_info_event) },
	[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT] = {
		.min_len = sizeof(struct wmi_pdev_bss_chan_info_event) },
	[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_install_key_compl_event) },
	[WMI_TAG_READY_EVENT] = {
		.min_len = sizeof(struct ath12k_wmi_ready_event_min_params) },
	[WMI_TAG_SERVICE_AVAILABLE_EVENT] = {
		.min_len = sizeof(struct wmi_service_available_event) },
	[WMI_TAG_PEER_ASSOC_CONF_EVENT] = {
		.min_len = sizeof(struct wmi_peer_assoc_conf_event) },
	[WMI_TAG_RFKILL_EVENT] = {
		.min_len = sizeof(struct wmi_rfkill_state_change_event) },
	[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT] = {
		.min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
	[WMI_TAG_HOST_SWFDA_EVENT] = {
		.min_len = sizeof(struct wmi_fils_discovery_event) },
	[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT] = {
		.min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
	[WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_delete_resp_event) },
	[WMI_TAG_TWT_ENABLE_COMPLETE_EVENT] = {
		.min_len = sizeof(struct wmi_twt_enable_event) },
	[WMI_TAG_TWT_DISABLE_COMPLETE_EVENT] = {
		.min_len = sizeof(struct wmi_twt_disable_event) },
	[WMI_TAG_P2P_NOA_INFO] = {
		.min_len = sizeof(struct ath12k_wmi_p2p_noa_info) },
	[WMI_TAG_P2P_NOA_EVENT] = {
		.min_len = sizeof(struct wmi_p2p_noa_event) },
	[WMI_TAG_11D_NEW_COUNTRY_EVENT] = {
		.min_len = sizeof(struct wmi_11d_new_cc_event) },
	[WMI_TAG_PER_CHAIN_RSSI_STATS] = {
		.min_len = sizeof(struct wmi_per_chain_rssi_stat_params) },
};

/* Build a TLV header with the given tag and payload length (the length
 * excludes the TLV header itself).
 */
__le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len)
{
	return le32_encode_bits(cmd, WMI_TLV_TAG) |
	       le32_encode_bits(len, WMI_TLV_LEN);
}

/* Build a TLV header when @len is the total structure size including
 * the TLV header; subtracts TLV_HDR_SIZE before encoding.
 */
static __le32 ath12k_wmi_tlv_cmd_hdr(u32 cmd, u32 len)
{
	return ath12k_wmi_tlv_hdr(cmd, len - TLV_HDR_SIZE);
}

/* Fill the WMI resource configuration defaults for QCN9274-class
 * targets; most limits scale with the number of radios.
 */
void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
			     struct ath12k_wmi_resource_config_arg *config)
{
	config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS(ab);
	config->num_peers = ab->num_radios *
		ath12k_core_get_max_peers_per_radio(ab);
	config->num_offload_peers = TARGET_NUM_OFFLD_PEERS;
	config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
	config->num_peer_keys = TARGET_NUM_PEER_KEYS;
	config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
	/* All-ones mask over the number of RF chains the target reported */
	config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
	config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
	config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[1] =
TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;

	if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
		config->rx_decap_mode = TARGET_DECAP_MODE_RAW;
	else
		config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;

	config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
	config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
	config->num_mcast_groups = TARGET_NUM_MCAST_GROUPS;
	config->num_mcast_table_elems = TARGET_NUM_MCAST_TABLE_ELEMS;
	config->mcast2ucast_mode = TARGET_MCAST2UCAST_MODE;
	config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
	config->num_wds_entries = TARGET_NUM_WDS_ENTRIES;
	config->dma_burst_size = TARGET_DMA_BURST_SIZE;
	config->rx_skip_defrag_timeout_dup_detection_check =
		TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
	config->vow_config = TARGET_VOW_CONFIG;
	config->gtk_offload_max_vdev = TARGET_GTK_OFFLOAD_MAX_VDEV;
	config->num_msdu_desc = TARGET_NUM_MSDU_DESC;
	config->beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD;
	config->rx_batchmode = TARGET_RX_BATCHMODE;
	/* Indicates host supports peer map v3 and unmap v2 support */
	config->peer_map_unmap_version = 0x32;
	config->twt_ap_pdev_count = ab->num_radios;
	config->twt_ap_sta_count = 1000;
	config->ema_max_vap_cnt = ab->num_radios;
	config->ema_max_profile_period = TARGET_EMA_MAX_PROFILE_PERIOD;
	/* Reserve extra beacon-offload vdevs for EMA (multiple BSSID) use */
	config->beacon_tx_offload_max_vdev += config->ema_max_vap_cnt;

	if (test_bit(WMI_TLV_SERVICE_PEER_METADATA_V1A_V1B_SUPPORT, ab->wmi_ab.svc_map))
		config->peer_metadata_ver = ATH12K_PEER_METADATA_V1B;
}

/* Fill the WMI resource configuration defaults for WCN7850-class
 * targets; this is a station/mobile oriented profile with much smaller
 * fixed limits than the AP-class QCN9274 profile above.
 */
void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
			     struct ath12k_wmi_resource_config_arg *config)
{
	config->num_vdevs = 4;
	config->num_peers = 16;
	config->num_tids = 32;

	config->num_offload_peers = 3;
	config->num_offload_reorder_buffs = 3;
	config->num_peer_keys = TARGET_NUM_PEER_KEYS;
	config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
	config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
	config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
	config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
	config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
	config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
	config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
	config->num_mcast_groups = 0;
	config->num_mcast_table_elems = 0;
	config->mcast2ucast_mode = 0;
	config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
	config->num_wds_entries = 0;
	config->dma_burst_size = 0;
	config->rx_skip_defrag_timeout_dup_detection_check = 0;
	config->vow_config = TARGET_VOW_CONFIG;
	config->gtk_offload_max_vdev = 2;
	config->num_msdu_desc = 0x400;
	config->beacon_tx_offload_max_vdev = 2;
	config->rx_batchmode = TARGET_RX_BATCHMODE;

	config->peer_map_unmap_version = 0x1;
	config->use_pdev_id = 1;
	config->max_frag_entries = 0xa;
	config->num_tdls_vdevs = 0x1;
	config->num_tdls_conn_table_entries = 8;
	/* NOTE(review): beacon_tx_offload_max_vdev was already set to 2
	 * above; this re-assignment is redundant but harmless.
	 */
	config->beacon_tx_offload_max_vdev = 0x2;
	config->num_multicast_filter_entries = 0x20;
	config->num_wow_filters = 0x16;
	config->num_keep_alive_pattern = 0;
}

/* Map a hw mode enum value to its priority constant of the same name. */
#define PRIMAP(_hw_mode_) \
	[_hw_mode_] = _hw_mode_##_PRI

static const int ath12k_hw_mode_pri_map[] = {
	PRIMAP(WMI_HOST_HW_MODE_SINGLE),
	PRIMAP(WMI_HOST_HW_MODE_DBS),
	PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE),
	PRIMAP(WMI_HOST_HW_MODE_SBS),
	PRIMAP(WMI_HOST_HW_MODE_DBS_SBS),
	PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS),
	/* keep last */
	PRIMAP(WMI_HOST_HW_MODE_MAX),
};

/* Walk a buffer of WMI TLVs, validating each header and per-tag minimum
 * length against ath12k_wmi_tlv_policies, and invoke @iter on every TLV
 * payload. Returns 0 on success, -EINVAL on a malformed TLV, or the
 * first non-zero value returned by @iter.
 */
static int
ath12k_wmi_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
		    int (*iter)(struct ath12k_base *ab, u16 tag, u16 len,
				const void *ptr, void *data),
		    void *data)
{
	const void *begin = ptr;
	const struct wmi_tlv *tlv;
	u16 tlv_tag, tlv_len;
	int ret;

	while (len > 0) {
		/* Must have at least a full TLV header left */
		if (len < sizeof(*tlv)) {
			ath12k_err(ab, "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}

		tlv = ptr;
		tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
		tlv_len = le32_get_bits(tlv->header, WMI_TLV_LEN);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		/* Declared payload length must fit in the remaining buffer */
		if (tlv_len > len) {
			ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}

		/* Enforce the per-tag minimum payload length, if one exists */
		if (tlv_tag < ARRAY_SIZE(ath12k_wmi_tlv_policies) &&
		    ath12k_wmi_tlv_policies[tlv_tag].min_len &&
		    ath12k_wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
			ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
				   tlv_tag, ptr - begin, tlv_len,
				   ath12k_wmi_tlv_policies[tlv_tag].min_len);
			return -EINVAL;
		}

		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
		if (ret)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}

	return 0;
}

/* Iterator callback that records a pointer to each TLV payload in a
 * tag-indexed table (@data is const void *tb[WMI_TAG_MAX]).
 */
static int ath12k_wmi_tlv_iter_parse(struct ath12k_base *ab, u16 tag, u16 len,
				     const void *ptr, void *data)
{
	const void **tb = data;

	if (tag < WMI_TAG_MAX)
		tb[tag] = ptr;

	return 0;
}

/* Parse a TLV buffer into the caller-provided tag-indexed table @tb. */
static int ath12k_wmi_tlv_parse(struct ath12k_base *ar, const void **tb,
				const void *ptr, size_t len)
{
	return ath12k_wmi_tlv_iter(ar, ptr, len, ath12k_wmi_tlv_iter_parse,
				   (void *)tb);
}

/* Allocate a WMI_TAG_MAX-entry pointer table and fill it from @skb's
 * TLVs. Returns the table (caller must kfree()) or an ERR_PTR.
 */
static const void **
ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab,
			   struct sk_buff *skb, gfp_t gfp)
{
	const void **tb;
	int ret;

	tb = kcalloc(WMI_TAG_MAX, sizeof(*tb), gfp);
	if (!tb)
		return ERR_PTR(-ENOMEM);

	ret = ath12k_wmi_tlv_parse(ab, tb, skb->data, skb->len);
	if (ret) {
		kfree(tb);
		return ERR_PTR(ret);
	}

	return tb;
}

/* Prepend the WMI command header to @skb and hand it to HTC without
 * waiting for tx credits. On failure the header push is undone so the
 * caller may retry with the same skb.
 */
static int ath12k_wmi_cmd_send_nowait(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
				      u32 cmd_id)
{
	struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_cmd_hdr *cmd_hdr;
	int ret;

	if (!skb_push(skb, sizeof(struct wmi_cmd_hdr)))
		return -ENOMEM;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	cmd_hdr->cmd_id = le32_encode_bits(cmd_id, WMI_CMD_HDR_CMD_ID);

	memset(skb_cb, 0, sizeof(*skb_cb));
	ret = ath12k_htc_send(&ab->htc, wmi->eid, skb);

	if (ret)
		goto err_pull;

	return 0;

err_pull:
	skb_pull(skb, sizeof(struct wmi_cmd_hdr));
	return ret;
}

/* Send a WMI command, sleeping (up to WMI_SEND_TIMEOUT_HZ) for tx
 * credits when HTC reports -EAGAIN. Returns 0 on success, -EAGAIN on
 * credit timeout, -ESHUTDOWN if the device crashed meanwhile, or the
 * underlying send error.
 */
int ath12k_wmi_cmd_send(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
			u32 cmd_id)
{
	struct ath12k_wmi_base *wmi_ab = wmi->wmi_ab;
	int ret = -EOPNOTSUPP;

	might_sleep();

	wait_event_timeout(wmi_ab->tx_credits_wq, ({
		ret = ath12k_wmi_cmd_send_nowait(wmi, skb, cmd_id);

		if (ret && test_bit(ATH12K_FLAG_CRASH_FLUSH, &wmi_ab->ab->dev_flags))
			ret = -ESHUTDOWN;

		(ret != -EAGAIN);
	}), WMI_SEND_TIMEOUT_HZ);

	if (ret == -EAGAIN)
		ath12k_warn(wmi_ab->ab, "wmi command %d timeout\n", cmd_id);

	return ret;
}

/* Copy the fixed service-ready-ext parameters out of the event TLV into
 * the host-order @arg structure.
 */
static int ath12k_pull_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
				     const void *ptr,
				     struct ath12k_wmi_service_ext_arg *arg)
{
	const struct
wmi_service_ready_ext_event *ev = ptr;
	int i;

	if (!ev)
		return -EINVAL;

	/* Move this to host based bitmap */
	arg->default_conc_scan_config_bits =
		le32_to_cpu(ev->default_conc_scan_config_bits);
	arg->default_fw_config_bits = le32_to_cpu(ev->default_fw_config_bits);
	arg->he_cap_info = le32_to_cpu(ev->he_cap_info);
	arg->mpdu_density = le32_to_cpu(ev->mpdu_density);
	arg->max_bssid_rx_filters = le32_to_cpu(ev->max_bssid_rx_filters);
	arg->ppet.numss_m1 = le32_to_cpu(ev->ppet.numss_m1);
	arg->ppet.ru_bit_mask = le32_to_cpu(ev->ppet.ru_info);

	for (i = 0; i < WMI_MAX_NUM_SS; i++)
		arg->ppet.ppet16_ppet8_ru3_ru0[i] =
			le32_to_cpu(ev->ppet.ppet16_ppet8_ru3_ru0[i]);

	return 0;
}

/* Locate the MAC/PHY capabilities matching @hw_mode_id/@phy_id inside
 * the parsed service-ready-ext TLVs and populate @pdev (pdev id, link
 * id, chain masks and per-band HT/VHT/HE capabilities). Also records a
 * new entry in ab->fw_pdev[]. Returns -EINVAL if the hw mode or phy
 * cannot be found or the supported band mask is unusable.
 */
static int
ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
				      struct ath12k_wmi_svc_rdy_ext_parse *svc,
				      u8 hw_mode_id, u8 phy_id,
				      struct ath12k_pdev *pdev)
{
	const struct ath12k_wmi_mac_phy_caps_params *mac_caps;
	const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps = svc->hw_caps;
	const struct ath12k_wmi_hw_mode_cap_params *wmi_hw_mode_caps = svc->hw_mode_caps;
	const struct ath12k_wmi_mac_phy_caps_params *wmi_mac_phy_caps = svc->mac_phy_caps;
	struct ath12k_base *ab = wmi_handle->wmi_ab->ab;
	struct ath12k_band_cap *cap_band;
	struct ath12k_pdev_cap *pdev_cap = &pdev->cap;
	struct ath12k_fw_pdev *fw_pdev;
	u32 phy_map;
	u32 hw_idx, phy_idx = 0;
	int i;

	if (!hw_caps || !wmi_hw_mode_caps || !svc->soc_hal_reg_caps)
		return -EINVAL;

	/* Accumulate the phy count of every hw mode preceding the
	 * requested one; phy_idx then indexes the flat mac_phy_caps array.
	 */
	for (hw_idx = 0; hw_idx < le32_to_cpu(hw_caps->num_hw_modes); hw_idx++) {
		if (hw_mode_id == le32_to_cpu(wmi_hw_mode_caps[hw_idx].hw_mode_id))
			break;

		phy_map = le32_to_cpu(wmi_hw_mode_caps[hw_idx].phy_id_map);
		phy_idx = fls(phy_map);
	}

	if (hw_idx == le32_to_cpu(hw_caps->num_hw_modes))
		return -EINVAL;

	phy_idx += phy_id;
	if (phy_id >= le32_to_cpu(svc->soc_hal_reg_caps->num_phy))
		return -EINVAL;

	mac_caps = wmi_mac_phy_caps + phy_idx;

	pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
	pdev->hw_link_id = ath12k_wmi_mac_phy_get_hw_link_id(mac_caps);
	pdev_cap->supported_bands |= le32_to_cpu(mac_caps->supported_bands);
	pdev_cap->ampdu_density = le32_to_cpu(mac_caps->ampdu_density);

	fw_pdev = &ab->fw_pdev[ab->fw_pdev_count];
	fw_pdev->supported_bands = le32_to_cpu(mac_caps->supported_bands);
	fw_pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
	fw_pdev->phy_id = le32_to_cpu(mac_caps->phy_id);
	ab->fw_pdev_count++;

	/* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from
	 * band to band for a single radio, need to see how this should be
	 * handled.
	 */
	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2GHZ_CAP) {
		pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_2g);
		pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_2g);
	} else if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5GHZ_CAP) {
		pdev_cap->vht_cap = le32_to_cpu(mac_caps->vht_cap_info_5g);
		pdev_cap->vht_mcs = le32_to_cpu(mac_caps->vht_supp_mcs_5g);
		pdev_cap->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
		pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_5g);
		pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_5g);
		pdev_cap->nss_ratio_enabled =
			WMI_NSS_RATIO_EN_DIS_GET(mac_caps->nss_ratio);
		pdev_cap->nss_ratio_info =
			WMI_NSS_RATIO_INFO_GET(mac_caps->nss_ratio);
	} else {
		return -EINVAL;
	}

	/* tx/rx chainmask reported from fw depends on the actual hw chains used,
	 * For example, for 4x4 capable macphys, first 4 chains can be used for first
	 * mac and the remaining 4 chains can be used for the second mac or vice-versa.
	 * In this case, tx/rx chainmask 0xf will be advertised for first mac and 0xf0
	 * will be advertised for second mac or vice-versa. Compute the shift value
	 * for tx/rx chainmask which will be used to advertise supported ht/vht rates to
	 * mac80211.
	 */
	pdev_cap->tx_chain_mask_shift =
		find_first_bit((unsigned long *)&pdev_cap->tx_chain_mask, 32);
	pdev_cap->rx_chain_mask_shift =
		find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32);

	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2GHZ_CAP) {
		cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
		cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
		cap_band->max_bw_supported = le32_to_cpu(mac_caps->max_bw_supported_2g);
		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_2g);
		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_2g);
		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_2g_ext);
		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_2g);
		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
			cap_band->he_cap_phy_info[i] =
				le32_to_cpu(mac_caps->he_cap_phy_info_2g[i]);

		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet2g.numss_m1);
		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet2g.ru_info);

		for (i = 0; i < WMI_MAX_NUM_SS; i++)
			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
				le32_to_cpu(mac_caps->he_ppet2g.ppet16_ppet8_ru3_ru0[i]);
	}

	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5GHZ_CAP) {
		cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
		cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
		cap_band->max_bw_supported =
			le32_to_cpu(mac_caps->max_bw_supported_5g);
		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
			cap_band->he_cap_phy_info[i] =
				le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);

		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);

		for (i = 0; i < WMI_MAX_NUM_SS; i++)
			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
				le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);

		/* The 6 GHz band reuses the firmware's 5 GHz capability set */
		cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
		cap_band->max_bw_supported =
			le32_to_cpu(mac_caps->max_bw_supported_5g);
		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
			cap_band->he_cap_phy_info[i] =
				le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);

		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);

		for (i = 0; i < WMI_MAX_NUM_SS; i++)
			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
				le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);
	}

	return 0;
}

/* Extract the regulatory capabilities of phy @phy_idx from the hal reg
 * capability TLV arrays into host-order @param.
 */
static int
ath12k_pull_reg_cap_svc_rdy_ext(struct ath12k_wmi_pdev *wmi_handle,
				const struct ath12k_wmi_soc_hal_reg_caps_params *reg_caps,
				const struct ath12k_wmi_hal_reg_caps_ext_params *ext_caps,
				u8 phy_idx,
				struct ath12k_wmi_hal_reg_capabilities_ext_arg *param)
{
	const struct ath12k_wmi_hal_reg_caps_ext_params *ext_reg_cap;

	if (!reg_caps || !ext_caps)
		return -EINVAL;

	if (phy_idx >= le32_to_cpu(reg_caps->num_phy))
		return -EINVAL;

	ext_reg_cap = &ext_caps[phy_idx];

	param->phy_id = le32_to_cpu(ext_reg_cap->phy_id);
	param->eeprom_reg_domain =
le32_to_cpu(ext_reg_cap->eeprom_reg_domain);
	param->eeprom_reg_domain_ext =
		le32_to_cpu(ext_reg_cap->eeprom_reg_domain_ext);
	param->regcap1 = le32_to_cpu(ext_reg_cap->regcap1);
	param->regcap2 = le32_to_cpu(ext_reg_cap->regcap2);
	/* check if param->wireless_mode is needed */
	param->low_2ghz_chan = le32_to_cpu(ext_reg_cap->low_2ghz_chan);
	param->high_2ghz_chan = le32_to_cpu(ext_reg_cap->high_2ghz_chan);
	param->low_5ghz_chan = le32_to_cpu(ext_reg_cap->low_5ghz_chan);
	param->high_5ghz_chan = le32_to_cpu(ext_reg_cap->high_5ghz_chan);

	return 0;
}

/* Copy the fixed target capability fields of the service ready event
 * into host-order @cap.
 */
static int ath12k_pull_service_ready_tlv(struct ath12k_base *ab,
					 const void *evt_buf,
					 struct ath12k_wmi_target_cap_arg *cap)
{
	const struct wmi_service_ready_event *ev = evt_buf;

	if (!ev) {
		ath12k_err(ab, "%s: failed by NULL param\n",
			   __func__);
		return -EINVAL;
	}

	cap->phy_capability = le32_to_cpu(ev->phy_capability);
	cap->max_frag_entry = le32_to_cpu(ev->max_frag_entry);
	cap->num_rf_chains = le32_to_cpu(ev->num_rf_chains);
	cap->ht_cap_info = le32_to_cpu(ev->ht_cap_info);
	cap->vht_cap_info = le32_to_cpu(ev->vht_cap_info);
	cap->vht_supp_mcs = le32_to_cpu(ev->vht_supp_mcs);
	cap->hw_min_tx_power = le32_to_cpu(ev->hw_min_tx_power);
	cap->hw_max_tx_power = le32_to_cpu(ev->hw_max_tx_power);
	cap->sys_cap_info = le32_to_cpu(ev->sys_cap_info);
	cap->min_pkt_size_enable = le32_to_cpu(ev->min_pkt_size_enable);
	cap->max_bcn_ie_size = le32_to_cpu(ev->max_bcn_ie_size);
	cap->max_num_scan_channels = le32_to_cpu(ev->max_num_scan_channels);
	cap->max_supported_macs = le32_to_cpu(ev->max_supported_macs);
	cap->wmi_fw_sub_feat_caps = le32_to_cpu(ev->wmi_fw_sub_feat_caps);
	cap->txrx_chainmask = le32_to_cpu(ev->txrx_chainmask);
	cap->default_dbs_hw_mode_index = le32_to_cpu(ev->default_dbs_hw_mode_index);
	cap->num_msdu_desc = le32_to_cpu(ev->num_msdu_desc);

	return 0;
}

/* Save the wmi_service_bitmap into a linear bitmap. The wmi_services in
 * wmi_service ready event are advertised in b0-b3 (LSB 4-bits) of each
 * 4-byte word.
 */
static void ath12k_wmi_service_bitmap_copy(struct ath12k_wmi_pdev *wmi,
					   const u32 *wmi_svc_bm)
{
	int i, j;

	for (i = 0, j = 0; i < WMI_SERVICE_BM_SIZE && j < WMI_MAX_SERVICE; i++) {
		do {
			if (wmi_svc_bm[i] & BIT(j % WMI_SERVICE_BITS_IN_SIZE32))
				set_bit(j, wmi->wmi_ab->svc_map);
		} while (++j % WMI_SERVICE_BITS_IN_SIZE32);
	}
}

/* TLV iterator callback for WMI_SERVICE_READY_EVENTID: pulls the target
 * capabilities and, from the first uint32 array, the service bitmap.
 */
static int ath12k_wmi_svc_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len,
				    const void *ptr, void *data)
{
	struct ath12k_wmi_svc_ready_parse *svc_ready = data;
	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
	u16 expect_len;

	switch (tag) {
	case WMI_TAG_SERVICE_READY_EVENT:
		if (ath12k_pull_service_ready_tlv(ab, ptr, &ab->target_caps))
			return -EINVAL;
		break;

	case WMI_TAG_ARRAY_UINT32:
		if (!svc_ready->wmi_svc_bitmap_done) {
			expect_len = WMI_SERVICE_BM_SIZE * sizeof(u32);
			if (len < expect_len) {
				ath12k_warn(ab, "invalid len %d for the tag 0x%x\n",
					    len, tag);
				return -EINVAL;
			}

			ath12k_wmi_service_bitmap_copy(wmi_handle, ptr);

			svc_ready->wmi_svc_bitmap_done = true;
		}
		break;
	default:
		break;
	}

	return 0;
}

/* Handle WMI_SERVICE_READY_EVENTID by walking its TLVs. */
static int ath12k_service_ready_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct ath12k_wmi_svc_ready_parse svc_ready = { };
	int ret;

	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath12k_wmi_svc_rdy_parse,
				  &svc_ready);
	if (ret) {
		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
		return ret;
	}

	return 0;
}

/* Return the channel frequency to use for an off-channel (remain-on-
 * channel) management tx on single-pdev targets, or 0 when the default
 * (current channel) should be used.
 */
static u32 ath12k_wmi_mgmt_get_freq(struct ath12k *ar,
				    struct ieee80211_tx_info *info)
{
	struct ath12k_base *ab = ar->ab;
	u32 freq = 0;

	if (ab->hw_params->single_pdev_only &&
ar->scan.is_roc &&
	    (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
		freq = ar->scan.roc_freq;

	return freq;
}

/* Allocate an skb for a WMI command: rounds @len up to 4 bytes, leaves
 * WMI_SKB_HEADROOM for the command header and zeroes the payload.
 * Returns NULL on allocation failure.
 */
struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_ab, u32 len)
{
	struct sk_buff *skb;
	struct ath12k_base *ab = wmi_ab->ab;
	u32 round_len = roundup(len, 4);

	skb = ath12k_htc_alloc_skb(ab, WMI_SKB_HEADROOM + round_len);
	if (!skb)
		return NULL;

	skb_reserve(skb, WMI_SKB_HEADROOM);
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath12k_warn(ab, "unaligned WMI skb data\n");

	skb_put(skb, round_len);
	memset(skb->data, 0, round_len);

	return skb;
}

/* Send a management frame to firmware via WMI_MGMT_TX_SEND_CMDID. The
 * frame payload (up to WMI_MGMT_SEND_DOWNLD_LEN bytes; the rest is
 * fetched by DMA via the paddr fields) is copied into a byte-array TLV.
 * For MLD interfaces whose frame is link agnostic, extra tx-send and
 * MLO parameter TLVs are appended. @buf_id identifies the frame for the
 * later tx-completion event. Frees the command skb on send failure
 * (@frame is owned by the caller).
 */
int ath12k_wmi_mgmt_send(struct ath12k_link_vif *arvif, u32 buf_id,
			 struct sk_buff *frame)
{
	struct ath12k *ar = arvif->ar;
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_mgmt_send_cmd *cmd;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(frame);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)frame->data;
	struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
	int cmd_len = sizeof(struct ath12k_wmi_mgmt_send_tx_params);
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)hdr;
	struct ath12k_wmi_mlo_mgmt_send_params *ml_params;
	struct ath12k_base *ab = ar->ab;
	struct wmi_tlv *frame_tlv, *tlv;
	struct ath12k_skb_cb *skb_cb;
	u32 buf_len, buf_len_aligned;
	u32 vdev_id = arvif->vdev_id;
	bool link_agnostic = false;
	struct sk_buff *skb;
	int ret, len;
	void *ptr;

	buf_len = min_t(int, frame->len, WMI_MGMT_SEND_DOWNLD_LEN);

	buf_len_aligned = roundup(buf_len, sizeof(u32));

	len = sizeof(*cmd) + sizeof(*frame_tlv) + buf_len_aligned;

	if (ieee80211_vif_is_mld(vif)) {
		skb_cb = ATH12K_SKB_CB(frame);
		if ((skb_cb->flags & ATH12K_SKB_MLO_STA) &&
		    ab->hw_params->hw_ops->is_frame_link_agnostic &&
		    ab->hw_params->hw_ops->is_frame_link_agnostic(arvif, mgmt)) {
			/* Room for tx-send params TLV plus MLO params array */
			len += cmd_len + TLV_HDR_SIZE + sizeof(*ml_params);
			ath12k_generic_dbg(ATH12K_DBG_MGMT,
					   "Sending Mgmt Frame fc 0x%0x as link agnostic",
					   mgmt->frame_control);
			link_agnostic = true;
		}
	}

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_mgmt_send_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MGMT_TX_SEND_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->desc_id = cpu_to_le32(buf_id);
	cmd->chanfreq = cpu_to_le32(ath12k_wmi_mgmt_get_freq(ar, info));
	cmd->paddr_lo = cpu_to_le32(lower_32_bits(ATH12K_SKB_CB(frame)->paddr));
	cmd->paddr_hi = cpu_to_le32(upper_32_bits(ATH12K_SKB_CB(frame)->paddr));
	cmd->frame_len = cpu_to_le32(frame->len);
	cmd->buf_len = cpu_to_le32(buf_len);
	cmd->tx_params_valid = 0;

	frame_tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
	frame_tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, buf_len_aligned);

	memcpy(frame_tlv->value, frame->data, buf_len);

	if (!link_agnostic)
		goto send;

	ptr = skb->data + sizeof(*cmd) + sizeof(*frame_tlv) + buf_len_aligned;

	tlv = ptr;

	/* Tx params not used currently */
	tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TX_SEND_PARAMS, cmd_len);
	ptr += cmd_len;

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, sizeof(*ml_params));
	ptr += TLV_HDR_SIZE;

	ml_params = ptr;
	ml_params->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_TX_SEND_PARAMS,
						       sizeof(*ml_params));

	ml_params->hw_link_id = cpu_to_le32(WMI_MGMT_LINK_AGNOSTIC_ID);

send:
	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MGMT_TX_SEND_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to submit WMI_MGMT_TX_SEND_CMDID cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

/* Request firmware statistics (@stats_id selects which) for the given
 * vdev/pdev via WMI_REQUEST_STATS_CMDID. Frees the skb on send failure.
 */
int ath12k_wmi_send_stats_request_cmd(struct ath12k *ar, u32 stats_id,
				      u32 vdev_id, u32 pdev_id)
{
	struct
ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_request_stats_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_request_stats_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REQUEST_STATS_CMD,
						 sizeof(*cmd));

	cmd->stats_id = cpu_to_le32(stats_id);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->pdev_id = cpu_to_le32(pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_REQUEST_STATS_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_REQUEST_STATS cmd\n");
		dev_kfree_skb(skb);
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI request stats 0x%x vdev id %d pdev id %d\n",
		   stats_id, vdev_id, pdev_id);

	return ret;
}

/* Create a vdev in firmware via WMI_VDEV_CREATE_CMDID. The command
 * carries per-band tx/rx stream counts and, when @args->mld_addr is a
 * valid MAC address, an additional MLO-parameters TLV for ML vdevs.
 * Frees the command skb on send failure.
 */
int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
			   struct ath12k_wmi_vdev_create_arg *args)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_create_cmd *cmd;
	struct sk_buff *skb;
	struct ath12k_wmi_vdev_txrx_streams_params *txrx_streams;
	bool is_ml_vdev = is_valid_ether_addr(args->mld_addr);
	struct wmi_vdev_create_mlo_params *ml_params;
	struct wmi_tlv *tlv;
	int ret, len;
	void *ptr;

	/* It can be optimized my sending tx/rx chain configuration
	 * only for supported bands instead of always sending it for
	 * both the bands.
	 */
	len = sizeof(*cmd) + TLV_HDR_SIZE +
		(WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams)) +
		(is_ml_vdev ? TLV_HDR_SIZE + sizeof(*ml_params) : 0);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_create_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_CREATE_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(args->if_id);
	cmd->vdev_type = cpu_to_le32(args->type);
	cmd->vdev_subtype = cpu_to_le32(args->subtype);
	cmd->num_cfg_txrx_streams = cpu_to_le32(WMI_NUM_SUPPORTED_BAND_MAX);
	cmd->pdev_id = cpu_to_le32(args->pdev_id);
	cmd->mbssid_flags = cpu_to_le32(args->mbssid_flags);
	cmd->mbssid_tx_vdev_id = cpu_to_le32(args->mbssid_tx_vdev_id);
	cmd->vdev_stats_id = cpu_to_le32(args->if_stats_id);
	ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);

	if (args->if_stats_id != ATH12K_INVAL_VDEV_STATS_ID)
		cmd->vdev_stats_id_valid = cpu_to_le32(BIT(0));

	ptr = skb->data + sizeof(*cmd);
	len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);

	ptr += TLV_HDR_SIZE;
	txrx_streams = ptr;
	len = sizeof(*txrx_streams);
	/* 2 GHz stream configuration */
	txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
							  len);
	txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_2G);
	txrx_streams->supported_tx_streams =
		cpu_to_le32(args->chains[NL80211_BAND_2GHZ].tx);
	txrx_streams->supported_rx_streams =
		cpu_to_le32(args->chains[NL80211_BAND_2GHZ].rx);

	/* 5 GHz stream configuration */
	txrx_streams++;
	txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
							  len);
	txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_5G);
	txrx_streams->supported_tx_streams =
		cpu_to_le32(args->chains[NL80211_BAND_5GHZ].tx);
	txrx_streams->supported_rx_streams =
		cpu_to_le32(args->chains[NL80211_BAND_5GHZ].rx);

	ptr += WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);

	if (is_ml_vdev) {
		tlv = ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
						 sizeof(*ml_params));
		ptr += TLV_HDR_SIZE;
		ml_params = ptr;

		ml_params->tlv_header =
			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_CREATE_PARAMS,
					       sizeof(*ml_params));
		ether_addr_copy(ml_params->mld_macaddr.addr, args->mld_addr);
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n",
		   args->if_id, args->type, args->subtype,
		   macaddr, args->pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_CREATE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to submit WMI_VDEV_CREATE_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

/* Delete vdev @vdev_id in firmware via WMI_VDEV_DELETE_CMDID. Frees the
 * command skb on send failure.
 */
int ath12k_wmi_vdev_delete(struct ath12k *ar, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_delete_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_delete_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DELETE_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev delete id %d\n", vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DELETE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DELETE_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

/* Stop vdev @vdev_id in firmware via WMI_VDEV_STOP_CMDID (function
 * continues beyond the visible end of this chunk).
 */
int ath12k_wmi_vdev_stop(struct ath12k *ar, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_stop_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_stop_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_STOP_CMD,
						 sizeof(*cmd));
	cmd->vdev_id =
cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev stop id 0x%x\n", vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_STOP_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_STOP cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

/* Send WMI_VDEV_DOWN_CMDID: mark the given vdev as down in firmware.
 * Returns 0 on success or a negative error; the skb is freed on failure.
 */
int ath12k_wmi_vdev_down(struct ath12k *ar, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_down_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_down_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DOWN_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev down id 0x%x\n", vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DOWN_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DOWN cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

/* Translate a vdev start request into the WMI channel TLV (chan is zeroed
 * first, so untouched fields stay 0).
 */
static void ath12k_wmi_put_wmi_channel(struct ath12k_wmi_channel_params *chan,
				       struct wmi_vdev_start_req_arg *arg)
{
	u32 center_freq1 = arg->band_center_freq1;

	memset(chan, 0, sizeof(*chan));

	chan->mhz = cpu_to_le32(arg->freq);
	chan->band_center_freq1 = cpu_to_le32(center_freq1);
	/* For 160/320 MHz modes the two center-frequency fields are split:
	 * band_center_freq1 is shifted to the primary segment center
	 * (+/-80 MHz for 320 MHz, +/-40 MHz for 160 MHz, sign chosen by
	 * which side of the full-channel center the primary frequency
	 * lies) and band_center_freq2 carries the full-channel center —
	 * presumably the WMI channel convention; confirm against firmware
	 * interface docs.
	 */
	if (arg->mode == MODE_11BE_EHT320) {
		if (arg->freq > center_freq1)
			chan->band_center_freq1 = cpu_to_le32(center_freq1 + 80);
		else
			chan->band_center_freq1 = cpu_to_le32(center_freq1 - 80);

		chan->band_center_freq2 = cpu_to_le32(center_freq1);

	} else if (arg->mode == MODE_11BE_EHT160 ||
		   arg->mode == MODE_11AX_HE160) {
		if (arg->freq > center_freq1)
			chan->band_center_freq1 = cpu_to_le32(center_freq1 + 40);
		else
			chan->band_center_freq1 = cpu_to_le32(center_freq1 - 40);

		chan->band_center_freq2 = cpu_to_le32(center_freq1);
	} else {
		chan->band_center_freq2 = 0;
	}

	/* Encode PHY mode plus capability/DFS flag bits into chan->info */
	chan->info |= le32_encode_bits(arg->mode, WMI_CHAN_INFO_MODE);
	if (arg->passive)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
	if (arg->allow_ibss)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ADHOC_ALLOWED);
	if (arg->allow_ht)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT);
	if (arg->allow_vht)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT);
	if (arg->allow_he)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE);
	if (arg->ht40plus)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_HT40_PLUS);
	if (arg->chan_radar)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS);
	if (arg->freq2_radar)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS_FREQ2);

	/* Regulatory limits: max tx power, max regulatory power and
	 * antenna gain, packed into the two reg_info words.
	 */
	chan->reg_info_1 = le32_encode_bits(arg->max_power,
					    WMI_CHAN_REG_INFO1_MAX_PWR) |
		le32_encode_bits(arg->max_reg_power,
				 WMI_CHAN_REG_INFO1_MAX_REG_PWR);

	chan->reg_info_2 = le32_encode_bits(arg->max_antenna_gain,
					    WMI_CHAN_REG_INFO2_ANT_MAX) |
		le32_encode_bits(arg->max_power, WMI_CHAN_REG_INFO2_MAX_TX_PWR);
}

/* Build and send WMI_VDEV_(RE)START_REQUEST_CMDID. Layout is the fixed
 * command struct, a channel TLV, an (empty) NoA descriptor array TLV and,
 * for a fresh MLO start, the MLO params and partner link array TLVs.
 */
int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
			  bool restart)
{
	struct wmi_vdev_start_mlo_params *ml_params;
	struct wmi_partner_link_info *partner_info;
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_start_request_cmd *cmd;
	struct sk_buff *skb;
	struct ath12k_wmi_channel_params *chan;
	struct wmi_tlv *tlv;
	void *ptr;
	int ret, len, i, ml_arg_size = 0;

	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
		return -EINVAL;

	len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;

	/* MLO parameters are appended only on a fresh start, not restart */
	if (!restart && arg->ml.enabled) {
		ml_arg_size = TLV_HDR_SIZE + sizeof(*ml_params) +
			      TLV_HDR_SIZE + (arg->ml.num_partner_links *
					      sizeof(*partner_info));
		len += ml_arg_size;
	}
	skb = 
ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 1183 if (!skb) 1184 return -ENOMEM; 1185 1186 cmd = (struct wmi_vdev_start_request_cmd *)skb->data; 1187 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_START_REQUEST_CMD, 1188 sizeof(*cmd)); 1189 cmd->vdev_id = cpu_to_le32(arg->vdev_id); 1190 cmd->beacon_interval = cpu_to_le32(arg->bcn_intval); 1191 cmd->bcn_tx_rate = cpu_to_le32(arg->bcn_tx_rate); 1192 cmd->dtim_period = cpu_to_le32(arg->dtim_period); 1193 cmd->num_noa_descriptors = cpu_to_le32(arg->num_noa_descriptors); 1194 cmd->preferred_rx_streams = cpu_to_le32(arg->pref_rx_streams); 1195 cmd->preferred_tx_streams = cpu_to_le32(arg->pref_tx_streams); 1196 cmd->cac_duration_ms = cpu_to_le32(arg->cac_duration_ms); 1197 cmd->regdomain = cpu_to_le32(arg->regdomain); 1198 cmd->he_ops = cpu_to_le32(arg->he_ops); 1199 cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap); 1200 cmd->mbssid_flags = cpu_to_le32(arg->mbssid_flags); 1201 cmd->mbssid_tx_vdev_id = cpu_to_le32(arg->mbssid_tx_vdev_id); 1202 1203 if (!restart) { 1204 if (arg->ssid) { 1205 cmd->ssid.ssid_len = cpu_to_le32(arg->ssid_len); 1206 memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len); 1207 } 1208 if (arg->hidden_ssid) 1209 cmd->flags |= cpu_to_le32(WMI_VDEV_START_HIDDEN_SSID); 1210 if (arg->pmf_enabled) 1211 cmd->flags |= cpu_to_le32(WMI_VDEV_START_PMF_ENABLED); 1212 } 1213 1214 cmd->flags |= cpu_to_le32(WMI_VDEV_START_LDPC_RX_ENABLED); 1215 1216 ptr = skb->data + sizeof(*cmd); 1217 chan = ptr; 1218 1219 ath12k_wmi_put_wmi_channel(chan, arg); 1220 1221 chan->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL, 1222 sizeof(*chan)); 1223 ptr += sizeof(*chan); 1224 1225 tlv = ptr; 1226 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0); 1227 1228 /* Note: This is a nested TLV containing: 1229 * [wmi_tlv][ath12k_wmi_p2p_noa_descriptor][wmi_tlv].. 
1230 */ 1231 1232 ptr += sizeof(*tlv); 1233 1234 if (ml_arg_size) { 1235 tlv = ptr; 1236 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 1237 sizeof(*ml_params)); 1238 ptr += TLV_HDR_SIZE; 1239 1240 ml_params = ptr; 1241 1242 ml_params->tlv_header = 1243 ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_START_PARAMS, 1244 sizeof(*ml_params)); 1245 1246 ml_params->flags = le32_encode_bits(arg->ml.enabled, 1247 ATH12K_WMI_FLAG_MLO_ENABLED) | 1248 le32_encode_bits(arg->ml.assoc_link, 1249 ATH12K_WMI_FLAG_MLO_ASSOC_LINK) | 1250 le32_encode_bits(arg->ml.mcast_link, 1251 ATH12K_WMI_FLAG_MLO_MCAST_VDEV) | 1252 le32_encode_bits(arg->ml.link_add, 1253 ATH12K_WMI_FLAG_MLO_LINK_ADD); 1254 1255 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %d start ml flags 0x%x\n", 1256 arg->vdev_id, ml_params->flags); 1257 1258 ptr += sizeof(*ml_params); 1259 1260 tlv = ptr; 1261 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 1262 arg->ml.num_partner_links * 1263 sizeof(*partner_info)); 1264 ptr += TLV_HDR_SIZE; 1265 1266 partner_info = ptr; 1267 1268 for (i = 0; i < arg->ml.num_partner_links; i++) { 1269 partner_info->tlv_header = 1270 ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PARTNER_LINK_PARAMS, 1271 sizeof(*partner_info)); 1272 partner_info->vdev_id = 1273 cpu_to_le32(arg->ml.partner_info[i].vdev_id); 1274 partner_info->hw_link_id = 1275 cpu_to_le32(arg->ml.partner_info[i].hw_link_id); 1276 ether_addr_copy(partner_info->vdev_addr.addr, 1277 arg->ml.partner_info[i].addr); 1278 1279 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "partner vdev %d hw_link_id %d macaddr%pM\n", 1280 partner_info->vdev_id, partner_info->hw_link_id, 1281 partner_info->vdev_addr.addr); 1282 1283 partner_info++; 1284 } 1285 1286 ptr = partner_info; 1287 } 1288 1289 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %s id 0x%x freq 0x%x mode 0x%x\n", 1290 restart ? 
"restart" : "start", arg->vdev_id, 1291 arg->freq, arg->mode); 1292 1293 if (restart) 1294 ret = ath12k_wmi_cmd_send(wmi, skb, 1295 WMI_VDEV_RESTART_REQUEST_CMDID); 1296 else 1297 ret = ath12k_wmi_cmd_send(wmi, skb, 1298 WMI_VDEV_START_REQUEST_CMDID); 1299 if (ret) { 1300 ath12k_warn(ar->ab, "failed to submit vdev_%s cmd\n", 1301 restart ? "restart" : "start"); 1302 dev_kfree_skb(skb); 1303 } 1304 1305 return ret; 1306 } 1307 1308 int ath12k_wmi_vdev_up(struct ath12k *ar, struct ath12k_wmi_vdev_up_params *params) 1309 { 1310 struct ath12k_wmi_pdev *wmi = ar->wmi; 1311 struct wmi_vdev_up_cmd *cmd; 1312 struct sk_buff *skb; 1313 int ret; 1314 1315 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1316 if (!skb) 1317 return -ENOMEM; 1318 1319 cmd = (struct wmi_vdev_up_cmd *)skb->data; 1320 1321 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_UP_CMD, 1322 sizeof(*cmd)); 1323 cmd->vdev_id = cpu_to_le32(params->vdev_id); 1324 cmd->vdev_assoc_id = cpu_to_le32(params->aid); 1325 1326 ether_addr_copy(cmd->vdev_bssid.addr, params->bssid); 1327 1328 if (params->tx_bssid) { 1329 ether_addr_copy(cmd->tx_vdev_bssid.addr, params->tx_bssid); 1330 cmd->nontx_profile_idx = cpu_to_le32(params->nontx_profile_idx); 1331 cmd->nontx_profile_cnt = cpu_to_le32(params->nontx_profile_cnt); 1332 } 1333 1334 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1335 "WMI mgmt vdev up id 0x%x assoc id %d bssid %pM\n", 1336 params->vdev_id, params->aid, params->bssid); 1337 1338 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID); 1339 if (ret) { 1340 ath12k_warn(ar->ab, "failed to submit WMI_VDEV_UP cmd\n"); 1341 dev_kfree_skb(skb); 1342 } 1343 1344 return ret; 1345 } 1346 1347 int ath12k_wmi_send_peer_create_cmd(struct ath12k *ar, 1348 struct ath12k_wmi_peer_create_arg *arg) 1349 { 1350 struct ath12k_wmi_pdev *wmi = ar->wmi; 1351 struct wmi_peer_create_cmd *cmd; 1352 struct sk_buff *skb; 1353 int ret, len; 1354 struct wmi_peer_create_mlo_params *ml_param; 1355 void *ptr; 1356 struct wmi_tlv *tlv; 
	/* Fixed cmd + one MLO-params struct wrapped in an ARRAY_STRUCT TLV;
	 * the MLO TLV is always present, its flags are set only when MLO
	 * is enabled for this peer.
	 */
	len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*ml_param);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_create_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_CREATE_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_addr);
	cmd->peer_type = cpu_to_le32(arg->peer_type);
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);

	ptr = skb->data + sizeof(*cmd);
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
					 sizeof(*ml_param));
	ptr += TLV_HDR_SIZE;
	ml_param = ptr;
	ml_param->tlv_header =
			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_CREATE_PARAMS,
					       sizeof(*ml_param));
	if (arg->ml_enabled)
		ml_param->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);

	ptr += sizeof(*ml_param);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI peer create vdev_id %d peer_addr %pM ml_flags 0x%x\n",
		   arg->vdev_id, arg->peer_addr, ml_param->flags);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_PEER_CREATE cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

/* Send WMI_PEER_DELETE_CMDID to remove the peer entry for peer_addr on
 * the given vdev.
 */
int ath12k_wmi_send_peer_delete_cmd(struct ath12k *ar,
				    const u8 *peer_addr, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_delete_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_delete_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_DELETE_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI peer delete vdev_id %d peer_addr %pM\n",
		   vdev_id, peer_addr);

	ret
 = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_DELETE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_PEER_DELETE cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

/* Push the currently selected regulatory domain (2G/5G rd codes,
 * conformance test limits and DFS domain) to firmware for one pdev.
 */
int ath12k_wmi_send_pdev_set_regdomain(struct ath12k *ar,
				       struct ath12k_wmi_pdev_set_regdomain_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_set_regdomain_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_REGDOMAIN_CMD,
						 sizeof(*cmd));

	cmd->reg_domain = cpu_to_le32(arg->current_rd_in_use);
	cmd->reg_domain_2g = cpu_to_le32(arg->current_rd_2g);
	cmd->reg_domain_5g = cpu_to_le32(arg->current_rd_5g);
	cmd->conformance_test_limit_2g = cpu_to_le32(arg->ctl_2g);
	cmd->conformance_test_limit_5g = cpu_to_le32(arg->ctl_5g);
	cmd->dfs_domain = cpu_to_le32(arg->dfs_domain);
	cmd->pdev_id = cpu_to_le32(arg->pdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI pdev regd rd %d rd2g %d rd5g %d domain %d pdev id %d\n",
		   arg->current_rd_in_use, arg->current_rd_2g,
		   arg->current_rd_5g, arg->dfs_domain, arg->pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PDEV_SET_REGDOMAIN cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

/* Set one WMI peer parameter (param_id/param_val) for peer_addr on vdev_id. */
int ath12k_wmi_set_peer_param(struct ath12k *ar, const u8 *peer_addr,
			      u32 vdev_id, u32 param_id, u32 param_val)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_set_param_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_set_param_cmd *)skb->data;
1483 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_SET_PARAM_CMD, 1484 sizeof(*cmd)); 1485 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); 1486 cmd->vdev_id = cpu_to_le32(vdev_id); 1487 cmd->param_id = cpu_to_le32(param_id); 1488 cmd->param_value = cpu_to_le32(param_val); 1489 1490 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1491 "WMI vdev %d peer 0x%pM set param %d value %d\n", 1492 vdev_id, peer_addr, param_id, param_val); 1493 1494 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_SET_PARAM_CMDID); 1495 if (ret) { 1496 ath12k_warn(ar->ab, "failed to send WMI_PEER_SET_PARAM cmd\n"); 1497 dev_kfree_skb(skb); 1498 } 1499 1500 return ret; 1501 } 1502 1503 int ath12k_wmi_send_peer_flush_tids_cmd(struct ath12k *ar, 1504 u8 peer_addr[ETH_ALEN], 1505 u32 peer_tid_bitmap, 1506 u8 vdev_id) 1507 { 1508 struct ath12k_wmi_pdev *wmi = ar->wmi; 1509 struct wmi_peer_flush_tids_cmd *cmd; 1510 struct sk_buff *skb; 1511 int ret; 1512 1513 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1514 if (!skb) 1515 return -ENOMEM; 1516 1517 cmd = (struct wmi_peer_flush_tids_cmd *)skb->data; 1518 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_FLUSH_TIDS_CMD, 1519 sizeof(*cmd)); 1520 1521 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); 1522 cmd->peer_tid_bitmap = cpu_to_le32(peer_tid_bitmap); 1523 cmd->vdev_id = cpu_to_le32(vdev_id); 1524 1525 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1526 "WMI peer flush vdev_id %d peer_addr %pM tids %08x\n", 1527 vdev_id, peer_addr, peer_tid_bitmap); 1528 1529 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_FLUSH_TIDS_CMDID); 1530 if (ret) { 1531 ath12k_warn(ar->ab, 1532 "failed to send WMI_PEER_FLUSH_TIDS cmd\n"); 1533 dev_kfree_skb(skb); 1534 } 1535 1536 return ret; 1537 } 1538 1539 int ath12k_wmi_peer_rx_reorder_queue_setup(struct ath12k *ar, 1540 int vdev_id, const u8 *addr, 1541 dma_addr_t paddr, u8 tid, 1542 u8 ba_window_size_valid, 1543 u32 ba_window_size) 1544 { 1545 struct wmi_peer_reorder_queue_setup_cmd *cmd; 1546 struct sk_buff *skb; 1547 
int ret; 1548 1549 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd)); 1550 if (!skb) 1551 return -ENOMEM; 1552 1553 cmd = (struct wmi_peer_reorder_queue_setup_cmd *)skb->data; 1554 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_SETUP_CMD, 1555 sizeof(*cmd)); 1556 1557 ether_addr_copy(cmd->peer_macaddr.addr, addr); 1558 cmd->vdev_id = cpu_to_le32(vdev_id); 1559 cmd->tid = cpu_to_le32(tid); 1560 cmd->queue_ptr_lo = cpu_to_le32(lower_32_bits(paddr)); 1561 cmd->queue_ptr_hi = cpu_to_le32(upper_32_bits(paddr)); 1562 cmd->queue_no = cpu_to_le32(tid); 1563 cmd->ba_window_size_valid = cpu_to_le32(ba_window_size_valid); 1564 cmd->ba_window_size = cpu_to_le32(ba_window_size); 1565 1566 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1567 "wmi rx reorder queue setup addr %pM vdev_id %d tid %d\n", 1568 addr, vdev_id, tid); 1569 1570 ret = ath12k_wmi_cmd_send(ar->wmi, skb, 1571 WMI_PEER_REORDER_QUEUE_SETUP_CMDID); 1572 if (ret) { 1573 ath12k_warn(ar->ab, 1574 "failed to send WMI_PEER_REORDER_QUEUE_SETUP\n"); 1575 dev_kfree_skb(skb); 1576 } 1577 1578 return ret; 1579 } 1580 1581 int 1582 ath12k_wmi_rx_reord_queue_remove(struct ath12k *ar, 1583 struct ath12k_wmi_rx_reorder_queue_remove_arg *arg) 1584 { 1585 struct ath12k_wmi_pdev *wmi = ar->wmi; 1586 struct wmi_peer_reorder_queue_remove_cmd *cmd; 1587 struct sk_buff *skb; 1588 int ret; 1589 1590 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1591 if (!skb) 1592 return -ENOMEM; 1593 1594 cmd = (struct wmi_peer_reorder_queue_remove_cmd *)skb->data; 1595 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_REMOVE_CMD, 1596 sizeof(*cmd)); 1597 1598 ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_macaddr); 1599 cmd->vdev_id = cpu_to_le32(arg->vdev_id); 1600 cmd->tid_mask = cpu_to_le32(arg->peer_tid_bitmap); 1601 1602 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1603 "%s: peer_macaddr %pM vdev_id %d, tid_map %d", __func__, 1604 arg->peer_macaddr, arg->vdev_id, arg->peer_tid_bitmap); 1605 1606 ret = 
ath12k_wmi_cmd_send(wmi, skb, 1607 WMI_PEER_REORDER_QUEUE_REMOVE_CMDID); 1608 if (ret) { 1609 ath12k_warn(ar->ab, 1610 "failed to send WMI_PEER_REORDER_QUEUE_REMOVE_CMDID"); 1611 dev_kfree_skb(skb); 1612 } 1613 1614 return ret; 1615 } 1616 1617 int ath12k_wmi_pdev_set_param(struct ath12k *ar, u32 param_id, 1618 u32 param_value, u8 pdev_id) 1619 { 1620 struct ath12k_wmi_pdev *wmi = ar->wmi; 1621 struct wmi_pdev_set_param_cmd *cmd; 1622 struct sk_buff *skb; 1623 int ret; 1624 1625 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1626 if (!skb) 1627 return -ENOMEM; 1628 1629 cmd = (struct wmi_pdev_set_param_cmd *)skb->data; 1630 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_PARAM_CMD, 1631 sizeof(*cmd)); 1632 cmd->pdev_id = cpu_to_le32(pdev_id); 1633 cmd->param_id = cpu_to_le32(param_id); 1634 cmd->param_value = cpu_to_le32(param_value); 1635 1636 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1637 "WMI pdev set param %d pdev id %d value %d\n", 1638 param_id, pdev_id, param_value); 1639 1640 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_PARAM_CMDID); 1641 if (ret) { 1642 ath12k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n"); 1643 dev_kfree_skb(skb); 1644 } 1645 1646 return ret; 1647 } 1648 1649 int ath12k_wmi_pdev_set_ps_mode(struct ath12k *ar, int vdev_id, u32 enable) 1650 { 1651 struct ath12k_wmi_pdev *wmi = ar->wmi; 1652 struct wmi_pdev_set_ps_mode_cmd *cmd; 1653 struct sk_buff *skb; 1654 int ret; 1655 1656 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1657 if (!skb) 1658 return -ENOMEM; 1659 1660 cmd = (struct wmi_pdev_set_ps_mode_cmd *)skb->data; 1661 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_MODE_CMD, 1662 sizeof(*cmd)); 1663 cmd->vdev_id = cpu_to_le32(vdev_id); 1664 cmd->sta_ps_mode = cpu_to_le32(enable); 1665 1666 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1667 "WMI vdev set psmode %d vdev id %d\n", 1668 enable, vdev_id); 1669 1670 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_MODE_CMDID); 1671 if (ret) { 1672 
ath12k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n"); 1673 dev_kfree_skb(skb); 1674 } 1675 1676 return ret; 1677 } 1678 1679 int ath12k_wmi_pdev_suspend(struct ath12k *ar, u32 suspend_opt, 1680 u32 pdev_id) 1681 { 1682 struct ath12k_wmi_pdev *wmi = ar->wmi; 1683 struct wmi_pdev_suspend_cmd *cmd; 1684 struct sk_buff *skb; 1685 int ret; 1686 1687 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1688 if (!skb) 1689 return -ENOMEM; 1690 1691 cmd = (struct wmi_pdev_suspend_cmd *)skb->data; 1692 1693 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SUSPEND_CMD, 1694 sizeof(*cmd)); 1695 1696 cmd->suspend_opt = cpu_to_le32(suspend_opt); 1697 cmd->pdev_id = cpu_to_le32(pdev_id); 1698 1699 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1700 "WMI pdev suspend pdev_id %d\n", pdev_id); 1701 1702 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SUSPEND_CMDID); 1703 if (ret) { 1704 ath12k_warn(ar->ab, "failed to send WMI_PDEV_SUSPEND cmd\n"); 1705 dev_kfree_skb(skb); 1706 } 1707 1708 return ret; 1709 } 1710 1711 int ath12k_wmi_pdev_resume(struct ath12k *ar, u32 pdev_id) 1712 { 1713 struct ath12k_wmi_pdev *wmi = ar->wmi; 1714 struct wmi_pdev_resume_cmd *cmd; 1715 struct sk_buff *skb; 1716 int ret; 1717 1718 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1719 if (!skb) 1720 return -ENOMEM; 1721 1722 cmd = (struct wmi_pdev_resume_cmd *)skb->data; 1723 1724 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_RESUME_CMD, 1725 sizeof(*cmd)); 1726 cmd->pdev_id = cpu_to_le32(pdev_id); 1727 1728 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1729 "WMI pdev resume pdev id %d\n", pdev_id); 1730 1731 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_RESUME_CMDID); 1732 if (ret) { 1733 ath12k_warn(ar->ab, "failed to send WMI_PDEV_RESUME cmd\n"); 1734 dev_kfree_skb(skb); 1735 } 1736 1737 return ret; 1738 } 1739 1740 /* TODO FW Support for the cmd is not available yet. 
1741 * Can be tested once the command and corresponding 1742 * event is implemented in FW 1743 */ 1744 int ath12k_wmi_pdev_bss_chan_info_request(struct ath12k *ar, 1745 enum wmi_bss_chan_info_req_type type) 1746 { 1747 struct ath12k_wmi_pdev *wmi = ar->wmi; 1748 struct wmi_pdev_bss_chan_info_req_cmd *cmd; 1749 struct sk_buff *skb; 1750 int ret; 1751 1752 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1753 if (!skb) 1754 return -ENOMEM; 1755 1756 cmd = (struct wmi_pdev_bss_chan_info_req_cmd *)skb->data; 1757 1758 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST, 1759 sizeof(*cmd)); 1760 cmd->req_type = cpu_to_le32(type); 1761 cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id); 1762 1763 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1764 "WMI bss chan info req type %d\n", type); 1765 1766 ret = ath12k_wmi_cmd_send(wmi, skb, 1767 WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID); 1768 if (ret) { 1769 ath12k_warn(ar->ab, 1770 "failed to send WMI_PDEV_BSS_CHAN_INFO_REQUEST cmd\n"); 1771 dev_kfree_skb(skb); 1772 } 1773 1774 return ret; 1775 } 1776 1777 int ath12k_wmi_send_set_ap_ps_param_cmd(struct ath12k *ar, u8 *peer_addr, 1778 struct ath12k_wmi_ap_ps_arg *arg) 1779 { 1780 struct ath12k_wmi_pdev *wmi = ar->wmi; 1781 struct wmi_ap_ps_peer_cmd *cmd; 1782 struct sk_buff *skb; 1783 int ret; 1784 1785 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1786 if (!skb) 1787 return -ENOMEM; 1788 1789 cmd = (struct wmi_ap_ps_peer_cmd *)skb->data; 1790 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_AP_PS_PEER_CMD, 1791 sizeof(*cmd)); 1792 1793 cmd->vdev_id = cpu_to_le32(arg->vdev_id); 1794 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); 1795 cmd->param = cpu_to_le32(arg->param); 1796 cmd->value = cpu_to_le32(arg->value); 1797 1798 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1799 "WMI set ap ps vdev id %d peer %pM param %d value %d\n", 1800 arg->vdev_id, peer_addr, arg->param, arg->value); 1801 1802 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_AP_PS_PEER_PARAM_CMDID); 1803 if 
(ret) { 1804 ath12k_warn(ar->ab, 1805 "failed to send WMI_AP_PS_PEER_PARAM_CMDID\n"); 1806 dev_kfree_skb(skb); 1807 } 1808 1809 return ret; 1810 } 1811 1812 int ath12k_wmi_set_sta_ps_param(struct ath12k *ar, u32 vdev_id, 1813 u32 param, u32 param_value) 1814 { 1815 struct ath12k_wmi_pdev *wmi = ar->wmi; 1816 struct wmi_sta_powersave_param_cmd *cmd; 1817 struct sk_buff *skb; 1818 int ret; 1819 1820 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1821 if (!skb) 1822 return -ENOMEM; 1823 1824 cmd = (struct wmi_sta_powersave_param_cmd *)skb->data; 1825 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_PARAM_CMD, 1826 sizeof(*cmd)); 1827 1828 cmd->vdev_id = cpu_to_le32(vdev_id); 1829 cmd->param = cpu_to_le32(param); 1830 cmd->value = cpu_to_le32(param_value); 1831 1832 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1833 "WMI set sta ps vdev_id %d param %d value %d\n", 1834 vdev_id, param, param_value); 1835 1836 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_PARAM_CMDID); 1837 if (ret) { 1838 ath12k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_PARAM_CMDID"); 1839 dev_kfree_skb(skb); 1840 } 1841 1842 return ret; 1843 } 1844 1845 int ath12k_wmi_force_fw_hang_cmd(struct ath12k *ar, u32 type, u32 delay_time_ms) 1846 { 1847 struct ath12k_wmi_pdev *wmi = ar->wmi; 1848 struct wmi_force_fw_hang_cmd *cmd; 1849 struct sk_buff *skb; 1850 int ret, len; 1851 1852 len = sizeof(*cmd); 1853 1854 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 1855 if (!skb) 1856 return -ENOMEM; 1857 1858 cmd = (struct wmi_force_fw_hang_cmd *)skb->data; 1859 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FORCE_FW_HANG_CMD, 1860 len); 1861 1862 cmd->type = cpu_to_le32(type); 1863 cmd->delay_time_ms = cpu_to_le32(delay_time_ms); 1864 1865 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_FORCE_FW_HANG_CMDID); 1866 1867 if (ret) { 1868 ath12k_warn(ar->ab, "Failed to send WMI_FORCE_FW_HANG_CMDID"); 1869 dev_kfree_skb(skb); 1870 } 1871 return ret; 1872 } 1873 1874 int 
ath12k_wmi_vdev_set_param_cmd(struct ath12k *ar, u32 vdev_id, 1875 u32 param_id, u32 param_value) 1876 { 1877 struct ath12k_wmi_pdev *wmi = ar->wmi; 1878 struct wmi_vdev_set_param_cmd *cmd; 1879 struct sk_buff *skb; 1880 int ret; 1881 1882 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1883 if (!skb) 1884 return -ENOMEM; 1885 1886 cmd = (struct wmi_vdev_set_param_cmd *)skb->data; 1887 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_PARAM_CMD, 1888 sizeof(*cmd)); 1889 1890 cmd->vdev_id = cpu_to_le32(vdev_id); 1891 cmd->param_id = cpu_to_le32(param_id); 1892 cmd->param_value = cpu_to_le32(param_value); 1893 1894 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1895 "WMI vdev id 0x%x set param %d value %d\n", 1896 vdev_id, param_id, param_value); 1897 1898 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_PARAM_CMDID); 1899 if (ret) { 1900 ath12k_warn(ar->ab, 1901 "failed to send WMI_VDEV_SET_PARAM_CMDID\n"); 1902 dev_kfree_skb(skb); 1903 } 1904 1905 return ret; 1906 } 1907 1908 int ath12k_wmi_send_pdev_temperature_cmd(struct ath12k *ar) 1909 { 1910 struct ath12k_wmi_pdev *wmi = ar->wmi; 1911 struct wmi_get_pdev_temperature_cmd *cmd; 1912 struct sk_buff *skb; 1913 int ret; 1914 1915 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1916 if (!skb) 1917 return -ENOMEM; 1918 1919 cmd = (struct wmi_get_pdev_temperature_cmd *)skb->data; 1920 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_GET_TEMPERATURE_CMD, 1921 sizeof(*cmd)); 1922 cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id); 1923 1924 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1925 "WMI pdev get temperature for pdev_id %d\n", ar->pdev->pdev_id); 1926 1927 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_GET_TEMPERATURE_CMDID); 1928 if (ret) { 1929 ath12k_warn(ar->ab, "failed to send WMI_PDEV_GET_TEMPERATURE cmd\n"); 1930 dev_kfree_skb(skb); 1931 } 1932 1933 return ret; 1934 } 1935 1936 int ath12k_wmi_send_bcn_offload_control_cmd(struct ath12k *ar, 1937 u32 vdev_id, u32 bcn_ctrl_op) 1938 { 1939 struct ath12k_wmi_pdev 
*wmi = ar->wmi; 1940 struct wmi_bcn_offload_ctrl_cmd *cmd; 1941 struct sk_buff *skb; 1942 int ret; 1943 1944 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1945 if (!skb) 1946 return -ENOMEM; 1947 1948 cmd = (struct wmi_bcn_offload_ctrl_cmd *)skb->data; 1949 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_OFFLOAD_CTRL_CMD, 1950 sizeof(*cmd)); 1951 1952 cmd->vdev_id = cpu_to_le32(vdev_id); 1953 cmd->bcn_ctrl_op = cpu_to_le32(bcn_ctrl_op); 1954 1955 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1956 "WMI bcn ctrl offload vdev id %d ctrl_op %d\n", 1957 vdev_id, bcn_ctrl_op); 1958 1959 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_OFFLOAD_CTRL_CMDID); 1960 if (ret) { 1961 ath12k_warn(ar->ab, 1962 "failed to send WMI_BCN_OFFLOAD_CTRL_CMDID\n"); 1963 dev_kfree_skb(skb); 1964 } 1965 1966 return ret; 1967 } 1968 1969 int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id, 1970 const u8 *p2p_ie) 1971 { 1972 struct ath12k_wmi_pdev *wmi = ar->wmi; 1973 struct wmi_p2p_go_set_beacon_ie_cmd *cmd; 1974 size_t p2p_ie_len, aligned_len; 1975 struct wmi_tlv *tlv; 1976 struct sk_buff *skb; 1977 void *ptr; 1978 int ret, len; 1979 1980 p2p_ie_len = p2p_ie[1] + 2; 1981 aligned_len = roundup(p2p_ie_len, sizeof(u32)); 1982 1983 len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len; 1984 1985 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 1986 if (!skb) 1987 return -ENOMEM; 1988 1989 ptr = skb->data; 1990 cmd = ptr; 1991 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_P2P_GO_SET_BEACON_IE, 1992 sizeof(*cmd)); 1993 cmd->vdev_id = cpu_to_le32(vdev_id); 1994 cmd->ie_buf_len = cpu_to_le32(p2p_ie_len); 1995 1996 ptr += sizeof(*cmd); 1997 tlv = ptr; 1998 tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_BYTE, 1999 aligned_len); 2000 memcpy(tlv->value, p2p_ie, p2p_ie_len); 2001 2002 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_P2P_GO_SET_BEACON_IE); 2003 if (ret) { 2004 ath12k_warn(ar->ab, "failed to send WMI_P2P_GO_SET_BEACON_IE\n"); 2005 dev_kfree_skb(skb); 2006 } 2007 2008 return ret; 2009 } 2010 
/* Send a beacon template (WMI_BCN_TMPL_CMDID) for a link vif.
 *
 * Layout of the command buffer:
 *   wmi_bcn_tmpl_cmd | bcn_prb_info TLV | ARRAY_BYTE TLV (beacon frame)
 *
 * @offs:     mac80211-provided offsets (TIM, CSA countdown, MBSSID) into
 *            the beacon buffer.
 * @bcn:      the beacon frame to install; copied into the command.
 * @ema_args: optional EMA (enhanced multi-BSSID advertisement) indexing;
 *            NULL when EMA is not in use.
 *
 * Returns 0 on success or a negative error; frees the skb on send failure.
 */
int ath12k_wmi_bcn_tmpl(struct ath12k_link_vif *arvif,
			struct ieee80211_mutable_offsets *offs,
			struct sk_buff *bcn,
			struct ath12k_wmi_bcn_tmpl_ema_arg *ema_args)
{
	struct ath12k *ar = arvif->ar;
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct ath12k_base *ab = ar->ab;
	struct wmi_bcn_tmpl_cmd *cmd;
	struct ath12k_wmi_bcn_prb_info_params *bcn_prb_info;
	struct ath12k_vif *ahvif = arvif->ahvif;
	struct ieee80211_bss_conf *conf;
	u32 vdev_id = arvif->vdev_id;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	u32 ema_params = 0;
	void *ptr;
	int ret, len;
	size_t aligned_len = roundup(bcn->len, 4);

	conf = ath12k_mac_get_link_bss_conf(arvif);
	if (!conf) {
		ath12k_warn(ab,
			    "unable to access bss link conf in beacon template command for vif %pM link %u\n",
			    ahvif->vif->addr, arvif->link_id);
		return -EINVAL;
	}

	len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_bcn_tmpl_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_TMPL_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->tim_ie_offset = cpu_to_le32(offs->tim_offset);

	if (conf->csa_active) {
		/* let firmware decrement the CSA/eCSA countdown IEs itself */
		cmd->csa_switch_count_offset =
			cpu_to_le32(offs->cntdwn_counter_offs[0]);
		cmd->ext_csa_switch_count_offset =
			cpu_to_le32(offs->cntdwn_counter_offs[1]);
		cmd->csa_event_bitmap = cpu_to_le32(0xFFFFFFFF);
		/* remember the initial countdown value for later tracking */
		arvif->current_cntdown_counter = bcn->data[offs->cntdwn_counter_offs[0]];
	}

	cmd->buf_len = cpu_to_le32(bcn->len);
	cmd->mbssid_ie_offset = cpu_to_le32(offs->mbssid_off);
	if (ema_args) {
		/* encode EMA beacon count/index plus first/last markers */
		u32p_replace_bits(&ema_params, ema_args->bcn_cnt, WMI_EMA_BEACON_CNT);
		u32p_replace_bits(&ema_params, ema_args->bcn_index, WMI_EMA_BEACON_IDX);
		if (ema_args->bcn_index == 0)
			u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_FIRST);
		if (ema_args->bcn_index + 1 == ema_args->bcn_cnt)
			u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_LAST);
		cmd->ema_params = cpu_to_le32(ema_params);
	}
	cmd->feature_enable_bitmap =
		cpu_to_le32(u32_encode_bits(arvif->beacon_prot,
					    WMI_BEACON_PROTECTION_EN_BIT));

	ptr = skb->data + sizeof(*cmd);

	/* mandatory (but empty) beacon/probe info TLV */
	bcn_prb_info = ptr;
	len = sizeof(*bcn_prb_info);
	bcn_prb_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO,
							  len);
	bcn_prb_info->caps = 0;
	bcn_prb_info->erp = 0;

	ptr += sizeof(*bcn_prb_info);

	/* beacon frame itself, padded to a 4-byte boundary */
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
	memcpy(tlv->value, bcn->data, bcn->len);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_TMPL_CMDID);
	if (ret) {
		ath12k_warn(ab, "failed to send WMI_BCN_TMPL_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

/* Install a cipher key for a peer on a vdev (WMI_VDEV_INSTALL_KEY_CMDID).
 * The key material is carried in a trailing ARRAY_BYTE TLV; the command
 * continues below this chunk boundary.
 */
int ath12k_wmi_vdev_install_key(struct ath12k *ar,
				struct wmi_vdev_install_key_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_install_key_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	int ret, len, key_len_aligned;

	/* WMI_TAG_ARRAY_BYTE needs to be aligned with 4, the actual key
	 * length is specified in cmd->key_len.
	 */
	key_len_aligned = roundup(arg->key_len, 4);

	len = sizeof(*cmd) + TLV_HDR_SIZE + key_len_aligned;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_INSTALL_KEY_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
	cmd->key_idx = cpu_to_le32(arg->key_idx);
	cmd->key_flags = cpu_to_le32(arg->key_flags);
	cmd->key_cipher = cpu_to_le32(arg->key_cipher);
	cmd->key_len = cpu_to_le32(arg->key_len);
	cmd->key_txmic_len = cpu_to_le32(arg->key_txmic_len);
	cmd->key_rxmic_len = cpu_to_le32(arg->key_rxmic_len);

	/* only set the receive sequence counter when the caller provided one */
	if (arg->key_rsc_counter)
		cmd->key_rsc_counter = cpu_to_le64(arg->key_rsc_counter);

	/* key material follows the fixed command as a padded byte array */
	tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, key_len_aligned);
	memcpy(tlv->value, arg->key_data, arg->key_len);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev install key idx %d cipher %d len %d\n",
		   arg->key_idx, arg->key_cipher, arg->key_len);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_VDEV_INSTALL_KEY cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

/* Translate the driver-side peer capability booleans in @arg into the
 * WMI peer_flags / peer_flags_ext bitmasks of the peer-assoc command.
 * @hw_crypto_disabled: when false and this is an association, AUTH is
 * suppressed for PTK 4-way peers until key installation completes.
 */
static void ath12k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
				       struct ath12k_wmi_peer_assoc_arg *arg,
				       bool hw_crypto_disabled)
{
	cmd->peer_flags = 0;
	cmd->peer_flags_ext = 0;

	if (arg->is_wme_set) {
		if (arg->qos_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_QOS);
		if (arg->apsd_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_APSD);
		if (arg->ht_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_HT);
		if (arg->bw_40)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_40MHZ);
		if (arg->bw_80)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_80MHZ);
		if (arg->bw_160)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_160MHZ);
		if (arg->bw_320)
			cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_320MHZ);

		/* Typically if STBC is enabled for VHT it should be enabled
		 * for HT as well
		 **/
		if (arg->stbc_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_STBC);

		/* Typically if LDPC is enabled for VHT it should be enabled
		 * for HT as well
		 **/
		if (arg->ldpc_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_LDPC);

		if (arg->static_mimops_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_STATIC_MIMOPS);
		if (arg->dynamic_mimops_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_DYN_MIMOPS);
		if (arg->spatial_mux_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_SPATIAL_MUX);
		if (arg->vht_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_VHT);
		if (arg->he_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_HE);
		if (arg->twt_requester)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_REQ);
		if (arg->twt_responder)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_RESP);
		if (arg->eht_flag)
			cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_EHT);
	}

	/* Suppress authorization for all AUTH modes that need 4-way handshake
	 * (during re-association).
	 * Authorization will be done for these modes on key installation.
	 */
	if (arg->auth_flag)
		cmd->peer_flags |= cpu_to_le32(WMI_PEER_AUTH);
	if (arg->need_ptk_4_way) {
		cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_PTK_4_WAY);
		if (!hw_crypto_disabled && arg->is_assoc)
			cmd->peer_flags &= cpu_to_le32(~WMI_PEER_AUTH);
	}
	if (arg->need_gtk_2_way)
		cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_GTK_2_WAY);
	/* safe mode bypass the 4-way handshake */
	if (arg->safe_mode_enabled)
		cmd->peer_flags &= cpu_to_le32(~(WMI_PEER_NEED_PTK_4_WAY |
						 WMI_PEER_NEED_GTK_2_WAY));

	if (arg->is_pmf_enabled)
		cmd->peer_flags |= cpu_to_le32(WMI_PEER_PMF);

	/* Disable AMSDU for station transmit, if user configures it */
	/* Disable AMSDU for AP transmit to 11n Stations, if user configures
	 * it
	 * if (arg->amsdu_disable) Add after FW support
	 **/

	/* Target asserts if node is marked HT and all MCS is set to 0.
	 * Mark the node as non-HT if all the mcs rates are disabled through
	 * iwpriv
	 **/
	if (arg->peer_ht_rates.num_rates == 0)
		cmd->peer_flags &= cpu_to_le32(~WMI_PEER_HT);
}

/* Build and send WMI_PEER_ASSOC_CMDID describing a (re)associating peer:
 * fixed capability fields followed by legacy/HT rate byte arrays, a VHT
 * rate-set TLV, HE/EHT rate-set TLVs and optional MLO parameter TLVs.
 * The function continues below this chunk boundary.
 */
int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
				   struct ath12k_wmi_peer_assoc_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_assoc_complete_cmd *cmd;
	struct ath12k_wmi_vht_rate_set_params *mcs;
	struct ath12k_wmi_he_rate_set_params *he_mcs;
	struct ath12k_wmi_eht_rate_set_params *eht_mcs;
	struct wmi_peer_assoc_mlo_params *ml_params;
	struct wmi_peer_assoc_mlo_partner_info_params *partner_info;
	struct sk_buff *skb;
	struct wmi_tlv *tlv;
	void *ptr;
	u32 peer_legacy_rates_align, eml_pad_delay, eml_trans_delay;
	u32 peer_ht_rates_align, eml_trans_timeout;
	int i, ret, len;
	u16 eml_cap;
	__le32 v;

	/* rate arrays are carried as 4-byte aligned ARRAY_BYTE TLVs */
	peer_legacy_rates_align = roundup(arg->peer_legacy_rates.num_rates,
					  sizeof(u32));
	peer_ht_rates_align =
		roundup(arg->peer_ht_rates.num_rates,
			sizeof(u32));

	/* fixed cmd + legacy rates + HT rates + VHT set + HE sets + EHT sets */
	len = sizeof(*cmd) +
	      TLV_HDR_SIZE + (peer_legacy_rates_align * sizeof(u8)) +
	      TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) +
	      sizeof(*mcs) + TLV_HDR_SIZE +
	      (sizeof(*he_mcs) * arg->peer_he_mcs_count) +
	      TLV_HDR_SIZE + (sizeof(*eht_mcs) * arg->peer_eht_mcs_count);

	/* MLO TLVs are always present; they are zero-length when disabled */
	if (arg->ml.enabled)
		len += TLV_HDR_SIZE + sizeof(*ml_params) +
		       TLV_HDR_SIZE + (arg->ml.num_partner_links * sizeof(*partner_info));
	else
		len += (2 * TLV_HDR_SIZE);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	ptr = skb->data;

	cmd = ptr;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_ASSOC_COMPLETE_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(arg->vdev_id);

	cmd->peer_new_assoc = cpu_to_le32(arg->peer_new_assoc);
	cmd->peer_associd = cpu_to_le32(arg->peer_associd);
	cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap);

	ath12k_wmi_copy_peer_flags(cmd, arg,
				   test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED,
					    &ar->ab->dev_flags));

	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_mac);

	cmd->peer_rate_caps = cpu_to_le32(arg->peer_rate_caps);
	cmd->peer_caps = cpu_to_le32(arg->peer_caps);
	cmd->peer_listen_intval = cpu_to_le32(arg->peer_listen_intval);
	cmd->peer_ht_caps = cpu_to_le32(arg->peer_ht_caps);
	cmd->peer_max_mpdu = cpu_to_le32(arg->peer_max_mpdu);
	cmd->peer_mpdu_density = cpu_to_le32(arg->peer_mpdu_density);
	cmd->peer_vht_caps = cpu_to_le32(arg->peer_vht_caps);
	cmd->peer_phymode = cpu_to_le32(arg->peer_phymode);

	/* Update 11ax capabilities */
	cmd->peer_he_cap_info = cpu_to_le32(arg->peer_he_cap_macinfo[0]);
	cmd->peer_he_cap_info_ext = cpu_to_le32(arg->peer_he_cap_macinfo[1]);
	cmd->peer_he_cap_info_internal = cpu_to_le32(arg->peer_he_cap_macinfo_internal);
	cmd->peer_he_caps_6ghz = cpu_to_le32(arg->peer_he_caps_6ghz);
	cmd->peer_he_ops = cpu_to_le32(arg->peer_he_ops);
	for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
		cmd->peer_he_cap_phy[i] =
			cpu_to_le32(arg->peer_he_cap_phyinfo[i]);
	cmd->peer_ppet.numss_m1 = cpu_to_le32(arg->peer_ppet.numss_m1);
	cmd->peer_ppet.ru_info = cpu_to_le32(arg->peer_ppet.ru_bit_mask);
	for (i = 0; i < WMI_MAX_NUM_SS; i++)
		cmd->peer_ppet.ppet16_ppet8_ru3_ru0[i] =
			cpu_to_le32(arg->peer_ppet.ppet16_ppet8_ru3_ru0[i]);

	/* Update 11be capabilities */
	memcpy_and_pad(cmd->peer_eht_cap_mac, sizeof(cmd->peer_eht_cap_mac),
		       arg->peer_eht_cap_mac, sizeof(arg->peer_eht_cap_mac),
		       0);
	memcpy_and_pad(cmd->peer_eht_cap_phy, sizeof(cmd->peer_eht_cap_phy),
		       arg->peer_eht_cap_phy, sizeof(arg->peer_eht_cap_phy),
		       0);
	memcpy_and_pad(&cmd->peer_eht_ppet, sizeof(cmd->peer_eht_ppet),
		       &arg->peer_eht_ppet, sizeof(arg->peer_eht_ppet), 0);

	/* Update peer legacy rate information */
	ptr += sizeof(*cmd);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_legacy_rates_align);

	ptr += TLV_HDR_SIZE;

	cmd->num_peer_legacy_rates = cpu_to_le32(arg->peer_legacy_rates.num_rates);
	memcpy(ptr, arg->peer_legacy_rates.rates,
	       arg->peer_legacy_rates.num_rates);

	/* Update peer HT rate information */
	ptr += peer_legacy_rates_align;

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_ht_rates_align);
	ptr += TLV_HDR_SIZE;
	cmd->num_peer_ht_rates = cpu_to_le32(arg->peer_ht_rates.num_rates);
	memcpy(ptr, arg->peer_ht_rates.rates,
	       arg->peer_ht_rates.num_rates);

	/* VHT Rates */
	ptr += peer_ht_rates_align;

	mcs = ptr;

	mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VHT_RATE_SET,
						 sizeof(*mcs));

	cmd->peer_nss = cpu_to_le32(arg->peer_nss);

	/* Update bandwidth-NSS mapping */
	cmd->peer_bw_rxnss_override = 0;
	cmd->peer_bw_rxnss_override |= cpu_to_le32(arg->peer_bw_rxnss_override);

	if (arg->vht_capable) {
		mcs->rx_max_rate = cpu_to_le32(arg->rx_max_rate);
		mcs->rx_mcs_set = cpu_to_le32(arg->rx_mcs_set);
		mcs->tx_max_rate = cpu_to_le32(arg->tx_max_rate);
		mcs->tx_mcs_set = cpu_to_le32(arg->tx_mcs_set);
	}

	/* HE Rates */
	cmd->peer_he_mcs = cpu_to_le32(arg->peer_he_mcs_count);
	cmd->min_data_rate = cpu_to_le32(arg->min_data_rate);

	ptr += sizeof(*mcs);

	len = arg->peer_he_mcs_count * sizeof(*he_mcs);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
	ptr += TLV_HDR_SIZE;

	/* Loop through the HE rate set */
	for (i = 0; i < arg->peer_he_mcs_count; i++) {
		he_mcs = ptr;
		he_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HE_RATE_SET,
							    sizeof(*he_mcs));

		he_mcs->rx_mcs_set = cpu_to_le32(arg->peer_he_rx_mcs_set[i]);
		he_mcs->tx_mcs_set = cpu_to_le32(arg->peer_he_tx_mcs_set[i]);
		ptr += sizeof(*he_mcs);
	}

	/* MLO link-level parameters; zero-length TLV when MLO is off */
	tlv = ptr;
	len = arg->ml.enabled ? sizeof(*ml_params) : 0;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
	ptr += TLV_HDR_SIZE;
	if (!len)
		goto skip_ml_params;

	ml_params = ptr;
	ml_params->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_ASSOC_PARAMS,
						       len);
	ml_params->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);

	if (arg->ml.assoc_link)
		ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK);

	if (arg->ml.primary_umac)
		ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC);

	if (arg->ml.logical_link_idx_valid)
		ml_params->flags |=
			cpu_to_le32(ATH12K_WMI_FLAG_MLO_LOGICAL_LINK_IDX_VALID);

	if (arg->ml.peer_id_valid)
		ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PEER_ID_VALID);

	ether_addr_copy(ml_params->mld_addr.addr, arg->ml.mld_addr);
	ml_params->logical_link_idx = cpu_to_le32(arg->ml.logical_link_idx);
	ml_params->ml_peer_id = cpu_to_le32(arg->ml.ml_peer_id);
	ml_params->ieee_link_id = cpu_to_le32(arg->ml.ieee_link_id);

	eml_cap = arg->ml.eml_cap;
	if (u16_get_bits(eml_cap, IEEE80211_EML_CAP_EMLSR_SUPP)) {
		ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_EMLSR_SUPPORT);
		/* Padding delay */
		eml_pad_delay = ieee80211_emlsr_pad_delay_in_us(eml_cap);
		ml_params->emlsr_padding_delay_us = cpu_to_le32(eml_pad_delay);
		/* Transition delay */
		eml_trans_delay = ieee80211_emlsr_trans_delay_in_us(eml_cap);
		ml_params->emlsr_trans_delay_us = cpu_to_le32(eml_trans_delay);
		/* Transition timeout */
		eml_trans_timeout = ieee80211_eml_trans_timeout_in_us(eml_cap);
		ml_params->emlsr_trans_timeout_us =
			cpu_to_le32(eml_trans_timeout);
		ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi peer %pM emlsr padding delay %u, trans delay %u trans timeout %u",
			   arg->peer_mac, eml_pad_delay, eml_trans_delay,
			   eml_trans_timeout);
	}

	ptr += sizeof(*ml_params);

skip_ml_params:
	/* Loop through the EHT rate set */
	len = arg->peer_eht_mcs_count * sizeof(*eht_mcs);
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
	ptr += TLV_HDR_SIZE;

	for (i = 0; i < arg->peer_eht_mcs_count; i++) {
		eht_mcs = ptr;
		eht_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_EHT_RATE_SET,
							     sizeof(*eht_mcs));

		eht_mcs->rx_mcs_set = cpu_to_le32(arg->peer_eht_rx_mcs_set[i]);
		eht_mcs->tx_mcs_set = cpu_to_le32(arg->peer_eht_tx_mcs_set[i]);
		ptr += sizeof(*eht_mcs);
	}

	/* Update MCS15 capability */
	if (arg->eht_disable_mcs15)
		cmd->peer_eht_ops = cpu_to_le32(IEEE80211_EHT_OPER_MCS15_DISABLE);

	tlv = ptr;
	len = arg->ml.enabled ? arg->ml.num_partner_links * sizeof(*partner_info) : 0;
	/* fill ML Partner links */
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
	ptr += TLV_HDR_SIZE;

	if (len == 0)
		goto send;

	for (i = 0; i < arg->ml.num_partner_links; i++) {
		/* note: this local deliberately shadows the outer cmd pointer
		 * only to hold the TLV tag value
		 */
		u32 cmd = WMI_TAG_MLO_PARTNER_LINK_PARAMS_PEER_ASSOC;

		partner_info = ptr;
		partner_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(cmd,
								  sizeof(*partner_info));
		partner_info->vdev_id = cpu_to_le32(arg->ml.partner_info[i].vdev_id);
		partner_info->hw_link_id =
			cpu_to_le32(arg->ml.partner_info[i].hw_link_id);
		partner_info->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);

		if (arg->ml.partner_info[i].assoc_link)
			partner_info->flags |=
				cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK);

		if (arg->ml.partner_info[i].primary_umac)
			partner_info->flags |=
				cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC);

		if (arg->ml.partner_info[i].logical_link_idx_valid) {
			v = cpu_to_le32(ATH12K_WMI_FLAG_MLO_LINK_ID_VALID);
			partner_info->flags |= v;
		}

		partner_info->logical_link_idx =
			cpu_to_le32(arg->ml.partner_info[i].logical_link_idx);
		ptr += sizeof(*partner_info);
	}

send:
	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x peer_flags_ext %x eht mac_cap %x %x eht phy_cap %x %x %x peer_eht_ops %x\n",
		   cmd->vdev_id, cmd->peer_associd, arg->peer_mac,
		   cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps,
		   cmd->peer_listen_intval, cmd->peer_ht_caps,
		   cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode,
		   cmd->peer_mpdu_density,
		   cmd->peer_vht_caps, cmd->peer_he_cap_info,
		   cmd->peer_he_ops, cmd->peer_he_cap_info_ext,
		   cmd->peer_he_cap_phy[0], cmd->peer_he_cap_phy[1],
		   cmd->peer_he_cap_phy[2],
		   cmd->peer_bw_rxnss_override, cmd->peer_flags_ext,
		   cmd->peer_eht_cap_mac[0], cmd->peer_eht_cap_mac[1],
		   cmd->peer_eht_cap_phy[0], cmd->peer_eht_cap_phy[1],
		   cmd->peer_eht_cap_phy[2], cmd->peer_eht_ops);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PEER_ASSOC_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

/* Initialize a scan request argument with the driver's default dwell
 * times, rest times and event subscription; callers override fields
 * afterwards as needed. The initializer continues below this chunk
 * boundary.
 */
void ath12k_wmi_start_scan_init(struct ath12k *ar,
				struct ath12k_wmi_scan_req_arg *arg)
{
	/* setup commonly used values */
	arg->scan_req_id = 1;
	arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
	arg->dwell_time_active = 50;
	arg->dwell_time_active_2g = 0;
	arg->dwell_time_passive = 150;
	arg->dwell_time_active_6g = 70;
	arg->dwell_time_passive_6g = 70;
	arg->min_rest_time = 50;
	arg->max_rest_time = 500;
	arg->repeat_probe_time = 0;
	arg->probe_spacing_time = 0;
	arg->idle_time = 0;
	arg->max_scan_time = 20000;
	arg->probe_delay = 5;
	arg->notify_scan_events = WMI_SCAN_EVENT_STARTED |
				  WMI_SCAN_EVENT_COMPLETED |
				  WMI_SCAN_EVENT_BSS_CHANNEL |
				  WMI_SCAN_EVENT_FOREIGN_CHAN |
				  WMI_SCAN_EVENT_DEQUEUED;
	arg->scan_f_chan_stat_evnt = 1;
	arg->num_bssid = 1;

	/* fill bssid_list[0] with 0xff, otherwise bssid and RA will be
	 * ZEROs in probe request
	 */
	eth_broadcast_addr(arg->bssid_list[0].addr);
}

/* Translate the per-request scan event subscription and scan behavior
 * booleans in @arg into the notify_scan_events and scan_ctrl_flags
 * bitmasks of the start-scan command. The final statement (adaptive
 * dwell mode encoding) completes below this chunk boundary.
 */
static void ath12k_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd *cmd,
						   struct ath12k_wmi_scan_req_arg *arg)
{
	/* Scan events subscription */
	if (arg->scan_ev_started)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_STARTED);
	if (arg->scan_ev_completed)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_COMPLETED);
	if (arg->scan_ev_bss_chan)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_BSS_CHANNEL);
	if (arg->scan_ev_foreign_chan)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN);
	if (arg->scan_ev_dequeued)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_DEQUEUED);
	if (arg->scan_ev_preempted)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_PREEMPTED);
	if (arg->scan_ev_start_failed)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_START_FAILED);
	if (arg->scan_ev_restarted)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESTARTED);
	if (arg->scan_ev_foreign_chn_exit)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT);
	if (arg->scan_ev_suspended)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_SUSPENDED);
	if (arg->scan_ev_resumed)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESUMED);

	/** Set scan control flags */
	cmd->scan_ctrl_flags = 0;
	if (arg->scan_f_passive)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_PASSIVE);
	if (arg->scan_f_strict_passive_pch)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN);
	if (arg->scan_f_promisc_mode)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROMISCUOS);
	if (arg->scan_f_capture_phy_err)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CAPTURE_PHY_ERROR);
	if (arg->scan_f_half_rate)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_HALF_RATE_SUPPORT);
	if (arg->scan_f_quarter_rate)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT);
	if (arg->scan_f_cck_rates)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_CCK_RATES);
	if (arg->scan_f_ofdm_rates)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_OFDM_RATES);
	if (arg->scan_f_chan_stat_evnt)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CHAN_STAT_EVENT);
	if (arg->scan_f_filter_prb_req)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);
	if (arg->scan_f_bcast_probe)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_BCAST_PROBE_REQ);
	if (arg->scan_f_offchan_mgmt_tx)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_MGMT_TX);
	if (arg->scan_f_offchan_data_tx)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_DATA_TX);
	if (arg->scan_f_force_active_dfs_chn)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS);
	if (arg->scan_f_add_tpc_ie_in_probe)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ);
	if (arg->scan_f_add_ds_ie_in_probe)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ);
	if (arg->scan_f_add_spoofed_mac_in_probe)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ);
	if (arg->scan_f_add_rand_seq_in_probe)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ);
	if (arg->scan_f_en_ie_whitelist_in_probe)
		cmd->scan_ctrl_flags |=
			cpu_to_le32(WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ);

	cmd->scan_ctrl_flags |= le32_encode_bits(arg->adaptive_dwell_time_mode,
WMI_SCAN_DWELL_MODE_MASK); 2638 } 2639 2640 int ath12k_wmi_send_scan_start_cmd(struct ath12k *ar, 2641 struct ath12k_wmi_scan_req_arg *arg) 2642 { 2643 struct ath12k_wmi_pdev *wmi = ar->wmi; 2644 struct wmi_start_scan_cmd *cmd; 2645 struct ath12k_wmi_ssid_params *ssid = NULL; 2646 struct ath12k_wmi_mac_addr_params *bssid; 2647 struct sk_buff *skb; 2648 struct wmi_tlv *tlv; 2649 void *ptr; 2650 int i, ret, len; 2651 u32 *tmp_ptr, extraie_len_with_pad = 0; 2652 struct ath12k_wmi_hint_short_ssid_arg *s_ssid = NULL; 2653 struct ath12k_wmi_hint_bssid_arg *hint_bssid = NULL; 2654 2655 len = sizeof(*cmd); 2656 2657 len += TLV_HDR_SIZE; 2658 if (arg->num_chan) 2659 len += arg->num_chan * sizeof(u32); 2660 2661 len += TLV_HDR_SIZE; 2662 if (arg->num_ssids) 2663 len += arg->num_ssids * sizeof(*ssid); 2664 2665 len += TLV_HDR_SIZE; 2666 if (arg->num_bssid) 2667 len += sizeof(*bssid) * arg->num_bssid; 2668 2669 if (arg->num_hint_bssid) 2670 len += TLV_HDR_SIZE + 2671 arg->num_hint_bssid * sizeof(*hint_bssid); 2672 2673 if (arg->num_hint_s_ssid) 2674 len += TLV_HDR_SIZE + 2675 arg->num_hint_s_ssid * sizeof(*s_ssid); 2676 2677 len += TLV_HDR_SIZE; 2678 if (arg->extraie.len) 2679 extraie_len_with_pad = 2680 roundup(arg->extraie.len, sizeof(u32)); 2681 if (extraie_len_with_pad <= (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len)) { 2682 len += extraie_len_with_pad; 2683 } else { 2684 ath12k_warn(ar->ab, "discard large size %d bytes extraie for scan start\n", 2685 arg->extraie.len); 2686 extraie_len_with_pad = 0; 2687 } 2688 2689 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 2690 if (!skb) 2691 return -ENOMEM; 2692 2693 ptr = skb->data; 2694 2695 cmd = ptr; 2696 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_START_SCAN_CMD, 2697 sizeof(*cmd)); 2698 2699 cmd->scan_id = cpu_to_le32(arg->scan_id); 2700 cmd->scan_req_id = cpu_to_le32(arg->scan_req_id); 2701 cmd->vdev_id = cpu_to_le32(arg->vdev_id); 2702 if (ar->state_11d == ATH12K_11D_PREPARING) 2703 arg->scan_priority = 
WMI_SCAN_PRIORITY_MEDIUM; 2704 else 2705 arg->scan_priority = WMI_SCAN_PRIORITY_LOW; 2706 cmd->notify_scan_events = cpu_to_le32(arg->notify_scan_events); 2707 2708 ath12k_wmi_copy_scan_event_cntrl_flags(cmd, arg); 2709 2710 cmd->dwell_time_active = cpu_to_le32(arg->dwell_time_active); 2711 cmd->dwell_time_active_2g = cpu_to_le32(arg->dwell_time_active_2g); 2712 cmd->dwell_time_passive = cpu_to_le32(arg->dwell_time_passive); 2713 cmd->dwell_time_active_6g = cpu_to_le32(arg->dwell_time_active_6g); 2714 cmd->dwell_time_passive_6g = cpu_to_le32(arg->dwell_time_passive_6g); 2715 cmd->min_rest_time = cpu_to_le32(arg->min_rest_time); 2716 cmd->max_rest_time = cpu_to_le32(arg->max_rest_time); 2717 cmd->repeat_probe_time = cpu_to_le32(arg->repeat_probe_time); 2718 cmd->probe_spacing_time = cpu_to_le32(arg->probe_spacing_time); 2719 cmd->idle_time = cpu_to_le32(arg->idle_time); 2720 cmd->max_scan_time = cpu_to_le32(arg->max_scan_time); 2721 cmd->probe_delay = cpu_to_le32(arg->probe_delay); 2722 cmd->burst_duration = cpu_to_le32(arg->burst_duration); 2723 cmd->num_chan = cpu_to_le32(arg->num_chan); 2724 cmd->num_bssid = cpu_to_le32(arg->num_bssid); 2725 cmd->num_ssids = cpu_to_le32(arg->num_ssids); 2726 cmd->ie_len = cpu_to_le32(arg->extraie.len); 2727 cmd->n_probes = cpu_to_le32(arg->n_probes); 2728 2729 ptr += sizeof(*cmd); 2730 2731 len = arg->num_chan * sizeof(u32); 2732 2733 tlv = ptr; 2734 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, len); 2735 ptr += TLV_HDR_SIZE; 2736 tmp_ptr = (u32 *)ptr; 2737 2738 memcpy(tmp_ptr, arg->chan_list, arg->num_chan * 4); 2739 2740 ptr += len; 2741 2742 len = arg->num_ssids * sizeof(*ssid); 2743 tlv = ptr; 2744 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len); 2745 2746 ptr += TLV_HDR_SIZE; 2747 2748 if (arg->num_ssids) { 2749 ssid = ptr; 2750 for (i = 0; i < arg->num_ssids; ++i) { 2751 ssid->ssid_len = cpu_to_le32(arg->ssid[i].ssid_len); 2752 memcpy(ssid->ssid, arg->ssid[i].ssid, 2753 arg->ssid[i].ssid_len); 
2754 ssid++; 2755 } 2756 } 2757 2758 ptr += (arg->num_ssids * sizeof(*ssid)); 2759 len = arg->num_bssid * sizeof(*bssid); 2760 tlv = ptr; 2761 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len); 2762 2763 ptr += TLV_HDR_SIZE; 2764 bssid = ptr; 2765 2766 if (arg->num_bssid) { 2767 for (i = 0; i < arg->num_bssid; ++i) { 2768 ether_addr_copy(bssid->addr, 2769 arg->bssid_list[i].addr); 2770 bssid++; 2771 } 2772 } 2773 2774 ptr += arg->num_bssid * sizeof(*bssid); 2775 2776 len = extraie_len_with_pad; 2777 tlv = ptr; 2778 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len); 2779 ptr += TLV_HDR_SIZE; 2780 2781 if (extraie_len_with_pad) 2782 memcpy(ptr, arg->extraie.ptr, 2783 arg->extraie.len); 2784 2785 ptr += extraie_len_with_pad; 2786 2787 if (arg->num_hint_s_ssid) { 2788 len = arg->num_hint_s_ssid * sizeof(*s_ssid); 2789 tlv = ptr; 2790 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len); 2791 ptr += TLV_HDR_SIZE; 2792 s_ssid = ptr; 2793 for (i = 0; i < arg->num_hint_s_ssid; ++i) { 2794 s_ssid->freq_flags = arg->hint_s_ssid[i].freq_flags; 2795 s_ssid->short_ssid = arg->hint_s_ssid[i].short_ssid; 2796 s_ssid++; 2797 } 2798 ptr += len; 2799 } 2800 2801 if (arg->num_hint_bssid) { 2802 len = arg->num_hint_bssid * sizeof(struct ath12k_wmi_hint_bssid_arg); 2803 tlv = ptr; 2804 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len); 2805 ptr += TLV_HDR_SIZE; 2806 hint_bssid = ptr; 2807 for (i = 0; i < arg->num_hint_bssid; ++i) { 2808 hint_bssid->freq_flags = 2809 arg->hint_bssid[i].freq_flags; 2810 ether_addr_copy(&arg->hint_bssid[i].bssid.addr[0], 2811 &hint_bssid->bssid.addr[0]); 2812 hint_bssid++; 2813 } 2814 } 2815 2816 ret = ath12k_wmi_cmd_send(wmi, skb, 2817 WMI_START_SCAN_CMDID); 2818 if (ret) { 2819 ath12k_warn(ar->ab, "failed to send WMI_START_SCAN_CMDID\n"); 2820 dev_kfree_skb(skb); 2821 } 2822 2823 return ret; 2824 } 2825 2826 int ath12k_wmi_send_scan_stop_cmd(struct ath12k *ar, 2827 struct ath12k_wmi_scan_cancel_arg 
*arg) 2828 { 2829 struct ath12k_wmi_pdev *wmi = ar->wmi; 2830 struct wmi_stop_scan_cmd *cmd; 2831 struct sk_buff *skb; 2832 int ret; 2833 2834 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 2835 if (!skb) 2836 return -ENOMEM; 2837 2838 cmd = (struct wmi_stop_scan_cmd *)skb->data; 2839 2840 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STOP_SCAN_CMD, 2841 sizeof(*cmd)); 2842 2843 cmd->vdev_id = cpu_to_le32(arg->vdev_id); 2844 cmd->requestor = cpu_to_le32(arg->requester); 2845 cmd->scan_id = cpu_to_le32(arg->scan_id); 2846 cmd->pdev_id = cpu_to_le32(arg->pdev_id); 2847 /* stop the scan with the corresponding scan_id */ 2848 if (arg->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) { 2849 /* Cancelling all scans */ 2850 cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_ALL); 2851 } else if (arg->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) { 2852 /* Cancelling VAP scans */ 2853 cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_VAP_ALL); 2854 } else if (arg->req_type == WLAN_SCAN_CANCEL_SINGLE) { 2855 /* Cancelling specific scan */ 2856 cmd->req_type = WMI_SCAN_STOP_ONE; 2857 } else { 2858 ath12k_warn(ar->ab, "invalid scan cancel req_type %d", 2859 arg->req_type); 2860 dev_kfree_skb(skb); 2861 return -EINVAL; 2862 } 2863 2864 ret = ath12k_wmi_cmd_send(wmi, skb, 2865 WMI_STOP_SCAN_CMDID); 2866 if (ret) { 2867 ath12k_warn(ar->ab, "failed to send WMI_STOP_SCAN_CMDID\n"); 2868 dev_kfree_skb(skb); 2869 } 2870 2871 return ret; 2872 } 2873 2874 int ath12k_wmi_send_scan_chan_list_cmd(struct ath12k *ar, 2875 struct ath12k_wmi_scan_chan_list_arg *arg) 2876 { 2877 struct ath12k_wmi_pdev *wmi = ar->wmi; 2878 struct wmi_scan_chan_list_cmd *cmd; 2879 struct sk_buff *skb; 2880 struct ath12k_wmi_channel_params *chan_info; 2881 struct ath12k_wmi_channel_arg *channel_arg; 2882 struct wmi_tlv *tlv; 2883 void *ptr; 2884 int i, ret, len; 2885 u16 num_send_chans, num_sends = 0, max_chan_limit = 0; 2886 __le32 *reg1, *reg2; 2887 2888 channel_arg = &arg->channel[0]; 2889 while (arg->nallchans) { 2890 len = 
sizeof(*cmd) + TLV_HDR_SIZE; 2891 max_chan_limit = (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len) / 2892 sizeof(*chan_info); 2893 2894 num_send_chans = min(arg->nallchans, max_chan_limit); 2895 2896 arg->nallchans -= num_send_chans; 2897 len += sizeof(*chan_info) * num_send_chans; 2898 2899 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 2900 if (!skb) 2901 return -ENOMEM; 2902 2903 cmd = (struct wmi_scan_chan_list_cmd *)skb->data; 2904 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SCAN_CHAN_LIST_CMD, 2905 sizeof(*cmd)); 2906 cmd->pdev_id = cpu_to_le32(arg->pdev_id); 2907 cmd->num_scan_chans = cpu_to_le32(num_send_chans); 2908 if (num_sends) 2909 cmd->flags |= cpu_to_le32(WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG); 2910 2911 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 2912 "WMI no.of chan = %d len = %d pdev_id = %d num_sends = %d\n", 2913 num_send_chans, len, cmd->pdev_id, num_sends); 2914 2915 ptr = skb->data + sizeof(*cmd); 2916 2917 len = sizeof(*chan_info) * num_send_chans; 2918 tlv = ptr; 2919 tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_STRUCT, 2920 len); 2921 ptr += TLV_HDR_SIZE; 2922 2923 for (i = 0; i < num_send_chans; ++i) { 2924 chan_info = ptr; 2925 memset(chan_info, 0, sizeof(*chan_info)); 2926 len = sizeof(*chan_info); 2927 chan_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL, 2928 len); 2929 2930 reg1 = &chan_info->reg_info_1; 2931 reg2 = &chan_info->reg_info_2; 2932 chan_info->mhz = cpu_to_le32(channel_arg->mhz); 2933 chan_info->band_center_freq1 = cpu_to_le32(channel_arg->cfreq1); 2934 chan_info->band_center_freq2 = cpu_to_le32(channel_arg->cfreq2); 2935 2936 if (channel_arg->is_chan_passive) 2937 chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE); 2938 if (channel_arg->allow_he) 2939 chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE); 2940 else if (channel_arg->allow_vht) 2941 chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT); 2942 else if (channel_arg->allow_ht) 2943 chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT); 2944 if 
(channel_arg->half_rate)
				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_HALF_RATE);
			if (channel_arg->quarter_rate)
				chan_info->info |=
					cpu_to_le32(WMI_CHAN_INFO_QUARTER_RATE);

			if (channel_arg->psc_channel)
				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PSC);

			if (channel_arg->dfs_set)
				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_DFS);

			/* phy mode plus regulatory power limits are packed into the
			 * info/reg_info words via le32 bitfield encoding.
			 */
			chan_info->info |= le32_encode_bits(channel_arg->phy_mode,
							    WMI_CHAN_INFO_MODE);
			*reg1 |= le32_encode_bits(channel_arg->minpower,
						  WMI_CHAN_REG_INFO1_MIN_PWR);
			*reg1 |= le32_encode_bits(channel_arg->maxpower,
						  WMI_CHAN_REG_INFO1_MAX_PWR);
			*reg1 |= le32_encode_bits(channel_arg->maxregpower,
						  WMI_CHAN_REG_INFO1_MAX_REG_PWR);
			*reg1 |= le32_encode_bits(channel_arg->reg_class_id,
						  WMI_CHAN_REG_INFO1_REG_CLS);
			*reg2 |= le32_encode_bits(channel_arg->antennamax,
						  WMI_CHAN_REG_INFO2_ANT_MAX);
			*reg2 |= le32_encode_bits(channel_arg->maxregpower,
						  WMI_CHAN_REG_INFO2_MAX_TX_PWR);

			ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
				   "WMI chan scan list chan[%d] = %u, chan_info->info %8x\n",
				   i, chan_info->mhz, chan_info->info);

			ptr += sizeof(*chan_info);

			channel_arg++;
		}

		ret = ath12k_wmi_cmd_send(wmi, skb, WMI_SCAN_CHAN_LIST_CMDID);
		if (ret) {
			ath12k_warn(ar->ab, "failed to send WMI_SCAN_CHAN_LIST cmd\n");
			dev_kfree_skb(skb);
			return ret;
		}

		num_sends++;
	}

	return 0;
}

/* Push the WMM (EDCA) parameters for all four access categories of a vdev
 * to firmware in a single WMI_VDEV_SET_WMM_PARAMS command.
 */
int ath12k_wmi_send_wmm_update_cmd(struct ath12k *ar, u32 vdev_id,
				   struct wmi_wmm_params_all_arg *param)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_set_wmm_params_cmd *cmd;
	struct wmi_wmm_params *wmm_param;
	struct wmi_wmm_params_arg *wmi_wmm_arg;
	struct sk_buff *skb;
	int ret, ac;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_set_wmm_params_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->wmm_param_type = 0;

	/* One embedded wmi_wmm_params TLV per access category, in WME order. */
	for (ac = 0; ac < WME_NUM_AC; ac++) {
		switch (ac) {
		case WME_AC_BE:
			wmi_wmm_arg = &param->ac_be;
			break;
		case WME_AC_BK:
			wmi_wmm_arg = &param->ac_bk;
			break;
		case WME_AC_VI:
			wmi_wmm_arg = &param->ac_vi;
			break;
		case WME_AC_VO:
			wmi_wmm_arg = &param->ac_vo;
			break;
		}

		wmm_param = (struct wmi_wmm_params *)&cmd->wmm_params[ac];
		wmm_param->tlv_header =
			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
					       sizeof(*wmm_param));

		wmm_param->aifs = cpu_to_le32(wmi_wmm_arg->aifs);
		wmm_param->cwmin = cpu_to_le32(wmi_wmm_arg->cwmin);
		wmm_param->cwmax = cpu_to_le32(wmi_wmm_arg->cwmax);
		wmm_param->txoplimit = cpu_to_le32(wmi_wmm_arg->txop);
		wmm_param->acm = cpu_to_le32(wmi_wmm_arg->acm);
		wmm_param->no_ack = cpu_to_le32(wmi_wmm_arg->no_ack);

		/* NOTE(review): the __le32 fields below are printed raw with %d;
		 * values appear byte-swapped on big-endian hosts — confirm.
		 */
		ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
			   "wmi wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
			   ac, wmm_param->aifs, wmm_param->cwmin,
			   wmm_param->cwmax, wmm_param->txoplimit,
			   wmm_param->acm, wmm_param->no_ack);
	}
	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_VDEV_SET_WMM_PARAMS_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_VDEV_SET_WMM_PARAMS_CMDID");
		dev_kfree_skb(skb);
	}

	return ret;
}

/* Enable DFS phyerr offload (radar detection in firmware) for a pdev. */
int ath12k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath12k *ar,
						  u32 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_dfs_phyerr_offload_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_dfs_phyerr_offload_cmd *)skb->data;
	cmd->tlv_header =
ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD, 3074 sizeof(*cmd)); 3075 3076 cmd->pdev_id = cpu_to_le32(pdev_id); 3077 3078 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 3079 "WMI dfs phy err offload enable pdev id %d\n", pdev_id); 3080 3081 ret = ath12k_wmi_cmd_send(wmi, skb, 3082 WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID); 3083 if (ret) { 3084 ath12k_warn(ar->ab, 3085 "failed to send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE cmd\n"); 3086 dev_kfree_skb(skb); 3087 } 3088 3089 return ret; 3090 } 3091 3092 int ath12k_wmi_set_bios_cmd(struct ath12k_base *ab, u32 param_id, 3093 const u8 *buf, size_t buf_len) 3094 { 3095 struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab; 3096 struct wmi_pdev_set_bios_interface_cmd *cmd; 3097 struct wmi_tlv *tlv; 3098 struct sk_buff *skb; 3099 u8 *ptr; 3100 u32 len, len_aligned; 3101 int ret; 3102 3103 len_aligned = roundup(buf_len, sizeof(u32)); 3104 len = sizeof(*cmd) + TLV_HDR_SIZE + len_aligned; 3105 3106 skb = ath12k_wmi_alloc_skb(wmi_ab, len); 3107 if (!skb) 3108 return -ENOMEM; 3109 3110 cmd = (struct wmi_pdev_set_bios_interface_cmd *)skb->data; 3111 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_INTERFACE_CMD, 3112 sizeof(*cmd)); 3113 cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC); 3114 cmd->param_type_id = cpu_to_le32(param_id); 3115 cmd->length = cpu_to_le32(buf_len); 3116 3117 ptr = skb->data + sizeof(*cmd); 3118 tlv = (struct wmi_tlv *)ptr; 3119 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len_aligned); 3120 ptr += TLV_HDR_SIZE; 3121 memcpy(ptr, buf, buf_len); 3122 3123 ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], 3124 skb, 3125 WMI_PDEV_SET_BIOS_INTERFACE_CMDID); 3126 if (ret) { 3127 ath12k_warn(ab, 3128 "failed to send WMI_PDEV_SET_BIOS_INTERFACE_CMDID parameter id %d: %d\n", 3129 param_id, ret); 3130 dev_kfree_skb(skb); 3131 } 3132 3133 return 0; 3134 } 3135 3136 int ath12k_wmi_set_bios_sar_cmd(struct ath12k_base *ab, const u8 *psar_table) 3137 { 3138 struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab; 3139 
struct wmi_pdev_set_bios_sar_table_cmd *cmd; 3140 struct wmi_tlv *tlv; 3141 struct sk_buff *skb; 3142 int ret; 3143 u8 *buf_ptr; 3144 u32 len, sar_table_len_aligned, sar_dbs_backoff_len_aligned; 3145 const u8 *psar_value = psar_table + ATH12K_ACPI_POWER_LIMIT_DATA_OFFSET; 3146 const u8 *pdbs_value = psar_table + ATH12K_ACPI_DBS_BACKOFF_DATA_OFFSET; 3147 3148 sar_table_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_TABLE_LEN, sizeof(u32)); 3149 sar_dbs_backoff_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN, 3150 sizeof(u32)); 3151 len = sizeof(*cmd) + TLV_HDR_SIZE + sar_table_len_aligned + 3152 TLV_HDR_SIZE + sar_dbs_backoff_len_aligned; 3153 3154 skb = ath12k_wmi_alloc_skb(wmi_ab, len); 3155 if (!skb) 3156 return -ENOMEM; 3157 3158 cmd = (struct wmi_pdev_set_bios_sar_table_cmd *)skb->data; 3159 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD, 3160 sizeof(*cmd)); 3161 cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC); 3162 cmd->sar_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_TABLE_LEN); 3163 cmd->dbs_backoff_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN); 3164 3165 buf_ptr = skb->data + sizeof(*cmd); 3166 tlv = (struct wmi_tlv *)buf_ptr; 3167 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, 3168 sar_table_len_aligned); 3169 buf_ptr += TLV_HDR_SIZE; 3170 memcpy(buf_ptr, psar_value, ATH12K_ACPI_BIOS_SAR_TABLE_LEN); 3171 3172 buf_ptr += sar_table_len_aligned; 3173 tlv = (struct wmi_tlv *)buf_ptr; 3174 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, 3175 sar_dbs_backoff_len_aligned); 3176 buf_ptr += TLV_HDR_SIZE; 3177 memcpy(buf_ptr, pdbs_value, ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN); 3178 3179 ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], 3180 skb, 3181 WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID); 3182 if (ret) { 3183 ath12k_warn(ab, 3184 "failed to send WMI_PDEV_SET_BIOS_INTERFACE_CMDID %d\n", 3185 ret); 3186 dev_kfree_skb(skb); 3187 } 3188 3189 return ret; 3190 } 3191 3192 int ath12k_wmi_set_bios_geo_cmd(struct ath12k_base *ab, const u8 
*pgeo_table) 3193 { 3194 struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab; 3195 struct wmi_pdev_set_bios_geo_table_cmd *cmd; 3196 struct wmi_tlv *tlv; 3197 struct sk_buff *skb; 3198 int ret; 3199 u8 *buf_ptr; 3200 u32 len, sar_geo_len_aligned; 3201 const u8 *pgeo_value = pgeo_table + ATH12K_ACPI_GEO_OFFSET_DATA_OFFSET; 3202 3203 sar_geo_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN, sizeof(u32)); 3204 len = sizeof(*cmd) + TLV_HDR_SIZE + sar_geo_len_aligned; 3205 3206 skb = ath12k_wmi_alloc_skb(wmi_ab, len); 3207 if (!skb) 3208 return -ENOMEM; 3209 3210 cmd = (struct wmi_pdev_set_bios_geo_table_cmd *)skb->data; 3211 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD, 3212 sizeof(*cmd)); 3213 cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC); 3214 cmd->geo_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN); 3215 3216 buf_ptr = skb->data + sizeof(*cmd); 3217 tlv = (struct wmi_tlv *)buf_ptr; 3218 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, sar_geo_len_aligned); 3219 buf_ptr += TLV_HDR_SIZE; 3220 memcpy(buf_ptr, pgeo_value, ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN); 3221 3222 ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], 3223 skb, 3224 WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID); 3225 if (ret) { 3226 ath12k_warn(ab, 3227 "failed to send WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID %d\n", 3228 ret); 3229 dev_kfree_skb(skb); 3230 } 3231 3232 return ret; 3233 } 3234 3235 int ath12k_wmi_delba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac, 3236 u32 tid, u32 initiator, u32 reason) 3237 { 3238 struct ath12k_wmi_pdev *wmi = ar->wmi; 3239 struct wmi_delba_send_cmd *cmd; 3240 struct sk_buff *skb; 3241 int ret; 3242 3243 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 3244 if (!skb) 3245 return -ENOMEM; 3246 3247 cmd = (struct wmi_delba_send_cmd *)skb->data; 3248 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DELBA_SEND_CMD, 3249 sizeof(*cmd)); 3250 cmd->vdev_id = cpu_to_le32(vdev_id); 3251 ether_addr_copy(cmd->peer_macaddr.addr, mac); 3252 cmd->tid = 
cpu_to_le32(tid); 3253 cmd->initiator = cpu_to_le32(initiator); 3254 cmd->reasoncode = cpu_to_le32(reason); 3255 3256 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 3257 "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n", 3258 vdev_id, mac, tid, initiator, reason); 3259 3260 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_DELBA_SEND_CMDID); 3261 3262 if (ret) { 3263 ath12k_warn(ar->ab, 3264 "failed to send WMI_DELBA_SEND_CMDID cmd\n"); 3265 dev_kfree_skb(skb); 3266 } 3267 3268 return ret; 3269 } 3270 3271 int ath12k_wmi_addba_set_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac, 3272 u32 tid, u32 status) 3273 { 3274 struct ath12k_wmi_pdev *wmi = ar->wmi; 3275 struct wmi_addba_setresponse_cmd *cmd; 3276 struct sk_buff *skb; 3277 int ret; 3278 3279 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 3280 if (!skb) 3281 return -ENOMEM; 3282 3283 cmd = (struct wmi_addba_setresponse_cmd *)skb->data; 3284 cmd->tlv_header = 3285 ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SETRESPONSE_CMD, 3286 sizeof(*cmd)); 3287 cmd->vdev_id = cpu_to_le32(vdev_id); 3288 ether_addr_copy(cmd->peer_macaddr.addr, mac); 3289 cmd->tid = cpu_to_le32(tid); 3290 cmd->statuscode = cpu_to_le32(status); 3291 3292 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 3293 "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n", 3294 vdev_id, mac, tid, status); 3295 3296 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SET_RESP_CMDID); 3297 3298 if (ret) { 3299 ath12k_warn(ar->ab, 3300 "failed to send WMI_ADDBA_SET_RESP_CMDID cmd\n"); 3301 dev_kfree_skb(skb); 3302 } 3303 3304 return ret; 3305 } 3306 3307 int ath12k_wmi_addba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac, 3308 u32 tid, u32 buf_size) 3309 { 3310 struct ath12k_wmi_pdev *wmi = ar->wmi; 3311 struct wmi_addba_send_cmd *cmd; 3312 struct sk_buff *skb; 3313 int ret; 3314 3315 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 3316 if (!skb) 3317 return -ENOMEM; 3318 3319 cmd = (struct wmi_addba_send_cmd *)skb->data; 3320 cmd->tlv_header = 
ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SEND_CMD, 3321 sizeof(*cmd)); 3322 cmd->vdev_id = cpu_to_le32(vdev_id); 3323 ether_addr_copy(cmd->peer_macaddr.addr, mac); 3324 cmd->tid = cpu_to_le32(tid); 3325 cmd->buffersize = cpu_to_le32(buf_size); 3326 3327 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 3328 "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n", 3329 vdev_id, mac, tid, buf_size); 3330 3331 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SEND_CMDID); 3332 3333 if (ret) { 3334 ath12k_warn(ar->ab, 3335 "failed to send WMI_ADDBA_SEND_CMDID cmd\n"); 3336 dev_kfree_skb(skb); 3337 } 3338 3339 return ret; 3340 } 3341 3342 int ath12k_wmi_addba_clear_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac) 3343 { 3344 struct ath12k_wmi_pdev *wmi = ar->wmi; 3345 struct wmi_addba_clear_resp_cmd *cmd; 3346 struct sk_buff *skb; 3347 int ret; 3348 3349 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 3350 if (!skb) 3351 return -ENOMEM; 3352 3353 cmd = (struct wmi_addba_clear_resp_cmd *)skb->data; 3354 cmd->tlv_header = 3355 ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_CLEAR_RESP_CMD, 3356 sizeof(*cmd)); 3357 cmd->vdev_id = cpu_to_le32(vdev_id); 3358 ether_addr_copy(cmd->peer_macaddr.addr, mac); 3359 3360 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 3361 "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n", 3362 vdev_id, mac); 3363 3364 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_CLEAR_RESP_CMDID); 3365 3366 if (ret) { 3367 ath12k_warn(ar->ab, 3368 "failed to send WMI_ADDBA_CLEAR_RESP_CMDID cmd\n"); 3369 dev_kfree_skb(skb); 3370 } 3371 3372 return ret; 3373 } 3374 3375 int ath12k_wmi_send_init_country_cmd(struct ath12k *ar, 3376 struct ath12k_wmi_init_country_arg *arg) 3377 { 3378 struct ath12k_wmi_pdev *wmi = ar->wmi; 3379 struct wmi_init_country_cmd *cmd; 3380 struct sk_buff *skb; 3381 int ret; 3382 3383 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 3384 if (!skb) 3385 return -ENOMEM; 3386 3387 cmd = (struct wmi_init_country_cmd *)skb->data; 3388 cmd->tlv_header = 3389 
ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_INIT_COUNTRY_CMD, 3390 sizeof(*cmd)); 3391 3392 cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id); 3393 3394 switch (arg->flags) { 3395 case ALPHA_IS_SET: 3396 cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_ALPHA; 3397 memcpy(&cmd->cc_info.alpha2, arg->cc_info.alpha2, 3); 3398 break; 3399 case CC_IS_SET: 3400 cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE); 3401 cmd->cc_info.country_code = 3402 cpu_to_le32(arg->cc_info.country_code); 3403 break; 3404 case REGDMN_IS_SET: 3405 cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_REGDOMAIN); 3406 cmd->cc_info.regdom_id = cpu_to_le32(arg->cc_info.regdom_id); 3407 break; 3408 default: 3409 ret = -EINVAL; 3410 goto out; 3411 } 3412 3413 ret = ath12k_wmi_cmd_send(wmi, skb, 3414 WMI_SET_INIT_COUNTRY_CMDID); 3415 3416 out: 3417 if (ret) { 3418 ath12k_warn(ar->ab, 3419 "failed to send WMI_SET_INIT_COUNTRY CMD :%d\n", 3420 ret); 3421 dev_kfree_skb(skb); 3422 } 3423 3424 return ret; 3425 } 3426 3427 int ath12k_wmi_send_set_current_country_cmd(struct ath12k *ar, 3428 struct wmi_set_current_country_arg *arg) 3429 { 3430 struct ath12k_wmi_pdev *wmi = ar->wmi; 3431 struct wmi_set_current_country_cmd *cmd; 3432 struct sk_buff *skb; 3433 int ret; 3434 3435 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 3436 if (!skb) 3437 return -ENOMEM; 3438 3439 cmd = (struct wmi_set_current_country_cmd *)skb->data; 3440 cmd->tlv_header = 3441 ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_CURRENT_COUNTRY_CMD, 3442 sizeof(*cmd)); 3443 3444 cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id); 3445 memcpy(&cmd->new_alpha2, &arg->alpha2, sizeof(arg->alpha2)); 3446 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_SET_CURRENT_COUNTRY_CMDID); 3447 3448 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 3449 "set current country pdev id %d alpha2 %c%c\n", 3450 ar->pdev->pdev_id, 3451 arg->alpha2[0], 3452 arg->alpha2[1]); 3453 3454 if (ret) { 3455 ath12k_warn(ar->ab, 3456 "failed to send WMI_SET_CURRENT_COUNTRY_CMDID: %d\n", ret); 3457 
dev_kfree_skb(skb); 3458 } 3459 3460 return ret; 3461 } 3462 3463 int ath12k_wmi_send_11d_scan_start_cmd(struct ath12k *ar, 3464 struct wmi_11d_scan_start_arg *arg) 3465 { 3466 struct ath12k_wmi_pdev *wmi = ar->wmi; 3467 struct wmi_11d_scan_start_cmd *cmd; 3468 struct sk_buff *skb; 3469 int ret; 3470 3471 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 3472 if (!skb) 3473 return -ENOMEM; 3474 3475 cmd = (struct wmi_11d_scan_start_cmd *)skb->data; 3476 cmd->tlv_header = 3477 ath12k_wmi_tlv_cmd_hdr(WMI_TAG_11D_SCAN_START_CMD, 3478 sizeof(*cmd)); 3479 3480 cmd->vdev_id = cpu_to_le32(arg->vdev_id); 3481 cmd->scan_period_msec = cpu_to_le32(arg->scan_period_msec); 3482 cmd->start_interval_msec = cpu_to_le32(arg->start_interval_msec); 3483 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_START_CMDID); 3484 3485 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 3486 "send 11d scan start vdev id %d period %d ms internal %d ms\n", 3487 arg->vdev_id, arg->scan_period_msec, 3488 arg->start_interval_msec); 3489 3490 if (ret) { 3491 ath12k_warn(ar->ab, 3492 "failed to send WMI_11D_SCAN_START_CMDID: %d\n", ret); 3493 dev_kfree_skb(skb); 3494 } 3495 3496 return ret; 3497 } 3498 3499 int ath12k_wmi_send_11d_scan_stop_cmd(struct ath12k *ar, u32 vdev_id) 3500 { 3501 struct ath12k_wmi_pdev *wmi = ar->wmi; 3502 struct wmi_11d_scan_stop_cmd *cmd; 3503 struct sk_buff *skb; 3504 int ret; 3505 3506 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 3507 if (!skb) 3508 return -ENOMEM; 3509 3510 cmd = (struct wmi_11d_scan_stop_cmd *)skb->data; 3511 cmd->tlv_header = 3512 ath12k_wmi_tlv_cmd_hdr(WMI_TAG_11D_SCAN_STOP_CMD, 3513 sizeof(*cmd)); 3514 3515 cmd->vdev_id = cpu_to_le32(vdev_id); 3516 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_STOP_CMDID); 3517 3518 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 3519 "send 11d scan stop vdev id %d\n", 3520 cmd->vdev_id); 3521 3522 if (ret) { 3523 ath12k_warn(ar->ab, 3524 "failed to send WMI_11D_SCAN_STOP_CMDID: %d\n", ret); 3525 dev_kfree_skb(skb); 3526 } 3527 
	return ret;
}

/* Enable TWT (target wake time) support in firmware for a pdev, using the
 * driver's compile-time default scheduler/threshold tuning.
 */
int
ath12k_wmi_send_twt_enable_cmd(struct ath12k *ar, u32 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_twt_enable_params_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_twt_enable_params_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_ENABLE_CMD,
						 len);
	cmd->pdev_id = cpu_to_le32(pdev_id);
	/* All tuning knobs below come from ATH12K_TWT_DEF_* defaults */
	cmd->sta_cong_timer_ms = cpu_to_le32(ATH12K_TWT_DEF_STA_CONG_TIMER_MS);
	cmd->default_slot_size = cpu_to_le32(ATH12K_TWT_DEF_DEFAULT_SLOT_SIZE);
	cmd->congestion_thresh_setup =
		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_SETUP);
	cmd->congestion_thresh_teardown =
		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_TEARDOWN);
	cmd->congestion_thresh_critical =
		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_CRITICAL);
	cmd->interference_thresh_teardown =
		cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN);
	cmd->interference_thresh_setup =
		cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_SETUP);
	cmd->min_no_sta_setup = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_SETUP);
	cmd->min_no_sta_teardown = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_TEARDOWN);
	cmd->no_of_bcast_mcast_slots =
		cpu_to_le32(ATH12K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS);
	cmd->min_no_twt_slots = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_TWT_SLOTS);
	cmd->max_no_sta_twt = cpu_to_le32(ATH12K_TWT_DEF_MAX_NO_STA_TWT);
	cmd->mode_check_interval = cpu_to_le32(ATH12K_TWT_DEF_MODE_CHECK_INTERVAL);
	cmd->add_sta_slot_interval = cpu_to_le32(ATH12K_TWT_DEF_ADD_STA_SLOT_INTERVAL);
	cmd->remove_sta_slot_interval =
		cpu_to_le32(ATH12K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL);
	/* TODO add MBSSID support */
	cmd->mbss_support = 0;

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_TWT_ENABLE_CMDID);
	if (ret) {
		ath12k_warn(ab, "Failed to send WMI_TWT_ENABLE_CMDID");
		dev_kfree_skb(skb);
	}
	return ret;
}

/* Disable TWT support in firmware for a pdev. */
int
ath12k_wmi_send_twt_disable_cmd(struct ath12k *ar, u32 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_twt_disable_params_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_twt_disable_params_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_DISABLE_CMD,
						 len);
	cmd->pdev_id = cpu_to_le32(pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_TWT_DISABLE_CMDID);
	if (ret) {
		ath12k_warn(ab, "Failed to send WMI_TWT_DISABLE_CMDID");
		dev_kfree_skb(skb);
	}
	return ret;
}

/* Configure OBSS PD (spatial reuse) thresholds for a vdev from the
 * mac80211-provided HE OBSS PD element.
 */
int
ath12k_wmi_send_obss_spr_cmd(struct ath12k *ar, u32 vdev_id,
			     struct ieee80211_he_obss_pd *he_obss_pd)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_obss_spatial_reuse_params_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_obss_spatial_reuse_params_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD,
						 len);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->enable = cpu_to_le32(he_obss_pd->enable);
	/* signed le32: offsets are dBm deltas and may be negative */
	cmd->obss_min = a_cpu_to_sle32(he_obss_pd->min_offset);
	cmd->obss_max = a_cpu_to_sle32(he_obss_pd->max_offset);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID);
	if (ret) {
		ath12k_warn(ab,
			    "Failed to send WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID");
		dev_kfree_skb(skb);
	}
	return ret;
}

/* Configure BSS color collision detection for a vdev: enable/disable
 * detection of our color in OBSS traffic, with the given detection period.
 */
int ath12k_wmi_obss_color_cfg_cmd(struct ath12k *ar, u32 vdev_id,
				  u8 bss_color, u32 period,
				  bool enable)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_obss_color_collision_cfg_params_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_obss_color_collision_cfg_params_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG,
						 len);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->evt_type = enable ? cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION) :
		cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION_DISABLE);
	cmd->current_bss_color = cpu_to_le32(bss_color);
	cmd->detection_period_ms = cpu_to_le32(period);
	cmd->scan_period_ms = cpu_to_le32(ATH12K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS);
	cmd->free_slot_expiry_time_ms = 0;
	cmd->flags = 0;

	/* NOTE(review): __le32 fields printed raw with %d — values appear
	 * byte-swapped on big-endian hosts; confirm.
	 */
	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi_send_obss_color_collision_cfg id %d type %d bss_color %d detect_period %d scan_period %d\n",
		   cmd->vdev_id, cmd->evt_type, cmd->current_bss_color,
		   cmd->detection_period_ms, cmd->scan_period_ms);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID);
	if (ret) {
		ath12k_warn(ab, "Failed to send WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID");
		dev_kfree_skb(skb);
	}
	return ret;
}

/* Enable or disable the BSS color change announcement for a vdev. */
int ath12k_wmi_send_bss_color_change_enable_cmd(struct ath12k *ar, u32 vdev_id,
						bool enable)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_bss_color_change_enable_params_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb =
ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_bss_color_change_enable_params_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BSS_COLOR_CHANGE_ENABLE,
						 len);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->enable = enable ? cpu_to_le32(1) : 0;

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi_send_bss_color_change_enable id %d enable %d\n",
		   cmd->vdev_id, cmd->enable);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_BSS_COLOR_CHANGE_ENABLE_CMDID);
	if (ret) {
		ath12k_warn(ab, "Failed to send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID");
		dev_kfree_skb(skb);
	}
	return ret;
}

/* Install the FILS discovery frame template for a vdev. The frame bytes are
 * carried in a trailing byte-array TLV, padded to a 4-byte boundary.
 */
int ath12k_wmi_fils_discovery_tmpl(struct ath12k *ar, u32 vdev_id,
				   struct sk_buff *tmpl)
{
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	int ret, len;
	size_t aligned_len;
	struct wmi_fils_discovery_tmpl_cmd *cmd;

	aligned_len = roundup(tmpl->len, 4);
	len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev %i set FILS discovery template\n", vdev_id);

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_fils_discovery_tmpl_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FILS_DISCOVERY_TMPL_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->buf_len = cpu_to_le32(tmpl->len);
	ptr = skb->data + sizeof(*cmd);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
	memcpy(tlv->value, tmpl->data, tmpl->len);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_FILS_DISCOVERY_TMPL_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "WMI vdev %i failed to send FILS discovery template command\n",
			    vdev_id);
		dev_kfree_skb(skb);
	}
	return ret;
}

/* Install the probe response frame template for a vdev. Layout: fixed cmd,
 * then a bcn_prb_info TLV (caps/erp left zero), then the frame bytes in a
 * padded byte-array TLV.
 */
int ath12k_wmi_probe_resp_tmpl(struct ath12k *ar, u32 vdev_id,
			       struct sk_buff *tmpl)
{
	struct wmi_probe_tmpl_cmd *cmd;
	struct ath12k_wmi_bcn_prb_info_params *probe_info;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	int ret, len;
	size_t aligned_len = roundup(tmpl->len, 4);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev %i set probe response template\n", vdev_id);

	len = sizeof(*cmd) + sizeof(*probe_info) + TLV_HDR_SIZE + aligned_len;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_probe_tmpl_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PRB_TMPL_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->buf_len = cpu_to_le32(tmpl->len);

	ptr = skb->data + sizeof(*cmd);

	probe_info = ptr;
	len = sizeof(*probe_info);
	probe_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO,
							len);
	probe_info->caps = 0;
	probe_info->erp = 0;

	ptr += sizeof(*probe_info);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
	memcpy(tlv->value, tmpl->data, tmpl->len);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_PRB_TMPL_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "WMI vdev %i failed to send probe response template command\n",
			    vdev_id);
		dev_kfree_skb(skb);
	}
	return ret;
}

/* Configure the FILS discovery / unsolicited broadcast probe response
 * transmission interval (in TUs) for a vdev; which of the two is selected
 * by unsol_bcast_probe_resp_enabled.
 */
int ath12k_wmi_fils_discovery(struct ath12k *ar, u32 vdev_id, u32 interval,
			      bool unsol_bcast_probe_resp_enabled)
{
	struct sk_buff *skb;
	int ret, len;
	struct wmi_fils_discovery_cmd *cmd;

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev %i set %s interval to %u TU\n",
		   vdev_id, unsol_bcast_probe_resp_enabled ?
		   "unsolicited broadcast probe response" : "FILS discovery",
		   interval);

	len = sizeof(*cmd);
	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_fils_discovery_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ENABLE_FILS_CMD,
						 len);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->interval = cpu_to_le32(interval);
	cmd->config = cpu_to_le32(unsol_bcast_probe_resp_enabled);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_ENABLE_FILS_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "WMI vdev %i failed to send FILS discovery enable/disable command\n",
			    vdev_id);
		dev_kfree_skb(skb);
	}
	return ret;
}

/* Fill per-pdev band-to-mac frequency ranges from the HAL regulatory caps,
 * based on each pdev's supported bands. arg must hold one entry per radio.
 */
static void
ath12k_fill_band_to_mac_param(struct ath12k_base *soc,
			      struct ath12k_wmi_pdev_band_arg *arg)
{
	u8 i;
	struct ath12k_wmi_hal_reg_capabilities_ext_arg *hal_reg_cap;
	struct ath12k_pdev *pdev;

	for (i = 0; i < soc->num_radios; i++) {
		pdev = &soc->pdevs[i];
		hal_reg_cap = &soc->hal_reg_cap[i];
		arg[i].pdev_id = pdev->pdev_id;

		switch (pdev->cap.supported_bands) {
		case WMI_HOST_WLAN_2GHZ_5GHZ_CAP:
			arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
			arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
			break;
		case WMI_HOST_WLAN_2GHZ_CAP:
			arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
			arg[i].end_freq = hal_reg_cap->high_2ghz_chan;
			break;
		case WMI_HOST_WLAN_5GHZ_CAP:
			arg[i].start_freq = hal_reg_cap->low_5ghz_chan;
			arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
			break;
		default:
			break;
		}
	}
}

/* Translate the host resource-config arg into the little-endian wire
 * structure sent with WMI_INIT; purely a field-by-field conversion.
 */
static void
ath12k_wmi_copy_resource_config(struct ath12k_base *ab,
				struct ath12k_wmi_resource_config_params *wmi_cfg,
				struct ath12k_wmi_resource_config_arg *tg_cfg)
{
	wmi_cfg->num_vdevs = cpu_to_le32(tg_cfg->num_vdevs);
	wmi_cfg->num_peers =
cpu_to_le32(tg_cfg->num_peers);
	wmi_cfg->num_offload_peers = cpu_to_le32(tg_cfg->num_offload_peers);
	wmi_cfg->num_offload_reorder_buffs =
		cpu_to_le32(tg_cfg->num_offload_reorder_buffs);
	wmi_cfg->num_peer_keys = cpu_to_le32(tg_cfg->num_peer_keys);
	wmi_cfg->num_tids = cpu_to_le32(tg_cfg->num_tids);
	wmi_cfg->ast_skid_limit = cpu_to_le32(tg_cfg->ast_skid_limit);
	wmi_cfg->tx_chain_mask = cpu_to_le32(tg_cfg->tx_chain_mask);
	wmi_cfg->rx_chain_mask = cpu_to_le32(tg_cfg->rx_chain_mask);
	wmi_cfg->rx_timeout_pri[0] = cpu_to_le32(tg_cfg->rx_timeout_pri[0]);
	wmi_cfg->rx_timeout_pri[1] = cpu_to_le32(tg_cfg->rx_timeout_pri[1]);
	wmi_cfg->rx_timeout_pri[2] = cpu_to_le32(tg_cfg->rx_timeout_pri[2]);
	wmi_cfg->rx_timeout_pri[3] = cpu_to_le32(tg_cfg->rx_timeout_pri[3]);
	wmi_cfg->rx_decap_mode = cpu_to_le32(tg_cfg->rx_decap_mode);
	wmi_cfg->scan_max_pending_req = cpu_to_le32(tg_cfg->scan_max_pending_req);
	wmi_cfg->bmiss_offload_max_vdev = cpu_to_le32(tg_cfg->bmiss_offload_max_vdev);
	wmi_cfg->roam_offload_max_vdev = cpu_to_le32(tg_cfg->roam_offload_max_vdev);
	wmi_cfg->roam_offload_max_ap_profiles =
		cpu_to_le32(tg_cfg->roam_offload_max_ap_profiles);
	wmi_cfg->num_mcast_groups = cpu_to_le32(tg_cfg->num_mcast_groups);
	wmi_cfg->num_mcast_table_elems = cpu_to_le32(tg_cfg->num_mcast_table_elems);
	wmi_cfg->mcast2ucast_mode = cpu_to_le32(tg_cfg->mcast2ucast_mode);
	wmi_cfg->tx_dbg_log_size = cpu_to_le32(tg_cfg->tx_dbg_log_size);
	wmi_cfg->num_wds_entries = cpu_to_le32(tg_cfg->num_wds_entries);
	wmi_cfg->dma_burst_size = cpu_to_le32(tg_cfg->dma_burst_size);
	wmi_cfg->mac_aggr_delim = cpu_to_le32(tg_cfg->mac_aggr_delim);
	wmi_cfg->rx_skip_defrag_timeout_dup_detection_check =
		cpu_to_le32(tg_cfg->rx_skip_defrag_timeout_dup_detection_check);
	wmi_cfg->vow_config = cpu_to_le32(tg_cfg->vow_config);
	wmi_cfg->gtk_offload_max_vdev = cpu_to_le32(tg_cfg->gtk_offload_max_vdev);
	wmi_cfg->num_msdu_desc = cpu_to_le32(tg_cfg->num_msdu_desc);
	wmi_cfg->max_frag_entries = cpu_to_le32(tg_cfg->max_frag_entries);
	wmi_cfg->num_tdls_vdevs = cpu_to_le32(tg_cfg->num_tdls_vdevs);
	wmi_cfg->num_tdls_conn_table_entries =
		cpu_to_le32(tg_cfg->num_tdls_conn_table_entries);
	wmi_cfg->beacon_tx_offload_max_vdev =
		cpu_to_le32(tg_cfg->beacon_tx_offload_max_vdev);
	wmi_cfg->num_multicast_filter_entries =
		cpu_to_le32(tg_cfg->num_multicast_filter_entries);
	wmi_cfg->num_wow_filters = cpu_to_le32(tg_cfg->num_wow_filters);
	wmi_cfg->num_keep_alive_pattern = cpu_to_le32(tg_cfg->num_keep_alive_pattern);
	wmi_cfg->keep_alive_pattern_size = cpu_to_le32(tg_cfg->keep_alive_pattern_size);
	wmi_cfg->max_tdls_concurrent_sleep_sta =
		cpu_to_le32(tg_cfg->max_tdls_concurrent_sleep_sta);
	wmi_cfg->max_tdls_concurrent_buffer_sta =
		cpu_to_le32(tg_cfg->max_tdls_concurrent_buffer_sta);
	wmi_cfg->wmi_send_separate = cpu_to_le32(tg_cfg->wmi_send_separate);
	wmi_cfg->num_ocb_vdevs = cpu_to_le32(tg_cfg->num_ocb_vdevs);
	wmi_cfg->num_ocb_channels = cpu_to_le32(tg_cfg->num_ocb_channels);
	wmi_cfg->num_ocb_schedules = cpu_to_le32(tg_cfg->num_ocb_schedules);
	wmi_cfg->bpf_instruction_size = cpu_to_le32(tg_cfg->bpf_instruction_size);
	wmi_cfg->max_bssid_rx_filters = cpu_to_le32(tg_cfg->max_bssid_rx_filters);
	wmi_cfg->use_pdev_id = cpu_to_le32(tg_cfg->use_pdev_id);
	/* flag1 always advertises 64-bit bss chan info and ACK RSSI reporting */
	wmi_cfg->flag1 = cpu_to_le32(tg_cfg->atf_config |
				     WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64 |
				     WMI_RSRC_CFG_FLAG1_ACK_RSSI);
	wmi_cfg->peer_map_unmap_version = cpu_to_le32(tg_cfg->peer_map_unmap_version);
	wmi_cfg->sched_params = cpu_to_le32(tg_cfg->sched_params);
	wmi_cfg->twt_ap_pdev_count = cpu_to_le32(tg_cfg->twt_ap_pdev_count);
	wmi_cfg->twt_ap_sta_count = cpu_to_le32(tg_cfg->twt_ap_sta_count);
	wmi_cfg->flags2 = le32_encode_bits(tg_cfg->peer_metadata_ver,
					   WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION);
	wmi_cfg->host_service_flags = cpu_to_le32(tg_cfg->is_reg_cc_ext_event_supported <<
						  WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT);
	/* advertise REO queue ref support only where hardware has the LUT */
	if (ab->hw_params->reoq_lut_support)
		wmi_cfg->host_service_flags |=
			cpu_to_le32(1 << WMI_RSRC_CFG_HOST_SVC_FLAG_REO_QREF_SUPPORT_BIT);
	wmi_cfg->ema_max_vap_cnt = cpu_to_le32(tg_cfg->ema_max_vap_cnt);
	wmi_cfg->ema_max_profile_period = cpu_to_le32(tg_cfg->ema_max_profile_period);
	wmi_cfg->flags2 |= cpu_to_le32(WMI_RSRC_CFG_FLAGS2_CALC_NEXT_DTIM_COUNT_SET);
}

/* Build and size the WMI_INIT command: fixed cmd + resource config TLV +
 * host memory chunk array + optional hw-mode / band-to-mac section.
 */
static int ath12k_init_cmd_send(struct ath12k_wmi_pdev *wmi,
				struct ath12k_wmi_init_cmd_arg *arg)
{
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct sk_buff *skb;
	struct wmi_init_cmd *cmd;
	struct ath12k_wmi_resource_config_params *cfg;
	struct ath12k_wmi_pdev_set_hw_mode_cmd *hw_mode;
	struct ath12k_wmi_pdev_band_to_mac_params *band_to_mac;
	struct ath12k_wmi_host_mem_chunk_params *host_mem_chunks;
	struct wmi_tlv *tlv;
	/* NOTE(review): ret declared size_t (unsigned) but holds negative
	 * error codes like -ENOMEM before being returned as int — confirm.
	 */
	size_t ret, len;
	void *ptr;
	u32 hw_mode_len = 0;
	u16 idx;

	if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX)
		hw_mode_len = sizeof(*hw_mode) + TLV_HDR_SIZE +
			      (arg->num_band_to_mac * sizeof(*band_to_mac));

	len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len +
	      (arg->num_mem_chunks ?
(sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS) : 0); 3982 3983 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 3984 if (!skb) 3985 return -ENOMEM; 3986 3987 cmd = (struct wmi_init_cmd *)skb->data; 3988 3989 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_INIT_CMD, 3990 sizeof(*cmd)); 3991 3992 ptr = skb->data + sizeof(*cmd); 3993 cfg = ptr; 3994 3995 ath12k_wmi_copy_resource_config(ab, cfg, &arg->res_cfg); 3996 3997 cfg->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_RESOURCE_CONFIG, 3998 sizeof(*cfg)); 3999 4000 ptr += sizeof(*cfg); 4001 host_mem_chunks = ptr + TLV_HDR_SIZE; 4002 len = sizeof(struct ath12k_wmi_host_mem_chunk_params); 4003 4004 for (idx = 0; idx < arg->num_mem_chunks; ++idx) { 4005 host_mem_chunks[idx].tlv_header = 4006 ath12k_wmi_tlv_hdr(WMI_TAG_WLAN_HOST_MEMORY_CHUNK, 4007 len); 4008 4009 host_mem_chunks[idx].ptr = cpu_to_le32(arg->mem_chunks[idx].paddr); 4010 host_mem_chunks[idx].size = cpu_to_le32(arg->mem_chunks[idx].len); 4011 host_mem_chunks[idx].req_id = cpu_to_le32(arg->mem_chunks[idx].req_id); 4012 4013 ath12k_dbg(ab, ATH12K_DBG_WMI, 4014 "WMI host mem chunk req_id %d paddr 0x%llx len %d\n", 4015 arg->mem_chunks[idx].req_id, 4016 (u64)arg->mem_chunks[idx].paddr, 4017 arg->mem_chunks[idx].len); 4018 } 4019 cmd->num_host_mem_chunks = cpu_to_le32(arg->num_mem_chunks); 4020 len = sizeof(struct ath12k_wmi_host_mem_chunk_params) * arg->num_mem_chunks; 4021 4022 /* num_mem_chunks is zero */ 4023 tlv = ptr; 4024 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len); 4025 ptr += TLV_HDR_SIZE + len; 4026 4027 if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX) { 4028 hw_mode = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)ptr; 4029 hw_mode->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD, 4030 sizeof(*hw_mode)); 4031 4032 hw_mode->hw_mode_index = cpu_to_le32(arg->hw_mode_id); 4033 hw_mode->num_band_to_mac = cpu_to_le32(arg->num_band_to_mac); 4034 4035 ptr += sizeof(*hw_mode); 4036 4037 len = arg->num_band_to_mac * sizeof(*band_to_mac); 4038 
tlv = ptr; 4039 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len); 4040 4041 ptr += TLV_HDR_SIZE; 4042 len = sizeof(*band_to_mac); 4043 4044 for (idx = 0; idx < arg->num_band_to_mac; idx++) { 4045 band_to_mac = (void *)ptr; 4046 4047 band_to_mac->tlv_header = 4048 ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BAND_TO_MAC, 4049 len); 4050 band_to_mac->pdev_id = cpu_to_le32(arg->band_to_mac[idx].pdev_id); 4051 band_to_mac->start_freq = 4052 cpu_to_le32(arg->band_to_mac[idx].start_freq); 4053 band_to_mac->end_freq = 4054 cpu_to_le32(arg->band_to_mac[idx].end_freq); 4055 ptr += sizeof(*band_to_mac); 4056 } 4057 } 4058 4059 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_INIT_CMDID); 4060 if (ret) { 4061 ath12k_warn(ab, "failed to send WMI_INIT_CMDID\n"); 4062 dev_kfree_skb(skb); 4063 } 4064 4065 return ret; 4066 } 4067 4068 int ath12k_wmi_pdev_lro_cfg(struct ath12k *ar, 4069 int pdev_id) 4070 { 4071 struct ath12k_wmi_pdev_lro_config_cmd *cmd; 4072 struct sk_buff *skb; 4073 int ret; 4074 4075 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd)); 4076 if (!skb) 4077 return -ENOMEM; 4078 4079 cmd = (struct ath12k_wmi_pdev_lro_config_cmd *)skb->data; 4080 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_LRO_INFO_CMD, 4081 sizeof(*cmd)); 4082 4083 get_random_bytes(cmd->th_4, sizeof(cmd->th_4)); 4084 get_random_bytes(cmd->th_6, sizeof(cmd->th_6)); 4085 4086 cmd->pdev_id = cpu_to_le32(pdev_id); 4087 4088 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 4089 "WMI lro cfg cmd pdev_id 0x%x\n", pdev_id); 4090 4091 ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_LRO_CONFIG_CMDID); 4092 if (ret) { 4093 ath12k_warn(ar->ab, 4094 "failed to send lro cfg req wmi cmd\n"); 4095 goto err; 4096 } 4097 4098 return 0; 4099 err: 4100 dev_kfree_skb(skb); 4101 return ret; 4102 } 4103 4104 int ath12k_wmi_wait_for_service_ready(struct ath12k_base *ab) 4105 { 4106 unsigned long time_left; 4107 4108 time_left = wait_for_completion_timeout(&ab->wmi_ab.service_ready, 4109 WMI_SERVICE_READY_TIMEOUT_HZ); 4110 if 
(!time_left)
		return -ETIMEDOUT;

	return 0;
}

/* Block until the firmware signals WMI unified ready (post-INIT), or
 * time out after WMI_SERVICE_READY_TIMEOUT_HZ.
 */
int ath12k_wmi_wait_for_unified_ready(struct ath12k_base *ab)
{
	unsigned long time_left;

	time_left = wait_for_completion_timeout(&ab->wmi_ab.unified_ready,
						WMI_SERVICE_READY_TIMEOUT_HZ);
	if (!time_left)
		return -ETIMEDOUT;

	return 0;
}

/* Send WMI_PDEV_SET_HW_MODE_CMDID to switch the SoC-level hw mode
 * (e.g. DBS/SBS). The skb is freed here on send failure.
 */
int ath12k_wmi_set_hw_mode(struct ath12k_base *ab,
			   enum wmi_host_hw_mode_config_type mode)
{
	struct ath12k_wmi_pdev_set_hw_mode_cmd *cmd;
	struct sk_buff *skb;
	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
	int len;
	int ret;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD,
						 sizeof(*cmd));

	/* NOTE(review): pdev_id is assigned without cpu_to_le32(), unlike
	 * hw_mode_index below — WMI_PDEV_ID_SOC is 0 so the bytes are the
	 * same either way, but confirm against the struct's declared type.
	 */
	cmd->pdev_id = WMI_PDEV_ID_SOC;
	cmd->hw_mode_index = cpu_to_le32(mode);

	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_PDEV_SET_HW_MODE_CMDID);
	if (ret) {
		ath12k_warn(ab, "failed to send WMI_PDEV_SET_HW_MODE_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

/* Assemble the WMI init command arguments from hw_params, the service
 * bitmap and the allocated host memory chunks, then send WMI_INIT_CMDID
 * via ath12k_init_cmd_send().
 */
int ath12k_wmi_cmd_init(struct ath12k_base *ab)
{
	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
	struct ath12k_wmi_init_cmd_arg arg = {};

	if (test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT,
		     ab->wmi_ab.svc_map))
		arg.res_cfg.is_reg_cc_ext_event_supported = true;

	/* hw-specific hook fills the default resource config */
	ab->hw_params->wmi_init(ab, &arg.res_cfg);
	ab->wow.wmi_conf_rx_decap_mode = arg.res_cfg.rx_decap_mode;

	arg.num_mem_chunks = wmi_ab->num_mem_chunks;
	arg.hw_mode_id = wmi_ab->preferred_hw_mode;
	arg.mem_chunks = wmi_ab->mem_chunks;

	/* WMI_HOST_HW_MODE_MAX makes ath12k_init_cmd_send() omit the
	 * PDEV_SET_HW_MODE sub-TLV entirely for single-pdev targets.
	 */
	if (ab->hw_params->single_pdev_only)
		arg.hw_mode_id = WMI_HOST_HW_MODE_MAX;

	arg.num_band_to_mac = ab->num_radios;
	ath12k_fill_band_to_mac_param(ab, arg.band_to_mac);

	ab->dp.peer_metadata_ver = arg.res_cfg.peer_metadata_ver;

	return ath12k_init_cmd_send(&wmi_ab->wmi[0], &arg);
}

/* Send the spectral scan configuration for a vdev; all scan parameters
 * come from @arg and are converted to little endian for the firmware.
 */
int ath12k_wmi_vdev_spectral_conf(struct ath12k *ar,
				  struct ath12k_wmi_vdev_spectral_conf_arg *arg)
{
	struct ath12k_wmi_vdev_spectral_conf_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct ath12k_wmi_vdev_spectral_conf_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	cmd->scan_count = cpu_to_le32(arg->scan_count);
	cmd->scan_period = cpu_to_le32(arg->scan_period);
	cmd->scan_priority = cpu_to_le32(arg->scan_priority);
	cmd->scan_fft_size = cpu_to_le32(arg->scan_fft_size);
	cmd->scan_gc_ena = cpu_to_le32(arg->scan_gc_ena);
	cmd->scan_restart_ena = cpu_to_le32(arg->scan_restart_ena);
	cmd->scan_noise_floor_ref = cpu_to_le32(arg->scan_noise_floor_ref);
	cmd->scan_init_delay = cpu_to_le32(arg->scan_init_delay);
	cmd->scan_nb_tone_thr = cpu_to_le32(arg->scan_nb_tone_thr);
	cmd->scan_str_bin_thr = cpu_to_le32(arg->scan_str_bin_thr);
	cmd->scan_wb_rpt_mode = cpu_to_le32(arg->scan_wb_rpt_mode);
	cmd->scan_rssi_rpt_mode = cpu_to_le32(arg->scan_rssi_rpt_mode);
	cmd->scan_rssi_thr = cpu_to_le32(arg->scan_rssi_thr);
	cmd->scan_pwr_format = cpu_to_le32(arg->scan_pwr_format);
	cmd->scan_rpt_mode = cpu_to_le32(arg->scan_rpt_mode);
	cmd->scan_bin_scale = cpu_to_le32(arg->scan_bin_scale);
	cmd->scan_dbm_adj = cpu_to_le32(arg->scan_dbm_adj);
	cmd->scan_chn_mask = cpu_to_le32(arg->scan_chn_mask);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI spectral scan config cmd vdev_id 0x%x\n",
		   arg->vdev_id);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send spectral scan config wmi cmd\n");
		goto err;
	}

	return 0;
err:
	dev_kfree_skb(skb);
	return ret;
}

/* Enable or trigger spectral scan on a vdev. @trigger and @enable are
 * passed through to firmware unmodified (beyond endian conversion).
 */
int ath12k_wmi_vdev_spectral_enable(struct ath12k *ar, u32 vdev_id,
				    u32 trigger, u32 enable)
{
	struct ath12k_wmi_vdev_spectral_enable_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct ath12k_wmi_vdev_spectral_enable_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->trigger_cmd = cpu_to_le32(trigger);
	cmd->enable_cmd = cpu_to_le32(enable);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI spectral enable cmd vdev id 0x%x\n",
		   vdev_id);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
				  WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send spectral enable wmi cmd\n");
		goto err;
	}

	return 0;
err:
	dev_kfree_skb(skb);
	return ret;
}

/* Register a host DMA ring (direct buffer ring) with the firmware:
 * base/head/tail index addresses are split into lo/hi 32-bit halves.
 */
int ath12k_wmi_pdev_dma_ring_cfg(struct ath12k *ar,
				 struct ath12k_wmi_pdev_dma_ring_cfg_arg *arg)
{
	struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DMA_RING_CFG_REQ,
						 sizeof(*cmd));

	cmd->pdev_id = cpu_to_le32(arg->pdev_id);
	cmd->module_id = cpu_to_le32(arg->module_id);
	cmd->base_paddr_lo = cpu_to_le32(arg->base_paddr_lo);
	cmd->base_paddr_hi = cpu_to_le32(arg->base_paddr_hi);
	cmd->head_idx_paddr_lo = cpu_to_le32(arg->head_idx_paddr_lo);
	cmd->head_idx_paddr_hi = cpu_to_le32(arg->head_idx_paddr_hi);
	cmd->tail_idx_paddr_lo = cpu_to_le32(arg->tail_idx_paddr_lo);
	cmd->tail_idx_paddr_hi = cpu_to_le32(arg->tail_idx_paddr_hi);
	cmd->num_elems = cpu_to_le32(arg->num_elems);
	cmd->buf_size = cpu_to_le32(arg->buf_size);
	cmd->num_resp_per_event = cpu_to_le32(arg->num_resp_per_event);
	cmd->event_timeout_ms = cpu_to_le32(arg->event_timeout_ms);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI DMA ring cfg req cmd pdev_id 0x%x\n",
		   arg->pdev_id);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
				  WMI_PDEV_DMA_RING_CFG_REQ_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send dma ring cfg req wmi cmd\n");
		goto err;
	}

	return 0;
err:
	dev_kfree_skb(skb);
	return ret;
}

/* TLV iterator callback: count one buf-release entry, bounded by the
 * count announced in the fixed params TLV.
 */
static int ath12k_wmi_dma_buf_entry_parse(struct ath12k_base *soc,
					  u16 tag, u16 len,
					  const void *ptr, void *data)
{
	struct ath12k_wmi_dma_buf_release_arg *arg = data;

	if (tag != WMI_TAG_DMA_BUF_RELEASE_ENTRY)
		return -EPROTO;

	if (arg->num_buf_entry >= le32_to_cpu(arg->fixed.num_buf_release_entry))
		return -ENOBUFS;

	arg->num_buf_entry++;
	return 0;
}

/* TLV iterator callback: count one spectral meta-data entry, bounded by
 * the count announced in the fixed params TLV.
 */
static int ath12k_wmi_dma_buf_meta_parse(struct ath12k_base *soc,
					 u16 tag, u16 len,
					 const void *ptr, void *data)
{
	struct ath12k_wmi_dma_buf_release_arg *arg = data;

	if (tag != WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA)
		return -EPROTO;

	if (arg->num_meta >= le32_to_cpu(arg->fixed.num_meta_data_entry))
		return -ENOBUFS;

	arg->num_meta++;

	return 0;
}

/* Top-level TLV parser for the DMA buf release event: captures the fixed
 * params, then the buf-entry array, then the meta-data array (the two
 * ARRAY_STRUCT TLVs are distinguished purely by arrival order).
 */
static int ath12k_wmi_dma_buf_parse(struct ath12k_base *ab,
				    u16 tag, u16 len,
				    const void *ptr, void *data)
{
	struct ath12k_wmi_dma_buf_release_arg *arg = data;
	const struct ath12k_wmi_dma_buf_release_fixed_params *fixed;
	u32
    pdev_id;
	int ret;

	switch (tag) {
	case WMI_TAG_DMA_BUF_RELEASE:
		fixed = ptr;
		arg->fixed = *fixed;
		/* translate firmware (HW) pdev id to the host (SW) id */
		pdev_id = DP_HW2SW_MACID(le32_to_cpu(fixed->pdev_id));
		arg->fixed.pdev_id = cpu_to_le32(pdev_id);
		break;
	case WMI_TAG_ARRAY_STRUCT:
		/* first ARRAY_STRUCT holds buf entries, second holds meta */
		if (!arg->buf_entry_done) {
			arg->num_buf_entry = 0;
			arg->buf_entry = ptr;

			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
						  ath12k_wmi_dma_buf_entry_parse,
						  arg);
			if (ret) {
				ath12k_warn(ab, "failed to parse dma buf entry tlv %d\n",
					    ret);
				return ret;
			}

			arg->buf_entry_done = true;
		} else if (!arg->meta_data_done) {
			arg->num_meta = 0;
			arg->meta_data = ptr;

			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
						  ath12k_wmi_dma_buf_meta_parse,
						  arg);
			if (ret) {
				ath12k_warn(ab, "failed to parse dma buf meta tlv %d\n",
					    ret);
				return ret;
			}

			arg->meta_data_done = true;
		}
		break;
	default:
		break;
	}
	return 0;
}

/* Handle WMI DMA ring buffer release: parse the TLVs and hand the
 * released buffers to the dbring layer.
 */
static void ath12k_wmi_pdev_dma_ring_buf_release_event(struct ath12k_base *ab,
						       struct sk_buff *skb)
{
	struct ath12k_wmi_dma_buf_release_arg arg = {};
	struct ath12k_dbring_buf_release_event param;
	int ret;

	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath12k_wmi_dma_buf_parse,
				  &arg);
	if (ret) {
		ath12k_warn(ab, "failed to parse dma buf release tlv %d\n", ret);
		return;
	}

	param.fixed = arg.fixed;
	param.buf_entry = arg.buf_entry;
	param.num_buf_entry = arg.num_buf_entry;
	param.meta_data = arg.meta_data;
	param.num_meta = arg.num_meta;

	ret = ath12k_dbring_buffer_release_event(ab, &param);
	if (ret) {
		ath12k_warn(ab, "failed to handle dma buf release event %d\n", ret);
		return;
	}
}

/* TLV iterator callback for hw-mode capability entries: bounds-checks
 * against num_hw_modes and accumulates the total phy count from each
 * mode's phy_id_map (fls() = number of phys in the bitmap).
 */
static int ath12k_wmi_hw_mode_caps_parse(struct ath12k_base *soc,
					 u16 tag, u16 len,
					 const void *ptr, void *data)
{
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
	struct ath12k_wmi_hw_mode_cap_params *hw_mode_cap;
	u32 phy_map = 0;

	if (tag != WMI_TAG_HW_MODE_CAPABILITIES)
		return -EPROTO;

	if (svc_rdy_ext->n_hw_mode_caps >= svc_rdy_ext->arg.num_hw_modes)
		return -ENOBUFS;

	hw_mode_cap = container_of(ptr, struct ath12k_wmi_hw_mode_cap_params,
				   hw_mode_id);
	svc_rdy_ext->n_hw_mode_caps++;

	phy_map = le32_to_cpu(hw_mode_cap->phy_id_map);
	svc_rdy_ext->tot_phy_id += fls(phy_map);

	return 0;
}

/* Iterate the hw-mode capability array and select the preferred hw mode
 * (lowest value in ath12k_hw_mode_pri_map wins). Fails if no supported
 * mode was found.
 */
static int ath12k_wmi_hw_mode_caps(struct ath12k_base *soc,
				   u16 len, const void *ptr, void *data)
{
	struct ath12k_svc_ext_info *svc_ext_info = &soc->wmi_ab.svc_ext_info;
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
	const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
	enum wmi_host_hw_mode_config_type mode, pref;
	u32 i;
	int ret;

	svc_rdy_ext->n_hw_mode_caps = 0;
	svc_rdy_ext->hw_mode_caps = ptr;

	ret = ath12k_wmi_tlv_iter(soc, ptr, len,
				  ath12k_wmi_hw_mode_caps_parse,
				  svc_rdy_ext);
	if (ret) {
		ath12k_warn(soc, "failed to parse tlv %d\n", ret);
		return ret;
	}

	for (i = 0 ; i < svc_rdy_ext->n_hw_mode_caps; i++) {
		hw_mode_caps = &svc_rdy_ext->hw_mode_caps[i];
		mode = le32_to_cpu(hw_mode_caps->hw_mode_id);

		if (mode >= WMI_HOST_HW_MODE_MAX)
			continue;

		pref = soc->wmi_ab.preferred_hw_mode;

		if (ath12k_hw_mode_pri_map[mode] < ath12k_hw_mode_pri_map[pref]) {
			svc_rdy_ext->pref_hw_mode_caps = *hw_mode_caps;
			soc->wmi_ab.preferred_hw_mode = mode;
		}
	}

	svc_ext_info->num_hw_modes = svc_rdy_ext->n_hw_mode_caps;

	ath12k_dbg(soc, ATH12K_DBG_WMI, "num hw modes %u preferred_hw_mode %d\n",
		   svc_ext_info->num_hw_modes, soc->wmi_ab.preferred_hw_mode);

	if (soc->wmi_ab.preferred_hw_mode == WMI_HOST_HW_MODE_MAX)
return -EINVAL;

	return 0;
}

/* TLV iterator callback: copy one MAC-PHY capability entry into a flat
 * buffer allocated lazily on the first entry (GFP_ATOMIC: called from
 * the WMI event path). Entry count is bounded by tot_phy_id.
 */
static int ath12k_wmi_mac_phy_caps_parse(struct ath12k_base *soc,
					 u16 tag, u16 len,
					 const void *ptr, void *data)
{
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;

	if (tag != WMI_TAG_MAC_PHY_CAPABILITIES)
		return -EPROTO;

	if (svc_rdy_ext->n_mac_phy_caps >= svc_rdy_ext->tot_phy_id)
		return -ENOBUFS;

	/* clamp to our struct size so a larger firmware TLV cannot overrun */
	len = min_t(u16, len, sizeof(struct ath12k_wmi_mac_phy_caps_params));
	if (!svc_rdy_ext->n_mac_phy_caps) {
		svc_rdy_ext->mac_phy_caps = kzalloc((svc_rdy_ext->tot_phy_id) * len,
						    GFP_ATOMIC);
		if (!svc_rdy_ext->mac_phy_caps)
			return -ENOMEM;
	}

	memcpy(svc_rdy_ext->mac_phy_caps + svc_rdy_ext->n_mac_phy_caps, ptr, len);
	svc_rdy_ext->n_mac_phy_caps++;
	return 0;
}

/* TLV iterator callback: count one extended HAL reg capability entry,
 * bounded by the phy count from the SoC HAL reg caps TLV.
 */
static int ath12k_wmi_ext_hal_reg_caps_parse(struct ath12k_base *soc,
					     u16 tag, u16 len,
					     const void *ptr, void *data)
{
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;

	if (tag != WMI_TAG_HAL_REG_CAPABILITIES_EXT)
		return -EPROTO;

	if (svc_rdy_ext->n_ext_hal_reg_caps >= svc_rdy_ext->arg.num_phy)
		return -ENOBUFS;

	svc_rdy_ext->n_ext_hal_reg_caps++;
	return 0;
}

/* Validate the extended HAL reg capability array and extract one
 * ath12k_wmi_hal_reg_capabilities_ext_arg per phy into soc->hal_reg_cap.
 */
static int ath12k_wmi_ext_hal_reg_caps(struct ath12k_base *soc,
				       u16 len, const void *ptr, void *data)
{
	struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
	struct ath12k_wmi_hal_reg_capabilities_ext_arg reg_cap;
	int ret;
	u32 i;

	svc_rdy_ext->n_ext_hal_reg_caps = 0;
	svc_rdy_ext->ext_hal_reg_caps = ptr;
	ret = ath12k_wmi_tlv_iter(soc, ptr, len,
				  ath12k_wmi_ext_hal_reg_caps_parse,
				  svc_rdy_ext);
	if (ret) {
		ath12k_warn(soc, "failed to parse tlv %d\n", ret);
		return ret;
	}

	for (i = 0; i < svc_rdy_ext->arg.num_phy; i++) {
		ret = ath12k_pull_reg_cap_svc_rdy_ext(wmi_handle,
						      svc_rdy_ext->soc_hal_reg_caps,
						      svc_rdy_ext->ext_hal_reg_caps, i,
						      &reg_cap);
		if (ret) {
			ath12k_warn(soc, "failed to extract reg cap %d\n", i);
			return ret;
		}

		if (reg_cap.phy_id >= MAX_RADIOS) {
			ath12k_warn(soc, "unexpected phy id %u\n", reg_cap.phy_id);
			return -EINVAL;
		}

		soc->hal_reg_cap[reg_cap.phy_id] = reg_cap;
	}
	return 0;
}

/* Walk the preferred hw mode's phy_id_map and pull the MAC-PHY caps for
 * each radio into soc->pdevs[]. For single_pdev_only targets all caps
 * are accumulated into pdevs[0] and num_radios is forced to 1.
 */
static int ath12k_wmi_ext_soc_hal_reg_caps_parse(struct ath12k_base *soc,
						 u16 len, const void *ptr,
						 void *data)
{
	struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
	u8 hw_mode_id = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.hw_mode_id);
	u32 phy_id_map;
	int pdev_index = 0;
	int ret;

	svc_rdy_ext->soc_hal_reg_caps = ptr;
	svc_rdy_ext->arg.num_phy = le32_to_cpu(svc_rdy_ext->soc_hal_reg_caps->num_phy);

	soc->num_radios = 0;
	phy_id_map = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.phy_id_map);
	soc->fw_pdev_count = 0;

	while (phy_id_map && soc->num_radios < MAX_RADIOS) {
		ret = ath12k_pull_mac_phy_cap_svc_ready_ext(wmi_handle,
							    svc_rdy_ext,
							    hw_mode_id, soc->num_radios,
							    &soc->pdevs[pdev_index]);
		if (ret) {
			ath12k_warn(soc, "failed to extract mac caps, idx :%d\n",
				    soc->num_radios);
			return ret;
		}

		soc->num_radios++;

		/* For single_pdev_only targets,
		 * save mac_phy capability in the same pdev
		 */
		if (soc->hw_params->single_pdev_only)
			pdev_index = 0;
		else
			pdev_index = soc->num_radios;

		/* TODO: mac_phy_cap prints */
		phy_id_map >>= 1;
	}

	if (soc->hw_params->single_pdev_only) {
		soc->num_radios = 1;
		soc->pdevs[0].pdev_id = 0;
	}

	return 0;
}

/* TLV iterator callback: count one DMA ring capability entry. */
static int ath12k_wmi_dma_ring_caps_parse(struct ath12k_base *soc,
					  u16 tag, u16
					      len,
					  const void *ptr, void *data)
{
	struct ath12k_wmi_dma_ring_caps_parse *parse = data;

	if (tag != WMI_TAG_DMA_RING_CAPABILITIES)
		return -EPROTO;

	parse->n_dma_ring_caps++;
	return 0;
}

/* Allocate ab->db_caps for @num_cap direct-buffer ring capabilities
 * (GFP_ATOMIC: called from the WMI event path).
 */
static int ath12k_wmi_alloc_dbring_caps(struct ath12k_base *ab,
					u32 num_cap)
{
	size_t sz;
	void *ptr;

	sz = num_cap * sizeof(struct ath12k_dbring_cap);
	ptr = kzalloc(sz, GFP_ATOMIC);
	if (!ptr)
		return -ENOMEM;

	ab->db_caps = ptr;
	ab->num_db_cap = num_cap;

	return 0;
}

/* Release ab->db_caps and reset the count. */
static void ath12k_wmi_free_dbring_caps(struct ath12k_base *ab)
{
	kfree(ab->db_caps);
	ab->db_caps = NULL;
	ab->num_db_cap = 0;
}

/* Parse the firmware's DMA ring capability array into ab->db_caps.
 * A second occurrence (e.g. from ext2 after ext) is ignored; an invalid
 * module id frees everything and fails.
 */
static int ath12k_wmi_dma_ring_caps(struct ath12k_base *ab,
				    u16 len, const void *ptr, void *data)
{
	struct ath12k_wmi_dma_ring_caps_parse *dma_caps_parse = data;
	struct ath12k_wmi_dma_ring_caps_params *dma_caps;
	struct ath12k_dbring_cap *dir_buff_caps;
	int ret;
	u32 i;

	dma_caps_parse->n_dma_ring_caps = 0;
	dma_caps = (struct ath12k_wmi_dma_ring_caps_params *)ptr;
	ret = ath12k_wmi_tlv_iter(ab, ptr, len,
				  ath12k_wmi_dma_ring_caps_parse,
				  dma_caps_parse);
	if (ret) {
		ath12k_warn(ab, "failed to parse dma ring caps tlv %d\n", ret);
		return ret;
	}

	if (!dma_caps_parse->n_dma_ring_caps)
		return 0;

	if (ab->num_db_cap) {
		ath12k_warn(ab, "Already processed, so ignoring dma ring caps\n");
		return 0;
	}

	ret = ath12k_wmi_alloc_dbring_caps(ab, dma_caps_parse->n_dma_ring_caps);
	if (ret)
		return ret;

	dir_buff_caps = ab->db_caps;
	for (i = 0; i < dma_caps_parse->n_dma_ring_caps; i++) {
		if (le32_to_cpu(dma_caps[i].module_id) >= WMI_DIRECT_BUF_MAX) {
			ath12k_warn(ab, "Invalid module id %d\n",
				    le32_to_cpu(dma_caps[i].module_id));
			ret = -EINVAL;
			goto free_dir_buff;
		}

		dir_buff_caps[i].id = le32_to_cpu(dma_caps[i].module_id);
		dir_buff_caps[i].pdev_id =
			DP_HW2SW_MACID(le32_to_cpu(dma_caps[i].pdev_id));
		dir_buff_caps[i].min_elem = le32_to_cpu(dma_caps[i].min_elem);
		dir_buff_caps[i].min_buf_sz = le32_to_cpu(dma_caps[i].min_buf_sz);
		dir_buff_caps[i].min_buf_align = le32_to_cpu(dma_caps[i].min_buf_align);
	}

	return 0;

free_dir_buff:
	ath12k_wmi_free_dbring_caps(ab);
	return ret;
}

/* Copy the per-phy id/bands/frequency-range fields from one firmware
 * MAC-PHY capability entry into the host-side info record.
 */
static void
ath12k_wmi_save_mac_phy_info(struct ath12k_base *ab,
			     const struct ath12k_wmi_mac_phy_caps_params *mac_phy_cap,
			     struct ath12k_svc_ext_mac_phy_info *mac_phy_info)
{
	mac_phy_info->phy_id = __le32_to_cpu(mac_phy_cap->phy_id);
	mac_phy_info->supported_bands = __le32_to_cpu(mac_phy_cap->supported_bands);
	mac_phy_info->hw_freq_range.low_2ghz_freq =
		__le32_to_cpu(mac_phy_cap->low_2ghz_chan_freq);
	mac_phy_info->hw_freq_range.high_2ghz_freq =
		__le32_to_cpu(mac_phy_cap->high_2ghz_chan_freq);
	mac_phy_info->hw_freq_range.low_5ghz_freq =
		__le32_to_cpu(mac_phy_cap->low_5ghz_chan_freq);
	mac_phy_info->hw_freq_range.high_5ghz_freq =
		__le32_to_cpu(mac_phy_cap->high_5ghz_chan_freq);
}

/* Flatten the firmware's (hw mode x phy) MAC-PHY capability matrix into
 * svc_ext_info->mac_phy_info[]. The mac_phy_caps array holds one entry
 * per set bit of each mode's phy_id_map, in hw-mode order; presumably
 * the firmware guarantees that layout (it is how the entries were
 * copied in ath12k_wmi_mac_phy_caps_parse()).
 */
static void
ath12k_wmi_save_all_mac_phy_info(struct ath12k_base *ab,
				 struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext)
{
	struct ath12k_svc_ext_info *svc_ext_info = &ab->wmi_ab.svc_ext_info;
	const struct ath12k_wmi_mac_phy_caps_params *mac_phy_cap;
	const struct ath12k_wmi_hw_mode_cap_params *hw_mode_cap;
	struct ath12k_svc_ext_mac_phy_info *mac_phy_info;
	u32 hw_mode_id, phy_bit_map;
	u8 hw_idx;

	mac_phy_info = &svc_ext_info->mac_phy_info[0];
	mac_phy_cap = svc_rdy_ext->mac_phy_caps;

	for (hw_idx = 0; hw_idx < svc_ext_info->num_hw_modes; hw_idx++) {
		hw_mode_cap = &svc_rdy_ext->hw_mode_caps[hw_idx];
		hw_mode_id = __le32_to_cpu(hw_mode_cap->hw_mode_id);
		phy_bit_map = __le32_to_cpu(hw_mode_cap->phy_id_map);

		while (phy_bit_map) {
			ath12k_wmi_save_mac_phy_info(ab, mac_phy_cap, mac_phy_info);
			mac_phy_info->hw_mode_config_type =
				le32_get_bits(hw_mode_cap->hw_mode_config_type,
					      WMI_HW_MODE_CAP_CFG_TYPE);
			ath12k_dbg(ab, ATH12K_DBG_WMI,
				   "hw_idx %u hw_mode_id %u hw_mode_config_type %u supported_bands %u phy_id %u 2 GHz [%u - %u] 5 GHz [%u - %u]\n",
				   hw_idx, hw_mode_id,
				   mac_phy_info->hw_mode_config_type,
				   mac_phy_info->supported_bands, mac_phy_info->phy_id,
				   mac_phy_info->hw_freq_range.low_2ghz_freq,
				   mac_phy_info->hw_freq_range.high_2ghz_freq,
				   mac_phy_info->hw_freq_range.low_5ghz_freq,
				   mac_phy_info->hw_freq_range.high_5ghz_freq);

			mac_phy_cap++;
			mac_phy_info++;

			phy_bit_map >>= 1;
		}
	}
}

/* Top-level TLV parser for WMI_SERVICE_READY_EXT_EVENTID. The several
 * WMI_TAG_ARRAY_STRUCT TLVs carry no identifying tag of their own, so
 * they are dispatched purely by arrival order via the *_done flags:
 * hw mode caps, mac-phy caps, ext HAL reg caps, two chainmask arrays,
 * OEM DMA ring caps, then DMA ring caps.
 */
static int ath12k_wmi_svc_rdy_ext_parse(struct ath12k_base *ab,
					u16 tag, u16 len,
					const void *ptr, void *data)
{
	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
	int ret;

	switch (tag) {
	case WMI_TAG_SERVICE_READY_EXT_EVENT:
		ret = ath12k_pull_svc_ready_ext(wmi_handle, ptr,
						&svc_rdy_ext->arg);
		if (ret) {
			ath12k_warn(ab, "unable to extract ext params\n");
			return ret;
		}
		break;

	case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS:
		svc_rdy_ext->hw_caps = ptr;
		svc_rdy_ext->arg.num_hw_modes =
			le32_to_cpu(svc_rdy_ext->hw_caps->num_hw_modes);
		break;

	case WMI_TAG_SOC_HAL_REG_CAPABILITIES:
		ret = ath12k_wmi_ext_soc_hal_reg_caps_parse(ab, len, ptr,
							    svc_rdy_ext);
		if (ret)
			return ret;
		break;

	case WMI_TAG_ARRAY_STRUCT:
		if (!svc_rdy_ext->hw_mode_done) {
			ret = ath12k_wmi_hw_mode_caps(ab, len, ptr, svc_rdy_ext);
			if (ret)
				return ret;

			svc_rdy_ext->hw_mode_done = true;
		} else if (!svc_rdy_ext->mac_phy_done) {
			svc_rdy_ext->n_mac_phy_caps = 0;
			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
						  ath12k_wmi_mac_phy_caps_parse,
						  svc_rdy_ext);
			if (ret) {
				ath12k_warn(ab, "failed to parse tlv %d\n", ret);
				return ret;
			}

			ath12k_wmi_save_all_mac_phy_info(ab, svc_rdy_ext);

			svc_rdy_ext->mac_phy_done = true;
		} else if (!svc_rdy_ext->ext_hal_reg_done) {
			ret = ath12k_wmi_ext_hal_reg_caps(ab, len, ptr, svc_rdy_ext);
			if (ret)
				return ret;

			svc_rdy_ext->ext_hal_reg_done = true;
		} else if (!svc_rdy_ext->mac_phy_chainmask_combo_done) {
			/* chainmask combo / cap / OEM DMA arrays are
			 * currently skipped, only the ordering state is
			 * advanced.
			 */
			svc_rdy_ext->mac_phy_chainmask_combo_done = true;
		} else if (!svc_rdy_ext->mac_phy_chainmask_cap_done) {
			svc_rdy_ext->mac_phy_chainmask_cap_done = true;
		} else if (!svc_rdy_ext->oem_dma_ring_cap_done) {
			svc_rdy_ext->oem_dma_ring_cap_done = true;
		} else if (!svc_rdy_ext->dma_ring_cap_done) {
			ret = ath12k_wmi_dma_ring_caps(ab, len, ptr,
						       &svc_rdy_ext->dma_caps_parse);
			if (ret)
				return ret;

			svc_rdy_ext->dma_ring_cap_done = true;
		}
		break;

	default:
		break;
	}
	return 0;
}

/* Handle WMI_SERVICE_READY_EXT_EVENTID. Completes service_ready only
 * when no EXT2 message will follow; the temporary mac_phy_caps copy is
 * freed on both paths, db_caps only on failure.
 */
static int ath12k_service_ready_ext_event(struct ath12k_base *ab,
					  struct sk_buff *skb)
{
	struct ath12k_wmi_svc_rdy_ext_parse svc_rdy_ext = { };
	int ret;

	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath12k_wmi_svc_rdy_ext_parse,
				  &svc_rdy_ext);
	if (ret) {
		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
		goto err;
	}

	if (!test_bit(WMI_TLV_SERVICE_EXT2_MSG, ab->wmi_ab.svc_map))
		complete(&ab->wmi_ab.service_ready);

	kfree(svc_rdy_ext.mac_phy_caps);
	return 0;

err:
	kfree(svc_rdy_ext.mac_phy_caps);
	ath12k_wmi_free_dbring_caps(ab);
	return ret;
}

/* Extract the fixed params of WMI_SERVICE_READY_EXT2_EVENTID into @arg. */
static int ath12k_pull_svc_ready_ext2(struct ath12k_wmi_pdev *wmi_handle,
				      const void *ptr,
				      struct ath12k_wmi_svc_rdy_ext2_arg *arg)
{
	const struct wmi_service_ready_ext2_event *ev = ptr;

	if (!ev)
		return -EINVAL;

	arg->reg_db_version = le32_to_cpu(ev->reg_db_version);
	arg->hw_min_max_tx_power_2ghz = le32_to_cpu(ev->hw_min_max_tx_power_2ghz);
	arg->hw_min_max_tx_power_5ghz = le32_to_cpu(ev->hw_min_max_tx_power_5ghz);
	arg->chwidth_num_peer_caps = le32_to_cpu(ev->chwidth_num_peer_caps);
	arg->preamble_puncture_bw = le32_to_cpu(ev->preamble_puncture_bw);
	arg->max_user_per_ppdu_ofdma = le32_to_cpu(ev->max_user_per_ppdu_ofdma);
	arg->max_user_per_ppdu_mumimo = le32_to_cpu(ev->max_user_per_ppdu_mumimo);
	arg->target_cap_flags = le32_to_cpu(ev->target_cap_flags);
	return 0;
}

/* Copy EHT MAC/PHY capabilities, MCS sets and PPE thresholds from the
 * firmware arrays into the per-band capability record. For 6 GHz the
 * previously stored 320 MHz support bit is preserved across the
 * overwrite of eht_cap_phy_info.
 */
static void ath12k_wmi_eht_caps_parse(struct ath12k_pdev *pdev, u32 band,
				      const __le32 cap_mac_info[],
				      const __le32 cap_phy_info[],
				      const __le32 supp_mcs[],
				      const struct ath12k_wmi_ppe_threshold_params *ppet,
				      __le32 cap_info_internal)
{
	struct ath12k_band_cap *cap_band = &pdev->cap.band[band];
	u32 support_320mhz;
	u8 i;

	/* stash the 320 MHz bit before cap_phy_info is overwritten below */
	if (band == NL80211_BAND_6GHZ)
		support_320mhz = cap_band->eht_cap_phy_info[0] &
				 IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;

	for (i = 0; i < WMI_MAX_EHTCAP_MAC_SIZE; i++)
		cap_band->eht_cap_mac_info[i] = le32_to_cpu(cap_mac_info[i]);

	for (i = 0; i < WMI_MAX_EHTCAP_PHY_SIZE; i++)
		cap_band->eht_cap_phy_info[i] = le32_to_cpu(cap_phy_info[i]);

	if (band == NL80211_BAND_6GHZ)
		cap_band->eht_cap_phy_info[0] |= support_320mhz;

	cap_band->eht_mcs_20_only = le32_to_cpu(supp_mcs[0]);
	cap_band->eht_mcs_80 = le32_to_cpu(supp_mcs[1]);
	/* 160/320 MHz MCS sets do not exist on 2.4 GHz */
	if (band != NL80211_BAND_2GHZ) {
		cap_band->eht_mcs_160 = le32_to_cpu(supp_mcs[2]);
		cap_band->eht_mcs_320 = le32_to_cpu(supp_mcs[3]);
	}

	cap_band->eht_ppet.numss_m1 = le32_to_cpu(ppet->numss_m1);
	cap_band->eht_ppet.ru_bit_mask = le32_to_cpu(ppet->ru_info);
	for (i = 0; i < WMI_MAX_NUM_SS; i++)
		cap_band->eht_ppet.ppet16_ppet8_ru3_ru0[i] =
			le32_to_cpu(ppet->ppet16_ppet8_ru3_ru0[i]);

	cap_band->eht_cap_info_internal = le32_to_cpu(cap_info_internal);
}

/* Apply one extended MAC-PHY capability TLV to the matching pdev's
 * per-band EHT caps. The 5 GHz firmware arrays are reused for the
 * 6 GHz band; for single-pdev targets the WMI_HOST_HW_MODE_SINGLE entry
 * only contributes the 320 MHz-in-6GHz bit.
 */
static int
ath12k_wmi_tlv_mac_phy_caps_ext_parse(struct ath12k_base *ab,
				      const struct ath12k_wmi_caps_ext_params *caps,
				      struct ath12k_pdev *pdev)
{
	struct ath12k_band_cap *cap_band;
	u32 bands, support_320mhz;
	int i;

	if (ab->hw_params->single_pdev_only) {
		if (caps->hw_mode_id == WMI_HOST_HW_MODE_SINGLE) {
			support_320mhz = le32_to_cpu(caps->eht_cap_phy_info_5ghz[0]) &
					 IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
			cap_band = &pdev->cap.band[NL80211_BAND_6GHZ];
			cap_band->eht_cap_phy_info[0] |= support_320mhz;
			return 0;
		}

		for (i = 0; i < ab->fw_pdev_count; i++) {
			struct ath12k_fw_pdev *fw_pdev = &ab->fw_pdev[i];

			if (fw_pdev->pdev_id == ath12k_wmi_caps_ext_get_pdev_id(caps) &&
			    fw_pdev->phy_id == le32_to_cpu(caps->phy_id)) {
				bands = fw_pdev->supported_bands;
				break;
			}
		}

		if (i == ab->fw_pdev_count)
			return -EINVAL;
	} else {
		bands = pdev->cap.supported_bands;
	}

	if (bands & WMI_HOST_WLAN_2GHZ_CAP) {
		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_2GHZ,
					  caps->eht_cap_mac_info_2ghz,
					  caps->eht_cap_phy_info_2ghz,
					  caps->eht_supp_mcs_ext_2ghz,
					  &caps->eht_ppet_2ghz,
					  caps->eht_cap_info_internal);
	}

	if (bands & WMI_HOST_WLAN_5GHZ_CAP) {
		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_5GHZ,
					  caps->eht_cap_mac_info_5ghz,
					  caps->eht_cap_phy_info_5ghz,
					  caps->eht_supp_mcs_ext_5ghz,
					  &caps->eht_ppet_5ghz,
					  caps->eht_cap_info_internal);

		/* 6 GHz shares the firmware's 5 GHz capability arrays */
		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_6GHZ,
					  caps->eht_cap_mac_info_5ghz,
					  caps->eht_cap_phy_info_5ghz,
					  caps->eht_supp_mcs_ext_5ghz,
					  &caps->eht_ppet_5ghz,
					  caps->eht_cap_info_internal);
5011 } 5012 5013 pdev->cap.eml_cap = le32_to_cpu(caps->eml_capability); 5014 pdev->cap.mld_cap = le32_to_cpu(caps->mld_capability); 5015 5016 return 0; 5017 } 5018 5019 static int ath12k_wmi_tlv_mac_phy_caps_ext(struct ath12k_base *ab, u16 tag, 5020 u16 len, const void *ptr, 5021 void *data) 5022 { 5023 const struct ath12k_wmi_caps_ext_params *caps = ptr; 5024 int i = 0, ret; 5025 5026 if (tag != WMI_TAG_MAC_PHY_CAPABILITIES_EXT) 5027 return -EPROTO; 5028 5029 if (ab->hw_params->single_pdev_only) { 5030 if (ab->wmi_ab.preferred_hw_mode != le32_to_cpu(caps->hw_mode_id) && 5031 caps->hw_mode_id != WMI_HOST_HW_MODE_SINGLE) 5032 return 0; 5033 } else { 5034 for (i = 0; i < ab->num_radios; i++) { 5035 if (ab->pdevs[i].pdev_id == 5036 ath12k_wmi_caps_ext_get_pdev_id(caps)) 5037 break; 5038 } 5039 5040 if (i == ab->num_radios) 5041 return -EINVAL; 5042 } 5043 5044 ret = ath12k_wmi_tlv_mac_phy_caps_ext_parse(ab, caps, &ab->pdevs[i]); 5045 if (ret) { 5046 ath12k_warn(ab, 5047 "failed to parse extended MAC PHY capabilities for pdev %d: %d\n", 5048 ret, ab->pdevs[i].pdev_id); 5049 return ret; 5050 } 5051 5052 return 0; 5053 } 5054 5055 static void 5056 ath12k_wmi_update_freq_info(struct ath12k_base *ab, 5057 struct ath12k_svc_ext_mac_phy_info *mac_cap, 5058 enum ath12k_hw_mode mode, 5059 u32 phy_id) 5060 { 5061 struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info; 5062 struct ath12k_hw_mode_freq_range_arg *mac_range; 5063 5064 mac_range = &hw_mode_info->freq_range_caps[mode][phy_id]; 5065 5066 if (mac_cap->supported_bands & WMI_HOST_WLAN_2GHZ_CAP) { 5067 mac_range->low_2ghz_freq = max_t(u32, 5068 mac_cap->hw_freq_range.low_2ghz_freq, 5069 ATH12K_MIN_2GHZ_FREQ); 5070 mac_range->high_2ghz_freq = mac_cap->hw_freq_range.high_2ghz_freq ? 
5071 min_t(u32, 5072 mac_cap->hw_freq_range.high_2ghz_freq, 5073 ATH12K_MAX_2GHZ_FREQ) : 5074 ATH12K_MAX_2GHZ_FREQ; 5075 } 5076 5077 if (mac_cap->supported_bands & WMI_HOST_WLAN_5GHZ_CAP) { 5078 mac_range->low_5ghz_freq = max_t(u32, 5079 mac_cap->hw_freq_range.low_5ghz_freq, 5080 ATH12K_MIN_5GHZ_FREQ); 5081 mac_range->high_5ghz_freq = mac_cap->hw_freq_range.high_5ghz_freq ? 5082 min_t(u32, 5083 mac_cap->hw_freq_range.high_5ghz_freq, 5084 ATH12K_MAX_6GHZ_FREQ) : 5085 ATH12K_MAX_6GHZ_FREQ; 5086 } 5087 } 5088 5089 static bool 5090 ath12k_wmi_all_phy_range_updated(struct ath12k_base *ab, 5091 enum ath12k_hw_mode hwmode) 5092 { 5093 struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info; 5094 struct ath12k_hw_mode_freq_range_arg *mac_range; 5095 u8 phy_id; 5096 5097 for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) { 5098 mac_range = &hw_mode_info->freq_range_caps[hwmode][phy_id]; 5099 /* modify SBS/DBS range only when both phy for DBS are filled */ 5100 if (!mac_range->low_2ghz_freq && !mac_range->low_5ghz_freq) 5101 return false; 5102 } 5103 5104 return true; 5105 } 5106 5107 static void ath12k_wmi_update_dbs_freq_info(struct ath12k_base *ab) 5108 { 5109 struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info; 5110 struct ath12k_hw_mode_freq_range_arg *mac_range; 5111 u8 phy_id; 5112 5113 mac_range = hw_mode_info->freq_range_caps[ATH12K_HW_MODE_DBS]; 5114 /* Reset 5 GHz range for shared mac for DBS */ 5115 for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) { 5116 if (mac_range[phy_id].low_2ghz_freq && 5117 mac_range[phy_id].low_5ghz_freq) { 5118 mac_range[phy_id].low_5ghz_freq = 0; 5119 mac_range[phy_id].high_5ghz_freq = 0; 5120 } 5121 } 5122 } 5123 5124 static u32 5125 ath12k_wmi_get_highest_5ghz_freq_from_range(struct ath12k_hw_mode_freq_range_arg *range) 5126 { 5127 u32 highest_freq = 0; 5128 u8 phy_id; 5129 5130 for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) { 5131 if (range[phy_id].high_5ghz_freq > highest_freq) 5132 highest_freq = 
range[phy_id].high_5ghz_freq; 5133 } 5134 5135 return highest_freq ? highest_freq : ATH12K_MAX_6GHZ_FREQ; 5136 } 5137 5138 static u32 5139 ath12k_wmi_get_lowest_5ghz_freq_from_range(struct ath12k_hw_mode_freq_range_arg *range) 5140 { 5141 u32 lowest_freq = 0; 5142 u8 phy_id; 5143 5144 for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) { 5145 if ((!lowest_freq && range[phy_id].low_5ghz_freq) || 5146 range[phy_id].low_5ghz_freq < lowest_freq) 5147 lowest_freq = range[phy_id].low_5ghz_freq; 5148 } 5149 5150 return lowest_freq ? lowest_freq : ATH12K_MIN_5GHZ_FREQ; 5151 } 5152 5153 static void 5154 ath12k_wmi_fill_upper_share_sbs_freq(struct ath12k_base *ab, 5155 u16 sbs_range_sep, 5156 struct ath12k_hw_mode_freq_range_arg *ref_freq) 5157 { 5158 struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info; 5159 struct ath12k_hw_mode_freq_range_arg *upper_sbs_freq_range; 5160 u8 phy_id; 5161 5162 upper_sbs_freq_range = 5163 hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS_UPPER_SHARE]; 5164 5165 for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) { 5166 upper_sbs_freq_range[phy_id].low_2ghz_freq = 5167 ref_freq[phy_id].low_2ghz_freq; 5168 upper_sbs_freq_range[phy_id].high_2ghz_freq = 5169 ref_freq[phy_id].high_2ghz_freq; 5170 5171 /* update for shared mac */ 5172 if (upper_sbs_freq_range[phy_id].low_2ghz_freq) { 5173 upper_sbs_freq_range[phy_id].low_5ghz_freq = sbs_range_sep + 10; 5174 upper_sbs_freq_range[phy_id].high_5ghz_freq = 5175 ath12k_wmi_get_highest_5ghz_freq_from_range(ref_freq); 5176 } else { 5177 upper_sbs_freq_range[phy_id].low_5ghz_freq = 5178 ath12k_wmi_get_lowest_5ghz_freq_from_range(ref_freq); 5179 upper_sbs_freq_range[phy_id].high_5ghz_freq = sbs_range_sep; 5180 } 5181 } 5182 } 5183 5184 static void 5185 ath12k_wmi_fill_lower_share_sbs_freq(struct ath12k_base *ab, 5186 u16 sbs_range_sep, 5187 struct ath12k_hw_mode_freq_range_arg *ref_freq) 5188 { 5189 struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info; 5190 struct 
ath12k_hw_mode_freq_range_arg *lower_sbs_freq_range; 5191 u8 phy_id; 5192 5193 lower_sbs_freq_range = 5194 hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS_LOWER_SHARE]; 5195 5196 for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) { 5197 lower_sbs_freq_range[phy_id].low_2ghz_freq = 5198 ref_freq[phy_id].low_2ghz_freq; 5199 lower_sbs_freq_range[phy_id].high_2ghz_freq = 5200 ref_freq[phy_id].high_2ghz_freq; 5201 5202 /* update for shared mac */ 5203 if (lower_sbs_freq_range[phy_id].low_2ghz_freq) { 5204 lower_sbs_freq_range[phy_id].low_5ghz_freq = 5205 ath12k_wmi_get_lowest_5ghz_freq_from_range(ref_freq); 5206 lower_sbs_freq_range[phy_id].high_5ghz_freq = sbs_range_sep; 5207 } else { 5208 lower_sbs_freq_range[phy_id].low_5ghz_freq = sbs_range_sep + 10; 5209 lower_sbs_freq_range[phy_id].high_5ghz_freq = 5210 ath12k_wmi_get_highest_5ghz_freq_from_range(ref_freq); 5211 } 5212 } 5213 } 5214 5215 static const char *ath12k_wmi_hw_mode_to_str(enum ath12k_hw_mode hw_mode) 5216 { 5217 static const char * const mode_str[] = { 5218 [ATH12K_HW_MODE_SMM] = "SMM", 5219 [ATH12K_HW_MODE_DBS] = "DBS", 5220 [ATH12K_HW_MODE_SBS] = "SBS", 5221 [ATH12K_HW_MODE_SBS_UPPER_SHARE] = "SBS_UPPER_SHARE", 5222 [ATH12K_HW_MODE_SBS_LOWER_SHARE] = "SBS_LOWER_SHARE", 5223 }; 5224 5225 if (hw_mode >= ARRAY_SIZE(mode_str)) 5226 return "Unknown"; 5227 5228 return mode_str[hw_mode]; 5229 } 5230 5231 static void 5232 ath12k_wmi_dump_freq_range_per_mac(struct ath12k_base *ab, 5233 struct ath12k_hw_mode_freq_range_arg *freq_range, 5234 enum ath12k_hw_mode hw_mode) 5235 { 5236 u8 i; 5237 5238 for (i = 0; i < MAX_RADIOS; i++) 5239 if (freq_range[i].low_2ghz_freq || freq_range[i].low_5ghz_freq) 5240 ath12k_dbg(ab, ATH12K_DBG_WMI, 5241 "frequency range: %s(%d) mac %d 2 GHz [%d - %d] 5 GHz [%d - %d]", 5242 ath12k_wmi_hw_mode_to_str(hw_mode), 5243 hw_mode, i, 5244 freq_range[i].low_2ghz_freq, 5245 freq_range[i].high_2ghz_freq, 5246 freq_range[i].low_5ghz_freq, 5247 freq_range[i].high_5ghz_freq); 5248 } 5249 5250 
/* ath12k_wmi_modify_sbs_freq - trim the SBS 5 GHz range of a non-shared mac
 * so it does not overlap the range of its shared (2.4 GHz + 5 GHz) partner.
 *
 * Called per phy when firmware did not provide an explicit SBS low-band
 * cutoff. Mutates freq_range_caps[ATH12K_HW_MODE_SBS] in place.
 *
 * @phy_id: phy whose SBS entry is examined (partner is the other of the
 *          two MAX_RADIOS entries)
 *
 * Return: 0 on success, -EINVAL on a range layout that cannot form SBS.
 */
static int ath12k_wmi_modify_sbs_freq(struct ath12k_base *ab, u8 phy_id)
{
	struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
	struct ath12k_hw_mode_freq_range_arg *sbs_mac_range, *shared_mac_range;
	struct ath12k_hw_mode_freq_range_arg *non_shared_range;
	u8 shared_phy_id;

	sbs_mac_range = &hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS][phy_id];

	/* if SBS mac range has both 2.4 and 5 GHz ranges, i.e. shared phy_id
	 * keep the range as it is in SBS
	 */
	if (sbs_mac_range->low_2ghz_freq && sbs_mac_range->low_5ghz_freq)
		return 0;

	/* 2.4 GHz-only mac cannot participate in SBS */
	if (sbs_mac_range->low_2ghz_freq && !sbs_mac_range->low_5ghz_freq) {
		ath12k_err(ab, "Invalid DBS/SBS mode with only 2.4Ghz");
		ath12k_wmi_dump_freq_range_per_mac(ab, sbs_mac_range, ATH12K_HW_MODE_SBS);
		return -EINVAL;
	}

	non_shared_range = sbs_mac_range;
	/* if SBS mac range has only 5 GHz then it's the non-shared phy, so
	 * modify the range as per the shared mac.
	 */
	shared_phy_id = phy_id ? 0 : 1;
	shared_mac_range =
		&hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS][shared_phy_id];

	if (shared_mac_range->low_5ghz_freq > non_shared_range->low_5ghz_freq) {
		ath12k_dbg(ab, ATH12K_DBG_WMI, "high 5 GHz shared");
		/* If the shared mac lower 5 GHz frequency is greater than
		 * non-shared mac lower 5 GHz frequency then the shared mac has
		 * high 5 GHz shared with 2.4 GHz. So non-shared mac's 5 GHz high
		 * freq should be less than the shared mac's low 5 GHz freq.
		 */
		if (non_shared_range->high_5ghz_freq >=
		    shared_mac_range->low_5ghz_freq)
			/* 10 MHz guard band below the shared mac's low edge,
			 * clamped so the range stays non-inverted
			 */
			non_shared_range->high_5ghz_freq =
				max_t(u32, shared_mac_range->low_5ghz_freq - 10,
				      non_shared_range->low_5ghz_freq);
	} else if (shared_mac_range->high_5ghz_freq <
		   non_shared_range->high_5ghz_freq) {
		ath12k_dbg(ab, ATH12K_DBG_WMI, "low 5 GHz shared");
		/* If the shared mac high 5 GHz frequency is less than
		 * non-shared mac high 5 GHz frequency then the shared mac has
		 * low 5 GHz shared with 2.4 GHz. So non-shared mac's 5 GHz low
		 * freq should be greater than the shared mac's high 5 GHz freq.
		 */
		if (shared_mac_range->high_5ghz_freq >=
		    non_shared_range->low_5ghz_freq)
			/* 10 MHz guard band above the shared mac's high edge,
			 * clamped so the range stays non-inverted
			 */
			non_shared_range->low_5ghz_freq =
				min_t(u32, shared_mac_range->high_5ghz_freq + 10,
				      non_shared_range->high_5ghz_freq);
	} else {
		ath12k_warn(ab, "invalid SBS range with all 5 GHz shared");
		return -EINVAL;
	}

	return 0;
}
/* ath12k_wmi_update_mac_freq_info - fold one firmware mac/phy capability
 * entry into the per-hw-mode frequency range table.
 *
 * Maps the firmware's hw_config_type onto the driver's ath12k_hw_mode
 * buckets (SMM/DBS/SBS) and, once both phys of a dual-mac mode are
 * populated, triggers the DBS/SBS post-processing helpers.
 *
 * @hw_config_type: firmware hw mode this entry belongs to
 * @phy_id: phy index within the mode; only 0..MAX_RADIOS-1 supported
 * @mac_cap: parsed mac/phy info carrying bands and raw frequency ranges
 */
static void
ath12k_wmi_update_mac_freq_info(struct ath12k_base *ab,
				enum wmi_host_hw_mode_config_type hw_config_type,
				u32 phy_id,
				struct ath12k_svc_ext_mac_phy_info *mac_cap)
{
	if (phy_id >= MAX_RADIOS) {
		ath12k_err(ab, "mac more than two not supported: %d", phy_id);
		return;
	}

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "hw_mode_cfg %d mac %d band 0x%x SBS cutoff freq %d 2 GHz [%d - %d] 5 GHz [%d - %d]",
		   hw_config_type, phy_id, mac_cap->supported_bands,
		   ab->wmi_ab.sbs_lower_band_end_freq,
		   mac_cap->hw_freq_range.low_2ghz_freq,
		   mac_cap->hw_freq_range.high_2ghz_freq,
		   mac_cap->hw_freq_range.low_5ghz_freq,
		   mac_cap->hw_freq_range.high_5ghz_freq);

	switch (hw_config_type) {
	case WMI_HOST_HW_MODE_SINGLE:
		/* single-mac mode: only phy 0 is meaningful */
		if (phy_id) {
			ath12k_dbg(ab, ATH12K_DBG_WMI, "mac phy 1 is not supported");
			break;
		}
		ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_SMM, phy_id);
		break;

	case WMI_HOST_HW_MODE_DBS:
		/* fill DBS only until both phys are known */
		if (!ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_DBS))
			ath12k_wmi_update_freq_info(ab, mac_cap,
						    ATH12K_HW_MODE_DBS, phy_id);
		break;
	case WMI_HOST_HW_MODE_DBS_SBS:
	case WMI_HOST_HW_MODE_DBS_OR_SBS:
		ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_DBS, phy_id);
		/* the same entry also feeds SBS when it carries any range or
		 * a firmware SBS cutoff exists
		 */
		if (ab->wmi_ab.sbs_lower_band_end_freq ||
		    mac_cap->hw_freq_range.low_5ghz_freq ||
		    mac_cap->hw_freq_range.low_2ghz_freq)
			ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_SBS,
						    phy_id);

		/* run post-processing once all phys of a mode are filled */
		if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_DBS))
			ath12k_wmi_update_dbs_freq_info(ab);
		if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS))
			ath12k_wmi_update_sbs_freq_info(ab);
		break;
	case WMI_HOST_HW_MODE_SBS:
	case WMI_HOST_HW_MODE_SBS_PASSIVE:
		ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_SBS, phy_id);
		if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS))
			ath12k_wmi_update_sbs_freq_info(ab);

		break;
	default:
		break;
	}
}
/* ath12k_wmi_svc_rdy_ext2_parse - TLV iterator callback for the
 * WMI_SERVICE_READY_EXT2 event.
 *
 * The event carries several WMI_TAG_ARRAY_STRUCT TLVs whose meaning is
 * positional: the chain of "*_done" flags in @data (struct
 * ath12k_wmi_svc_rdy_ext2_parse) tracks which array is expected next, so
 * the order of the else-if ladder below is the protocol contract —
 * do not reorder.
 *
 * Return: 0 on success or a negative error from the sub-parsers.
 */
static int ath12k_wmi_svc_rdy_ext2_parse(struct ath12k_base *ab,
					 u16 tag, u16 len,
					 const void *ptr, void *data)
{
	const struct ath12k_wmi_dbs_or_sbs_cap_params *dbs_or_sbs_caps;
	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
	struct ath12k_wmi_svc_rdy_ext2_parse *parse = data;
	int ret;

	switch (tag) {
	case WMI_TAG_SERVICE_READY_EXT2_EVENT:
		/* fixed part of the event */
		ret = ath12k_pull_svc_ready_ext2(wmi_handle, ptr,
						 &parse->arg);
		if (ret) {
			ath12k_warn(ab,
				    "failed to extract wmi service ready ext2 parameters: %d\n",
				    ret);
			return ret;
		}
		break;

	case WMI_TAG_ARRAY_STRUCT:
		if (!parse->dma_ring_cap_done) {
			ret = ath12k_wmi_dma_ring_caps(ab, len, ptr,
						       &parse->dma_caps_parse);
			if (ret)
				return ret;

			parse->dma_ring_cap_done = true;
		} else if (!parse->spectral_bin_scaling_done) {
			/* TODO: This is a place-holder as WMI tag for
			 * spectral scaling is before
			 * WMI_TAG_MAC_PHY_CAPABILITIES_EXT
			 */
			parse->spectral_bin_scaling_done = true;
		} else if (!parse->mac_phy_caps_ext_done) {
			/* nested array of MAC_PHY_CAPABILITIES_EXT entries */
			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
						  ath12k_wmi_tlv_mac_phy_caps_ext,
						  parse);
			if (ret) {
				ath12k_warn(ab, "failed to parse extended MAC PHY capabilities WMI TLV: %d\n",
					    ret);
				return ret;
			}

			parse->mac_phy_caps_ext_done = true;
		} else if (!parse->hal_reg_caps_ext2_done) {
			/* currently unused arrays — consumed only to keep the
			 * positional cursor advancing
			 */
			parse->hal_reg_caps_ext2_done = true;
		} else if (!parse->scan_radio_caps_ext2_done) {
			parse->scan_radio_caps_ext2_done = true;
		} else if (!parse->twt_caps_done) {
			parse->twt_caps_done = true;
		} else if (!parse->htt_msdu_idx_to_qtype_map_done) {
			parse->htt_msdu_idx_to_qtype_map_done = true;
		} else if (!parse->dbs_or_sbs_cap_ext_done) {
			dbs_or_sbs_caps = ptr;
			ab->wmi_ab.sbs_lower_band_end_freq =
				__le32_to_cpu(dbs_or_sbs_caps->sbs_lower_band_end_freq);

			ath12k_dbg(ab, ATH12K_DBG_WMI, "sbs_lower_band_end_freq %u\n",
				   ab->wmi_ab.sbs_lower_band_end_freq);

			/* cutoff known — build the hw mode freq range table */
			ret = ath12k_wmi_update_hw_mode_list(ab);
			if (ret) {
				ath12k_warn(ab, "failed to update hw mode list: %d\n",
					    ret);
				return ret;
			}

			parse->dbs_or_sbs_cap_ext_done = true;
		}

		break;
	default:
		break;
	}

	return 0;
}
ath12k_pull_vdev_start_resp_tlv(struct ath12k_base *ab, struct sk_buff *skb, 5597 struct wmi_vdev_start_resp_event *vdev_rsp) 5598 { 5599 const void **tb; 5600 const struct wmi_vdev_start_resp_event *ev; 5601 int ret; 5602 5603 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 5604 if (IS_ERR(tb)) { 5605 ret = PTR_ERR(tb); 5606 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 5607 return ret; 5608 } 5609 5610 ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT]; 5611 if (!ev) { 5612 ath12k_warn(ab, "failed to fetch vdev start resp ev"); 5613 kfree(tb); 5614 return -EPROTO; 5615 } 5616 5617 *vdev_rsp = *ev; 5618 5619 kfree(tb); 5620 return 0; 5621 } 5622 5623 static struct ath12k_reg_rule 5624 *create_ext_reg_rules_from_wmi(u32 num_reg_rules, 5625 struct ath12k_wmi_reg_rule_ext_params *wmi_reg_rule) 5626 { 5627 struct ath12k_reg_rule *reg_rule_ptr; 5628 u32 count; 5629 5630 reg_rule_ptr = kzalloc((num_reg_rules * sizeof(*reg_rule_ptr)), 5631 GFP_ATOMIC); 5632 5633 if (!reg_rule_ptr) 5634 return NULL; 5635 5636 for (count = 0; count < num_reg_rules; count++) { 5637 reg_rule_ptr[count].start_freq = 5638 le32_get_bits(wmi_reg_rule[count].freq_info, 5639 REG_RULE_START_FREQ); 5640 reg_rule_ptr[count].end_freq = 5641 le32_get_bits(wmi_reg_rule[count].freq_info, 5642 REG_RULE_END_FREQ); 5643 reg_rule_ptr[count].max_bw = 5644 le32_get_bits(wmi_reg_rule[count].bw_pwr_info, 5645 REG_RULE_MAX_BW); 5646 reg_rule_ptr[count].reg_power = 5647 le32_get_bits(wmi_reg_rule[count].bw_pwr_info, 5648 REG_RULE_REG_PWR); 5649 reg_rule_ptr[count].ant_gain = 5650 le32_get_bits(wmi_reg_rule[count].bw_pwr_info, 5651 REG_RULE_ANT_GAIN); 5652 reg_rule_ptr[count].flags = 5653 le32_get_bits(wmi_reg_rule[count].flag_info, 5654 REG_RULE_FLAGS); 5655 reg_rule_ptr[count].psd_flag = 5656 le32_get_bits(wmi_reg_rule[count].psd_power_info, 5657 REG_RULE_PSD_INFO); 5658 reg_rule_ptr[count].psd_eirp = 5659 le32_get_bits(wmi_reg_rule[count].psd_power_info, 5660 REG_RULE_PSD_EIRP); 5661 } 5662 5663 return 
reg_rule_ptr; 5664 } 5665 5666 static u8 ath12k_wmi_ignore_num_extra_rules(struct ath12k_wmi_reg_rule_ext_params *rule, 5667 u32 num_reg_rules) 5668 { 5669 u8 num_invalid_5ghz_rules = 0; 5670 u32 count, start_freq; 5671 5672 for (count = 0; count < num_reg_rules; count++) { 5673 start_freq = le32_get_bits(rule[count].freq_info, REG_RULE_START_FREQ); 5674 5675 if (start_freq >= ATH12K_MIN_6GHZ_FREQ) 5676 num_invalid_5ghz_rules++; 5677 } 5678 5679 return num_invalid_5ghz_rules; 5680 } 5681 5682 static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab, 5683 struct sk_buff *skb, 5684 struct ath12k_reg_info *reg_info) 5685 { 5686 const void **tb; 5687 const struct wmi_reg_chan_list_cc_ext_event *ev; 5688 struct ath12k_wmi_reg_rule_ext_params *ext_wmi_reg_rule; 5689 u32 num_2g_reg_rules, num_5g_reg_rules; 5690 u32 num_6g_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE]; 5691 u32 num_6g_reg_rules_cl[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE]; 5692 u8 num_invalid_5ghz_ext_rules; 5693 u32 total_reg_rules = 0; 5694 int ret, i, j; 5695 5696 ath12k_dbg(ab, ATH12K_DBG_WMI, "processing regulatory ext channel list\n"); 5697 5698 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 5699 if (IS_ERR(tb)) { 5700 ret = PTR_ERR(tb); 5701 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 5702 return ret; 5703 } 5704 5705 ev = tb[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT]; 5706 if (!ev) { 5707 ath12k_warn(ab, "failed to fetch reg chan list ext update ev\n"); 5708 kfree(tb); 5709 return -EPROTO; 5710 } 5711 5712 reg_info->num_2g_reg_rules = le32_to_cpu(ev->num_2g_reg_rules); 5713 reg_info->num_5g_reg_rules = le32_to_cpu(ev->num_5g_reg_rules); 5714 reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP] = 5715 le32_to_cpu(ev->num_6g_reg_rules_ap_lpi); 5716 reg_info->num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP] = 5717 le32_to_cpu(ev->num_6g_reg_rules_ap_sp); 5718 reg_info->num_6g_reg_rules_ap[WMI_REG_VLP_AP] = 5719 le32_to_cpu(ev->num_6g_reg_rules_ap_vlp); 5720 5721 for (i = 0; i < 
/* ath12k_pull_reg_chan_list_ext_update_ev - parse the extended regulatory
 * channel-list event (WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT) into @reg_info.
 *
 * Validates all rule counts against their limits, then walks the flat rule
 * array that follows the fixed event in order: 2 GHz rules, 5 GHz rules
 * (possibly containing stray 6 GHz rules that are trimmed), 6 GHz AP rules
 * per power mode, then 6 GHz client rules per power mode and client type.
 * The walk order must match the firmware layout exactly.
 *
 * Allocates the rule arrays in @reg_info (GFP_ATOMIC); ownership of those
 * allocations passes to the caller.
 *
 * Return: 0 on success, -EPROTO/-EINVAL on malformed events, -ENOMEM on
 * allocation failure.
 */
static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
						   struct sk_buff *skb,
						   struct ath12k_reg_info *reg_info)
{
	const void **tb;
	const struct wmi_reg_chan_list_cc_ext_event *ev;
	struct ath12k_wmi_reg_rule_ext_params *ext_wmi_reg_rule;
	u32 num_2g_reg_rules, num_5g_reg_rules;
	u32 num_6g_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
	u32 num_6g_reg_rules_cl[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
	u8 num_invalid_5ghz_ext_rules;
	u32 total_reg_rules = 0;
	int ret, i, j;

	ath12k_dbg(ab, ATH12K_DBG_WMI, "processing regulatory ext channel list\n");

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch reg chan list ext update ev\n");
		kfree(tb);
		return -EPROTO;
	}

	/* pull all rule counts first; the rule array walk below depends on
	 * every one of them
	 */
	reg_info->num_2g_reg_rules = le32_to_cpu(ev->num_2g_reg_rules);
	reg_info->num_5g_reg_rules = le32_to_cpu(ev->num_5g_reg_rules);
	reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP] =
		le32_to_cpu(ev->num_6g_reg_rules_ap_lpi);
	reg_info->num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP] =
		le32_to_cpu(ev->num_6g_reg_rules_ap_sp);
	reg_info->num_6g_reg_rules_ap[WMI_REG_VLP_AP] =
		le32_to_cpu(ev->num_6g_reg_rules_ap_vlp);

	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
		reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] =
			le32_to_cpu(ev->num_6g_reg_rules_cl_lpi[i]);
		reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] =
			le32_to_cpu(ev->num_6g_reg_rules_cl_sp[i]);
		reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] =
			le32_to_cpu(ev->num_6g_reg_rules_cl_vlp[i]);
	}

	num_2g_reg_rules = reg_info->num_2g_reg_rules;
	total_reg_rules += num_2g_reg_rules;
	num_5g_reg_rules = reg_info->num_5g_reg_rules;
	total_reg_rules += num_5g_reg_rules;

	/* sanity-check every count before trusting the payload length */
	if (num_2g_reg_rules > MAX_REG_RULES || num_5g_reg_rules > MAX_REG_RULES) {
		ath12k_warn(ab, "Num reg rules for 2G/5G exceeds max limit (num_2g_reg_rules: %d num_5g_reg_rules: %d max_rules: %d)\n",
			    num_2g_reg_rules, num_5g_reg_rules, MAX_REG_RULES);
		kfree(tb);
		return -EINVAL;
	}

	for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
		num_6g_reg_rules_ap[i] = reg_info->num_6g_reg_rules_ap[i];

		if (num_6g_reg_rules_ap[i] > MAX_6GHZ_REG_RULES) {
			ath12k_warn(ab, "Num 6G reg rules for AP mode(%d) exceeds max limit (num_6g_reg_rules_ap: %d, max_rules: %d)\n",
				    i, num_6g_reg_rules_ap[i], MAX_6GHZ_REG_RULES);
			kfree(tb);
			return -EINVAL;
		}

		total_reg_rules += num_6g_reg_rules_ap[i];
	}

	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
		num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] =
			reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i];
		total_reg_rules += num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i];

		num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] =
			reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i];
		total_reg_rules += num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i];

		num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] =
			reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i];
		total_reg_rules += num_6g_reg_rules_cl[WMI_REG_VLP_AP][i];

		if (num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] > MAX_6GHZ_REG_RULES ||
		    num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] > MAX_6GHZ_REG_RULES ||
		    num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] > MAX_6GHZ_REG_RULES) {
			ath12k_warn(ab, "Num 6g client reg rules exceeds max limit, for client(type: %d)\n",
				    i);
			kfree(tb);
			return -EINVAL;
		}
	}

	if (!total_reg_rules) {
		ath12k_warn(ab, "No reg rules available\n");
		kfree(tb);
		return -EINVAL;
	}

	memcpy(reg_info->alpha2, &ev->alpha2, REG_ALPHA2_LEN);

	reg_info->dfs_region = le32_to_cpu(ev->dfs_region);
	reg_info->phybitmap = le32_to_cpu(ev->phybitmap);
	reg_info->num_phy = le32_to_cpu(ev->num_phy);
	reg_info->phy_id = le32_to_cpu(ev->phy_id);
	reg_info->ctry_code = le32_to_cpu(ev->country_id);
	reg_info->reg_dmn_pair = le32_to_cpu(ev->domain_code);

	/* translate the WMI status code to the driver enum */
	switch (le32_to_cpu(ev->status_code)) {
	case WMI_REG_SET_CC_STATUS_PASS:
		reg_info->status_code = REG_SET_CC_STATUS_PASS;
		break;
	case WMI_REG_CURRENT_ALPHA2_NOT_FOUND:
		reg_info->status_code = REG_CURRENT_ALPHA2_NOT_FOUND;
		break;
	case WMI_REG_INIT_ALPHA2_NOT_FOUND:
		reg_info->status_code = REG_INIT_ALPHA2_NOT_FOUND;
		break;
	case WMI_REG_SET_CC_CHANGE_NOT_ALLOWED:
		reg_info->status_code = REG_SET_CC_CHANGE_NOT_ALLOWED;
		break;
	case WMI_REG_SET_CC_STATUS_NO_MEMORY:
		reg_info->status_code = REG_SET_CC_STATUS_NO_MEMORY;
		break;
	case WMI_REG_SET_CC_STATUS_FAIL:
		reg_info->status_code = REG_SET_CC_STATUS_FAIL;
		break;
	}

	reg_info->is_ext_reg_event = true;

	reg_info->min_bw_2g = le32_to_cpu(ev->min_bw_2g);
	reg_info->max_bw_2g = le32_to_cpu(ev->max_bw_2g);
	reg_info->min_bw_5g = le32_to_cpu(ev->min_bw_5g);
	reg_info->max_bw_5g = le32_to_cpu(ev->max_bw_5g);
	reg_info->min_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->min_bw_6g_ap_lpi);
	reg_info->max_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->max_bw_6g_ap_lpi);
	reg_info->min_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->min_bw_6g_ap_sp);
	reg_info->max_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->max_bw_6g_ap_sp);
	reg_info->min_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->min_bw_6g_ap_vlp);
	reg_info->max_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->max_bw_6g_ap_vlp);

	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
		reg_info->min_bw_6g_client[WMI_REG_INDOOR_AP][i] =
			le32_to_cpu(ev->min_bw_6g_client_lpi[i]);
		reg_info->max_bw_6g_client[WMI_REG_INDOOR_AP][i] =
			le32_to_cpu(ev->max_bw_6g_client_lpi[i]);
		reg_info->min_bw_6g_client[WMI_REG_STD_POWER_AP][i] =
			le32_to_cpu(ev->min_bw_6g_client_sp[i]);
		reg_info->max_bw_6g_client[WMI_REG_STD_POWER_AP][i] =
			le32_to_cpu(ev->max_bw_6g_client_sp[i]);
		reg_info->min_bw_6g_client[WMI_REG_VLP_AP][i] =
			le32_to_cpu(ev->min_bw_6g_client_vlp[i]);
		reg_info->max_bw_6g_client[WMI_REG_VLP_AP][i] =
			le32_to_cpu(ev->max_bw_6g_client_vlp[i]);
	}

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "%s:cc_ext %s dfs %d BW: min_2g %d max_2g %d min_5g %d max_5g %d phy_bitmap 0x%x",
		   __func__, reg_info->alpha2, reg_info->dfs_region,
		   reg_info->min_bw_2g, reg_info->max_bw_2g,
		   reg_info->min_bw_5g, reg_info->max_bw_5g,
		   reg_info->phybitmap);

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "num_2g_reg_rules %d num_5g_reg_rules %d",
		   num_2g_reg_rules, num_5g_reg_rules);

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "num_6g_reg_rules_ap_lpi: %d num_6g_reg_rules_ap_sp: %d num_6g_reg_rules_ap_vlp: %d",
		   num_6g_reg_rules_ap[WMI_REG_INDOOR_AP],
		   num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP],
		   num_6g_reg_rules_ap[WMI_REG_VLP_AP]);

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "6g Regular client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d",
		   num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_DEFAULT_CLIENT],
		   num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_DEFAULT_CLIENT],
		   num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_DEFAULT_CLIENT]);

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "6g Subordinate client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d",
		   num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_SUBORDINATE_CLIENT],
		   num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_SUBORDINATE_CLIENT],
		   num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_SUBORDINATE_CLIENT]);

	/* the rule array starts right after the fixed event + its TLV header */
	ext_wmi_reg_rule =
		(struct ath12k_wmi_reg_rule_ext_params *)((u8 *)ev
			+ sizeof(*ev)
			+ sizeof(struct wmi_tlv));

	if (num_2g_reg_rules) {
		reg_info->reg_rules_2g_ptr =
			create_ext_reg_rules_from_wmi(num_2g_reg_rules,
						      ext_wmi_reg_rule);

		if (!reg_info->reg_rules_2g_ptr) {
			kfree(tb);
			ath12k_warn(ab, "Unable to Allocate memory for 2g rules\n");
			return -ENOMEM;
		}
	}

	ext_wmi_reg_rule += num_2g_reg_rules;

	/* Firmware might include 6 GHz reg rule in 5 GHz rule list
	 * for few countries along with separate 6 GHz rule.
	 * Having same 6 GHz reg rule in 5 GHz and 6 GHz rules list
	 * causes intersect check to be true, and same rules will be
	 * shown multiple times in iw cmd.
	 * Hence, avoid parsing 6 GHz rule from 5 GHz reg rule list
	 */
	num_invalid_5ghz_ext_rules = ath12k_wmi_ignore_num_extra_rules(ext_wmi_reg_rule,
								       num_5g_reg_rules);

	if (num_invalid_5ghz_ext_rules) {
		ath12k_dbg(ab, ATH12K_DBG_WMI,
			   "CC: %s 5 GHz reg rules number %d from fw, %d number of invalid 5 GHz rules",
			   reg_info->alpha2, reg_info->num_5g_reg_rules,
			   num_invalid_5ghz_ext_rules);

		num_5g_reg_rules = num_5g_reg_rules - num_invalid_5ghz_ext_rules;
		reg_info->num_5g_reg_rules = num_5g_reg_rules;
	}

	if (num_5g_reg_rules) {
		reg_info->reg_rules_5g_ptr =
			create_ext_reg_rules_from_wmi(num_5g_reg_rules,
						      ext_wmi_reg_rule);

		if (!reg_info->reg_rules_5g_ptr) {
			kfree(tb);
			ath12k_warn(ab, "Unable to Allocate memory for 5g rules\n");
			return -ENOMEM;
		}
	}

	/* We have adjusted the number of 5 GHz reg rules above. But still those
	 * many rules needs to be adjusted in ext_wmi_reg_rule.
	 *
	 * NOTE: num_invalid_5ghz_ext_rules will be 0 for rest other cases.
	 */
	ext_wmi_reg_rule += (num_5g_reg_rules + num_invalid_5ghz_ext_rules);

	for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
		reg_info->reg_rules_6g_ap_ptr[i] =
			create_ext_reg_rules_from_wmi(num_6g_reg_rules_ap[i],
						      ext_wmi_reg_rule);

		if (!reg_info->reg_rules_6g_ap_ptr[i]) {
			kfree(tb);
			ath12k_warn(ab, "Unable to Allocate memory for 6g ap rules\n");
			return -ENOMEM;
		}

		ext_wmi_reg_rule += num_6g_reg_rules_ap[i];
	}

	for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++) {
		for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
			reg_info->reg_rules_6g_client_ptr[j][i] =
				create_ext_reg_rules_from_wmi(num_6g_reg_rules_cl[j][i],
							      ext_wmi_reg_rule);

			if (!reg_info->reg_rules_6g_client_ptr[j][i]) {
				kfree(tb);
				ath12k_warn(ab, "Unable to Allocate memory for 6g client rules\n");
				return -ENOMEM;
			}

			ext_wmi_reg_rule += num_6g_reg_rules_cl[j][i];
		}
	}

	reg_info->client_type = le32_to_cpu(ev->client_type);
	reg_info->rnr_tpe_usable = ev->rnr_tpe_usable;
	reg_info->unspecified_ap_usable = ev->unspecified_ap_usable;
	reg_info->domain_code_6g_ap[WMI_REG_INDOOR_AP] =
		le32_to_cpu(ev->domain_code_6g_ap_lpi);
	reg_info->domain_code_6g_ap[WMI_REG_STD_POWER_AP] =
		le32_to_cpu(ev->domain_code_6g_ap_sp);
	reg_info->domain_code_6g_ap[WMI_REG_VLP_AP] =
		le32_to_cpu(ev->domain_code_6g_ap_vlp);

	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
		reg_info->domain_code_6g_client[WMI_REG_INDOOR_AP][i] =
			le32_to_cpu(ev->domain_code_6g_client_lpi[i]);
		reg_info->domain_code_6g_client[WMI_REG_STD_POWER_AP][i] =
			le32_to_cpu(ev->domain_code_6g_client_sp[i]);
		reg_info->domain_code_6g_client[WMI_REG_VLP_AP][i] =
			le32_to_cpu(ev->domain_code_6g_client_vlp[i]);
	}

	reg_info->domain_code_6g_super_id = le32_to_cpu(ev->domain_code_6g_super_id);

	ath12k_dbg(ab, ATH12K_DBG_WMI, "6g client_type: %d domain_code_6g_super_id: %d",
		   reg_info->client_type, reg_info->domain_code_6g_super_id);

	ath12k_dbg(ab, ATH12K_DBG_WMI, "processed regulatory ext channel list\n");

	kfree(tb);
	return 0;
}
client_type: %d domain_code_6g_super_id: %d",
		   reg_info->client_type, reg_info->domain_code_6g_super_id);

	ath12k_dbg(ab, ATH12K_DBG_WMI, "processed regulatory ext channel list\n");

	kfree(tb);
	return 0;
}

/* Extract vdev id and peer MAC from a WMI peer-delete response event.
 * Returns 0 on success or a negative errno on parse failure.
 */
static int ath12k_pull_peer_del_resp_ev(struct ath12k_base *ab, struct sk_buff *skb,
					struct wmi_peer_delete_resp_event *peer_del_resp)
{
	const void **tb;
	const struct wmi_peer_delete_resp_event *ev;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_PEER_DELETE_RESP_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch peer delete resp ev");
		kfree(tb);
		return -EPROTO;
	}

	memset(peer_del_resp, 0, sizeof(*peer_del_resp));

	/* vdev_id stays in its on-wire (__le32) form; callers convert */
	peer_del_resp->vdev_id = ev->vdev_id;
	ether_addr_copy(peer_del_resp->peer_macaddr.addr,
			ev->peer_macaddr.addr);

	kfree(tb);
	return 0;
}

/* Extract the vdev id from a WMI vdev-delete response event. */
static int ath12k_pull_vdev_del_resp_ev(struct ath12k_base *ab,
					struct sk_buff *skb,
					u32 *vdev_id)
{
	const void **tb;
	const struct wmi_vdev_delete_resp_event *ev;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_VDEV_DELETE_RESP_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch vdev delete resp ev");
		kfree(tb);
		return -EPROTO;
	}

	*vdev_id = le32_to_cpu(ev->vdev_id);

	kfree(tb);
	return 0;
}

/* Extract vdev id and tx status from a WMI offloaded-beacon tx status event. */
static int ath12k_pull_bcn_tx_status_ev(struct ath12k_base *ab,
					struct sk_buff *skb,
					u32 *vdev_id, u32 *tx_status)
{
	const void **tb;
	const struct wmi_bcn_tx_status_event *ev;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch bcn tx status ev");
		kfree(tb);
		return -EPROTO;
	}

	*vdev_id = le32_to_cpu(ev->vdev_id);
	*tx_status = le32_to_cpu(ev->tx_status);

	kfree(tb);
	return 0;
}

/* Extract the vdev id from a WMI vdev-stopped event. */
static int ath12k_pull_vdev_stopped_param_tlv(struct ath12k_base *ab, struct sk_buff *skb,
					      u32 *vdev_id)
{
	const void **tb;
	const struct wmi_vdev_stopped_event *ev;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_VDEV_STOPPED_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch vdev stop ev");
		kfree(tb);
		return -EPROTO;
	}

	*vdev_id = le32_to_cpu(ev->vdev_id);

	kfree(tb);
	return 0;
}

/* TLV iterator callback for mgmt rx: records the fixed header and the
 * first byte-array TLV (the frame body); later byte arrays are ignored.
 */
static int ath12k_wmi_tlv_mgmt_rx_parse(struct ath12k_base *ab,
					u16 tag, u16 len,
					const void *ptr, void *data)
{
	struct wmi_tlv_mgmt_rx_parse *parse = data;

	switch (tag) {
	case WMI_TAG_MGMT_RX_HDR:
		parse->fixed = ptr;
		break;
	case WMI_TAG_ARRAY_BYTE:
		/* only the first byte array carries the mgmt frame */
		if (!parse->frame_buf_done) {
			parse->frame_buf = ptr;
			parse->frame_buf_done = true;
		}
		break;
	}
	return 0;
}

/* Parse a WMI mgmt rx event: fill @hdr from the fixed TLV and reshape
 * @skb in place so its data points at the raw 802.11 frame.
 * Returns 0 or a negative errno; on success the skb no longer contains
 * the WMI TLV framing.
 */
static int ath12k_pull_mgmt_rx_params_tlv(struct ath12k_base *ab,
					  struct sk_buff *skb,
					  struct ath12k_wmi_mgmt_rx_arg *hdr)
{
	struct wmi_tlv_mgmt_rx_parse parse = { };
	const struct ath12k_wmi_mgmt_rx_params *ev;
	const u8 *frame;
	int i, ret;

	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath12k_wmi_tlv_mgmt_rx_parse,
				  &parse);
	if (ret) {
		ath12k_warn(ab, "failed to parse mgmt rx tlv %d\n", ret);
		return ret;
	}

	ev = parse.fixed;
	frame = parse.frame_buf;

	if (!ev || !frame) {
		ath12k_warn(ab, "failed to fetch mgmt rx hdr");
		return -EPROTO;
	}

	hdr->pdev_id = le32_to_cpu(ev->pdev_id);
	hdr->chan_freq = le32_to_cpu(ev->chan_freq);
	hdr->channel = le32_to_cpu(ev->channel);
	hdr->snr = le32_to_cpu(ev->snr);
	hdr->rate = le32_to_cpu(ev->rate);
	hdr->phy_mode = le32_to_cpu(ev->phy_mode);
	hdr->buf_len = le32_to_cpu(ev->buf_len);
	hdr->status = le32_to_cpu(ev->status);
	hdr->flags = le32_to_cpu(ev->flags);
	hdr->rssi = a_sle32_to_cpu(ev->rssi);
	hdr->tsf_delta = le32_to_cpu(ev->tsf_delta);

	for (i = 0; i < ATH_MAX_ANTENNA; i++)
		hdr->rssi_ctl[i] = le32_to_cpu(ev->rssi_ctl[i]);

	/* reject a firmware-advertised length that overruns the skb */
	if (skb->len < (frame - skb->data) + hdr->buf_len) {
		ath12k_warn(ab, "invalid length in mgmt rx hdr ev");
		return -EPROTO;
	}

	/* shift the sk_buff to point to `frame` */
	skb_trim(skb, 0);
	skb_put(skb, frame - skb->data);
	skb_pull(skb, frame - skb->data);
	skb_put(skb, hdr->buf_len);

	return 0;
}

/* Complete one mgmt tx: look up the msdu by @desc_id, unmap its DMA
 * buffer, report the tx status to mac80211 and wake any waiter once the
 * pending-mgmt count reaches zero. Returns 0 or -ENOENT for an unknown id.
 */
static int wmi_process_mgmt_tx_comp(struct ath12k *ar, u32 desc_id,
				    u32 status, u32 ack_rssi)
{
	struct sk_buff *msdu;
	struct ieee80211_tx_info *info;
	struct ath12k_skb_cb *skb_cb;
	int num_mgmt;

	spin_lock_bh(&ar->txmgmt_idr_lock);
	msdu = idr_find(&ar->txmgmt_idr, desc_id);

	if (!msdu) {
		ath12k_warn(ar->ab, "received mgmt tx compl for invalid msdu_id: %d\n",
			    desc_id);
		spin_unlock_bh(&ar->txmgmt_idr_lock);
		return -ENOENT;
	}

	idr_remove(&ar->txmgmt_idr, desc_id);
	spin_unlock_bh(&ar->txmgmt_idr_lock);

	skb_cb = ATH12K_SKB_CB(msdu);
	dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);

	info = IEEE80211_SKB_CB(msdu);
	memset(&info->status, 0, sizeof(info->status));

	/* skip tx rate update from ieee80211_status*/
	info->status.rates[0].idx = -1;

	if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status) {
		info->flags |= IEEE80211_TX_STAT_ACK;
		info->status.ack_signal = ack_rssi;
		info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
	}

	if ((info->flags & IEEE80211_TX_CTL_NO_ACK) && !status)
		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;

	ieee80211_tx_status_irqsafe(ath12k_ar_to_hw(ar), msdu);

	num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);

	/* WARN when we received this event without doing any mgmt tx */
	if (num_mgmt < 0)
		WARN_ON_ONCE(1);

	if (!num_mgmt)
		wake_up(&ar->txmgmt_empty_waitq);

	return 0;
}

/* Extract a WMI mgmt tx completion event into @param.
 * Fields are copied in on-wire (__le32) form; callers convert as needed.
 */
static int ath12k_pull_mgmt_tx_compl_param_tlv(struct ath12k_base *ab,
					       struct sk_buff *skb,
					       struct wmi_mgmt_tx_compl_event *param)
{
	const void **tb;
	const struct wmi_mgmt_tx_compl_event *ev;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_MGMT_TX_COMPL_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch mgmt tx compl ev");
		kfree(tb);
		return -EPROTO;
	}

	param->pdev_id = ev->pdev_id;
	param->desc_id = ev->desc_id;
	param->status = ev->status;
	param->ppdu_id = ev->ppdu_id;
	param->ack_rssi = ev->ack_rssi;

	kfree(tb);
	return 0;
}

/* Scan state machine: handle the "scan started" firmware event.
 * Only valid while in STARTING; transitions to RUNNING and completes
 * the started completion (plus mac80211 ROC readiness if applicable).
 */
static void ath12k_wmi_event_scan_started(struct ath12k *ar)
{
	lockdep_assert_held(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH12K_SCAN_IDLE:
	case ATH12K_SCAN_RUNNING:
	case ATH12K_SCAN_ABORTING:
		ath12k_warn(ar->ab, "received scan started event in an invalid scan state: %s (%d)\n",
			    ath12k_scan_state_str(ar->scan.state),
			    ar->scan.state);
		break;
	case ATH12K_SCAN_STARTING:
		ar->scan.state = ATH12K_SCAN_RUNNING;

		if (ar->scan.is_roc)
			ieee80211_ready_on_channel(ath12k_ar_to_hw(ar));

		complete(&ar->scan.started);
		break;
	}
}

/* Scan state machine: handle a "scan start failed" firmware event by
 * completing the started waiter and tearing the scan down.
 */
static void ath12k_wmi_event_scan_start_failed(struct ath12k *ar)
{
	lockdep_assert_held(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH12K_SCAN_IDLE:
	case ATH12K_SCAN_RUNNING:
	case ATH12K_SCAN_ABORTING:
		ath12k_warn(ar->ab, "received scan start failed event in an invalid scan state: %s (%d)\n",
			    ath12k_scan_state_str(ar->scan.state),
			    ar->scan.state);
		break;
	case ATH12K_SCAN_STARTING:
		complete(&ar->scan.started);
		__ath12k_mac_scan_finish(ar);
		break;
	}
}

/* Scan state machine: handle the "scan completed" firmware event. */
static void ath12k_wmi_event_scan_completed(struct ath12k *ar)
{
	lockdep_assert_held(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH12K_SCAN_IDLE:
	case ATH12K_SCAN_STARTING:
		/* One suspected reason scan can be completed while starting is
		 * if firmware fails to deliver all scan events to the host,
		 * e.g. when transport pipe is full. This has been observed
		 * with spectral scan phyerr events starving wmi transport
		 * pipe. In such case the "scan completed" event should be (and
		 * is) ignored by the host as it may be just firmware's scan
		 * state machine recovering.
		 */
		ath12k_warn(ar->ab, "received scan completed event in an invalid scan state: %s (%d)\n",
			    ath12k_scan_state_str(ar->scan.state),
			    ar->scan.state);
		break;
	case ATH12K_SCAN_RUNNING:
	case ATH12K_SCAN_ABORTING:
		__ath12k_mac_scan_finish(ar);
		break;
	}
}

/* Scan state machine: firmware returned to the BSS channel; forget the
 * off-channel we were on.
 */
static void ath12k_wmi_event_scan_bss_chan(struct ath12k *ar)
{
	lockdep_assert_held(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH12K_SCAN_IDLE:
	case ATH12K_SCAN_STARTING:
		ath12k_warn(ar->ab, "received scan bss chan event in an invalid scan state: %s (%d)\n",
			    ath12k_scan_state_str(ar->scan.state),
			    ar->scan.state);
		break;
	case ATH12K_SCAN_RUNNING:
	case ATH12K_SCAN_ABORTING:
		ar->scan_channel = NULL;
		break;
	}
}

/* Scan state machine: firmware moved to a foreign channel at @freq;
 * record it and complete the ROC on-channel waiter when it matches.
 */
static void ath12k_wmi_event_scan_foreign_chan(struct ath12k *ar, u32 freq)
{
	struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);

	lockdep_assert_held(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH12K_SCAN_IDLE:
	case ATH12K_SCAN_STARTING:
		ath12k_warn(ar->ab, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
			    ath12k_scan_state_str(ar->scan.state),
			    ar->scan.state);
		break;
	case ATH12K_SCAN_RUNNING:
	case ATH12K_SCAN_ABORTING:
		ar->scan_channel = ieee80211_get_channel(hw->wiphy, freq);

		if (ar->scan.is_roc && ar->scan.roc_freq == freq)
			complete(&ar->scan.on_channel);

		break;
	}
}

/* Map a WMI scan event type (and completion reason) to a debug string. */
static const char *
ath12k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
			       enum wmi_scan_completion_reason reason)
{
	switch (type) {
	case WMI_SCAN_EVENT_STARTED:
		return "started";
	case WMI_SCAN_EVENT_COMPLETED:
		switch (reason) {
		case WMI_SCAN_REASON_COMPLETED:
			return "completed";
		case WMI_SCAN_REASON_CANCELLED:
			return "completed [cancelled]";
		case WMI_SCAN_REASON_PREEMPTED:
			return "completed [preempted]";
		case WMI_SCAN_REASON_TIMEDOUT:
			return "completed [timedout]";
		case WMI_SCAN_REASON_INTERNAL_FAILURE:
			return "completed [internal err]";
		case WMI_SCAN_REASON_MAX:
			break;
		}
		return "completed [unknown]";
	case WMI_SCAN_EVENT_BSS_CHANNEL:
		return "bss channel";
	case WMI_SCAN_EVENT_FOREIGN_CHAN:
		return "foreign channel";
	case WMI_SCAN_EVENT_DEQUEUED:
		return "dequeued";
	case WMI_SCAN_EVENT_PREEMPTED:
		return "preempted";
	case WMI_SCAN_EVENT_START_FAILED:
		return "start failed";
	case WMI_SCAN_EVENT_RESTARTED:
		return "restarted";
	case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
		return "foreign channel exit";
	default:
		return "unknown";
	}
}

/* Extract a WMI scan event into @scan_evt_param.
 * Fields stay in on-wire (__le32) form; callers convert as needed.
 */
static int ath12k_pull_scan_ev(struct ath12k_base *ab, struct sk_buff *skb,
			       struct wmi_scan_event *scan_evt_param)
{
	const void **tb;
	const struct wmi_scan_event *ev;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_SCAN_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch scan ev");
		kfree(tb);
		return -EPROTO;
	}

	scan_evt_param->event_type = ev->event_type;
	scan_evt_param->reason = ev->reason;
	scan_evt_param->channel_freq = ev->channel_freq;
	scan_evt_param->scan_req_id = ev->scan_req_id;
	scan_evt_param->scan_id = ev->scan_id;
	scan_evt_param->vdev_id = ev->vdev_id;
	scan_evt_param->tsf_timestamp = ev->tsf_timestamp;

	kfree(tb);
	return 0;
}

/* Extract a WMI peer STA kickout event into @arg.
 * NOTE: arg->mac_addr points into @skb data freed with tb's source skb
 * lifetime — callers must consume it before the skb is released.
 */
static int ath12k_pull_peer_sta_kickout_ev(struct ath12k_base *ab, struct sk_buff *skb,
					   struct wmi_peer_sta_kickout_arg *arg)
{
	const void **tb;
	const struct wmi_peer_sta_kickout_event *ev;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_PEER_STA_KICKOUT_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch peer sta kickout ev");
		kfree(tb);
		return -EPROTO;
	}

	arg->mac_addr = ev->peer_macaddr.addr;
	arg->reason = le32_to_cpu(ev->reason);
	arg->rssi = le32_to_cpu(ev->rssi);

	kfree(tb);
	return 0;
}

/* Extract a WMI roam event into @roam_ev (fields kept in __le32 form). */
static int ath12k_pull_roam_ev(struct ath12k_base *ab, struct sk_buff *skb,
			       struct wmi_roam_event *roam_ev)
{
	const void **tb;
	const struct wmi_roam_event *ev;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_ROAM_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch roam ev");
		kfree(tb);
		return -EPROTO;
	}

	roam_ev->vdev_id = ev->vdev_id;
	roam_ev->reason = ev->reason;
	roam_ev->rssi = ev->rssi;

	kfree(tb);
	return 0;
}

/* Map a center frequency to its flat index across all registered bands,
 * skipping channels outside this radio's supported frequency range.
 * If @freq is not found the index one past the last counted channel is
 * returned.
 */
static int freq_to_idx(struct ath12k *ar, int freq)
{
	struct ieee80211_supported_band *sband;
	struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
	int band, ch, idx = 0;

	for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
		if (!ar->mac.sbands[band].channels)
			continue;

		sband = hw->wiphy->bands[band];
		if (!sband)
			continue;

		for (ch = 0; ch < sband->n_channels; ch++, idx++) {
			if (sband->channels[ch].center_freq <
			    KHZ_TO_MHZ(ar->freq_range.start_freq) ||
			    sband->channels[ch].center_freq >
			    KHZ_TO_MHZ(ar->freq_range.end_freq))
				continue;

			if (sband->channels[ch].center_freq == freq)
				goto exit;
		}
	}

exit:
	return idx;
}

/* Extract a WMI channel info event into @ch_info_ev (fields kept in
 * on-wire __le32 form; callers convert as needed).
 */
static int
ath12k_pull_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb,
			 struct wmi_chan_info_event *ch_info_ev)
{
	const void **tb;
	const struct wmi_chan_info_event *ev;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_CHAN_INFO_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch chan info ev");
		kfree(tb);
		return -EPROTO;
	}

	ch_info_ev->err_code = ev->err_code;
	ch_info_ev->freq = ev->freq;
	ch_info_ev->cmd_flags = ev->cmd_flags;
	ch_info_ev->noise_floor = ev->noise_floor;
	ch_info_ev->rx_clear_count = ev->rx_clear_count;
	ch_info_ev->cycle_count = ev->cycle_count;
	ch_info_ev->chan_tx_pwr_range = ev->chan_tx_pwr_range;
	ch_info_ev->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
	ch_info_ev->rx_frame_count = ev->rx_frame_count;
	ch_info_ev->tx_frame_cnt = ev->tx_frame_cnt;
	ch_info_ev->mac_clk_mhz = ev->mac_clk_mhz;
	ch_info_ev->vdev_id = ev->vdev_id;

	kfree(tb);
	return 0;
}

/* Extract a WMI pdev BSS channel info event into @bss_ch_info_ev
 * (fields kept in on-wire __le32 form; callers convert as needed).
 */
static int
ath12k_pull_pdev_bss_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb,
				  struct wmi_pdev_bss_chan_info_event *bss_ch_info_ev)
{
	const void **tb;
	const struct wmi_pdev_bss_chan_info_event *ev;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch pdev bss chan info ev");
		kfree(tb);
		return -EPROTO;
	}

	bss_ch_info_ev->pdev_id = ev->pdev_id;
	bss_ch_info_ev->freq = ev->freq;
	bss_ch_info_ev->noise_floor = ev->noise_floor;
	bss_ch_info_ev->rx_clear_count_low = ev->rx_clear_count_low;
	bss_ch_info_ev->rx_clear_count_high = ev->rx_clear_count_high;
	bss_ch_info_ev->cycle_count_low = ev->cycle_count_low;
	bss_ch_info_ev->cycle_count_high = ev->cycle_count_high;
	bss_ch_info_ev->tx_cycle_count_low = ev->tx_cycle_count_low;
	bss_ch_info_ev->tx_cycle_count_high = ev->tx_cycle_count_high;
	bss_ch_info_ev->rx_cycle_count_low = ev->rx_cycle_count_low;
	bss_ch_info_ev->rx_cycle_count_high = ev->rx_cycle_count_high;
	bss_ch_info_ev->rx_bss_cycle_count_low = ev->rx_bss_cycle_count_low;
	bss_ch_info_ev->rx_bss_cycle_count_high = ev->rx_bss_cycle_count_high;

	kfree(tb);
	return 0;
}

/* Extract a WMI vdev install-key completion event into @arg.
 * NOTE: arg->macaddr points into the skb data; consume before release.
 */
static int
ath12k_pull_vdev_install_key_compl_ev(struct ath12k_base *ab, struct sk_buff *skb,
				      struct wmi_vdev_install_key_complete_arg *arg)
{
	const void **tb;
	const struct wmi_vdev_install_key_compl_event *ev;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch vdev install key compl ev");
		kfree(tb);
		return -EPROTO;
	}

	arg->vdev_id = le32_to_cpu(ev->vdev_id);
	arg->macaddr = ev->peer_macaddr.addr;
	arg->key_idx = le32_to_cpu(ev->key_idx);
	arg->key_flags = le32_to_cpu(ev->key_flags);
	arg->status = le32_to_cpu(ev->status);

	kfree(tb);
	return 0;
}

/* Extract a WMI peer-assoc confirmation event into @peer_assoc_conf.
 * NOTE: macaddr points into the skb data; consume before release.
 */
static int ath12k_pull_peer_assoc_conf_ev(struct ath12k_base *ab, struct sk_buff *skb,
					  struct wmi_peer_assoc_conf_arg *peer_assoc_conf)
{
	const void **tb;
	const struct wmi_peer_assoc_conf_event *ev;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_PEER_ASSOC_CONF_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch peer assoc conf ev");
		kfree(tb);
		return -EPROTO;
	}

	peer_assoc_conf->vdev_id = le32_to_cpu(ev->vdev_id);
	peer_assoc_conf->macaddr = ev->peer_macaddr.addr;

	kfree(tb);
	return 0;
}

/* Validate that a pdev temperature event carries the expected TLV.
 * NOTE(review): @ev is passed by value as a pointer, so the local
 * reassignment below never reaches the caller — the caller's buffer is
 * not populated by this function; it only checks the TLV's presence.
 * Confirm against the caller whether the temperature payload should be
 * returned here.
 */
static int
ath12k_pull_pdev_temp_ev(struct ath12k_base *ab, struct sk_buff *skb,
			 const struct wmi_pdev_temperature_event *ev)
{
	const void **tb;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch pdev temp ev");
		kfree(tb);
		return -EPROTO;
	}

	kfree(tb);
	return 0;
}

/* HTC callback: tx credits replenished — wake the WMI tx waiters. */
static void ath12k_wmi_op_ep_tx_credits(struct ath12k_base *ab)
{
	/* try to send pending beacons first. they take priority */
	wake_up(&ab->wmi_ab.tx_credits_wq);
}

/* Handle a WMI 11d new country code event: latch the new alpha2 under
 * base_lock, reset per-radio 11d state and kick the 11d update worker.
 */
static int ath12k_reg_11d_new_cc_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	const struct wmi_11d_new_cc_event *ev;
	struct ath12k *ar;
	struct ath12k_pdev *pdev;
	const void **tb;
	int ret, i;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_11D_NEW_COUNTRY_EVENT];
	if (!ev) {
		kfree(tb);
		ath12k_warn(ab, "failed to fetch 11d new cc ev");
		return -EPROTO;
	}

	spin_lock_bh(&ab->base_lock);
	memcpy(&ab->new_alpha2, &ev->new_alpha2, REG_ALPHA2_LEN);
	spin_unlock_bh(&ab->base_lock);

	ath12k_dbg(ab, ATH12K_DBG_WMI, "wmi 11d new cc %c%c\n",
		   ab->new_alpha2[0],
		   ab->new_alpha2[1]);

	kfree(tb);

	for (i = 0; i < ab->num_radios; i++) {
		pdev = &ab->pdevs[i];
		ar = pdev->ar;
		ar->state_11d = ATH12K_11D_IDLE;
		ar->ah->regd_updated = false;
		complete(&ar->completed_11d_scan);
	}

	queue_work(ab->workqueue, &ab->update_11d_work);

	return 0;
}

/* HTC tx-complete callback for WMI: simply free the sent skb. */
static void ath12k_wmi_htc_tx_complete(struct ath12k_base *ab,
				       struct sk_buff *skb)
{
	dev_kfree_skb(skb);
}

/* Handle a WMI regulatory channel list event: extract and validate the
 * reg info, stash it per-pdev and build a new regdomain. On unrecoverable
 * failure fall back (with a WARN) to the previously applied regdomain.
 */
static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct ath12k_reg_info *reg_info;
	struct ath12k *ar = NULL;
	u8 pdev_idx = 255;	/* sentinel: no valid pdev resolved yet */
	int ret;

	reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
	if (!reg_info) {
		ret = -ENOMEM;
		goto fallback;
	}

	ret = ath12k_pull_reg_chan_list_ext_update_ev(ab, skb, reg_info);
	if (ret) {
		ath12k_warn(ab, "failed to extract regulatory info from received event\n");
		goto mem_free;
	}

	ret = ath12k_reg_validate_reg_info(ab, reg_info);
	if (ret == ATH12K_REG_STATUS_FALLBACK) {
		ath12k_warn(ab, "failed to validate reg info %d\n", ret);
		/* firmware has successfully switched to the new regd but host
		 * can not continue, so free reginfo and fall back to old regd
		 */
		goto mem_free;
	} else if (ret == ATH12K_REG_STATUS_DROP) {
		/* reg info is valid but we will not store it and
		 * not going to create new regd for it
		 */
		ret = ATH12K_REG_STATUS_VALID;
		goto mem_free;
	}

	/* free old reg_info if it exists */
	pdev_idx = reg_info->phy_id;
	if (ab->reg_info[pdev_idx]) {
		ath12k_reg_reset_reg_info(ab->reg_info[pdev_idx]);
		kfree(ab->reg_info[pdev_idx]);
	}
	/* reg_info is valid, we store it for later use
	 * even if the regd build below fails
	 */
	ab->reg_info[pdev_idx] = reg_info;

	ret = ath12k_reg_handle_chan_list(ab, reg_info, WMI_VDEV_TYPE_UNSPEC,
					  IEEE80211_REG_UNSET_AP);
	if (ret) {
		ath12k_warn(ab, "failed to handle chan list %d\n", ret);
		goto fallback;
	}

	goto out;

mem_free:
	ath12k_reg_reset_reg_info(reg_info);
	kfree(reg_info);

	if (ret == ATH12K_REG_STATUS_VALID)
		goto out;

fallback:
	/* Fallback to older reg (by sending previous country setting
	 * again if fw has succeeded and we failed to process here.
	 * The Regdomain should be uniform across driver and fw. Since the
	 * FW has processed the command and sent a success status, we expect
	 * this function to succeed as well. If it doesn't, CTRY needs to be
	 * reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent.
	 */
	/* TODO: This is rare, but still should also be handled */
	WARN_ON(1);

out:
	/* In some error cases, even a valid pdev_idx might not be available */
	if (pdev_idx != 255)
		ar = ab->pdevs[pdev_idx].ar;

	/* During the boot-time update, 'ar' might not be allocated,
	 * so the completion cannot be marked at that point.
	 * This boot-time update is handled in ath12k_mac_hw_register()
	 * before registering the hardware.
	 */
	if (ar)
		complete_all(&ar->regd_update_completed);

	return ret;
}

/* TLV iterator callback for the WMI READY event: copies the fixed ready
 * params (status, base MAC, pktlog checksum) and, when firmware supplies
 * enough extra MAC addresses for a multi-radio setup, assigns one
 * per-pdev MAC address each.
 */
static int ath12k_wmi_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len,
				const void *ptr, void *data)
{
	struct ath12k_wmi_rdy_parse *rdy_parse = data;
	struct wmi_ready_event fixed_param;
	struct ath12k_wmi_mac_addr_params *addr_list;
	struct ath12k_pdev *pdev;
	u32 num_mac_addr;
	int i;

	switch (tag) {
	case WMI_TAG_READY_EVENT:
		/* copy at most sizeof(fixed_param): older firmware may send
		 * a shorter struct, newer one a longer one
		 */
		memset(&fixed_param, 0, sizeof(fixed_param));
		memcpy(&fixed_param, (struct wmi_ready_event *)ptr,
		       min_t(u16, sizeof(fixed_param), len));
		ab->wlan_init_status = le32_to_cpu(fixed_param.ready_event_min.status);
		rdy_parse->num_extra_mac_addr =
			le32_to_cpu(fixed_param.ready_event_min.num_extra_mac_addr);

		ether_addr_copy(ab->mac_addr,
				fixed_param.ready_event_min.mac_addr.addr);
		ab->pktlog_defs_checksum = le32_to_cpu(fixed_param.pktlog_defs_checksum);
		ab->wmi_ready = true;
		break;
	case WMI_TAG_ARRAY_FIXED_STRUCT:
		addr_list = (struct ath12k_wmi_mac_addr_params *)ptr;
		num_mac_addr = rdy_parse->num_extra_mac_addr;

		/* per-pdev MACs only apply to multi-radio chips with enough
		 * firmware-provided addresses to cover every radio
		 */
		if (!(ab->num_radios > 1 && num_mac_addr >= ab->num_radios))
			break;

		for (i = 0; i < ab->num_radios; i++) {
			pdev = &ab->pdevs[i];
			ether_addr_copy(pdev->mac_addr, addr_list[i].addr);
		}
		ab->pdevs_macaddr_valid = true;
		break;
	default:
		break;
	}

	return 0;
}

/* Parse the WMI ready event TLVs and signal waiters on unified_ready. */
static int ath12k_ready_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct ath12k_wmi_rdy_parse rdy_parse = { };
	int ret;

	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath12k_wmi_rdy_parse, &rdy_parse);
	if (ret) {
		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
		return ret;
	}

	complete(&ab->wmi_ab.unified_ready);
	return 0;
}

/* Firmware confirmation of a peer delete: wake the waiter on
 * ar->peer_delete_done for the vdev named in the event.
 */
static void ath12k_peer_delete_resp_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct wmi_peer_delete_resp_event peer_del_resp;
	struct ath12k *ar;

	if (ath12k_pull_peer_del_resp_ev(ab, skb, &peer_del_resp) != 0) {
		ath12k_warn(ab, "failed to extract peer delete resp");
		return;
	}

	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(peer_del_resp.vdev_id));
	if (!ar) {
		/* NOTE(review): vdev_id is __le32 here (le32_to_cpu() above);
		 * it is printed raw — harmless on LE hosts, but sparse-unclean.
		 */
		ath12k_warn(ab, "invalid vdev id in peer delete resp ev %d",
			    peer_del_resp.vdev_id);
		rcu_read_unlock();
		return;
	}

	complete(&ar->peer_delete_done);
	rcu_read_unlock();
	ath12k_dbg(ab, ATH12K_DBG_WMI, "peer delete resp for vdev id %d addr %pM\n",
		   peer_del_resp.vdev_id, peer_del_resp.peer_macaddr.addr);
}

/* Firmware confirmation of a vdev delete: wake ar->vdev_delete_done. */
static void ath12k_vdev_delete_resp_event(struct ath12k_base *ab,
					  struct sk_buff *skb)
{
	struct ath12k *ar;
	u32 vdev_id = 0;

	if (ath12k_pull_vdev_del_resp_ev(ab, skb, &vdev_id) != 0) {
		ath12k_warn(ab, "failed to extract vdev delete resp");
		return;
	}

	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
	if (!ar) {
		ath12k_warn(ab, "invalid vdev id in vdev delete resp ev %d",
			    vdev_id);
		rcu_read_unlock();
		return;
	}

	complete(&ar->vdev_delete_done);

	rcu_read_unlock();

	ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev delete resp for vdev id %d\n",
		   vdev_id);
}

/* Map a WMI vdev start response status code to a human-readable string. */
static const char *ath12k_wmi_vdev_resp_print(u32 vdev_resp_status)
{
	switch (vdev_resp_status) {
	case WMI_VDEV_START_RESPONSE_INVALID_VDEVID:
		return "invalid vdev id";
	case WMI_VDEV_START_RESPONSE_NOT_SUPPORTED:
		return "not supported";
	case WMI_VDEV_START_RESPONSE_DFS_VIOLATION:
		return "dfs violation";
	case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN:
		return "invalid regdomain";
	default:
		return "unknown";
	}
}

/* Handle the vdev start/restart response: record the (possibly failing)
 * status and the max allowed TX power, then wake ar->vdev_setup_done.
 */
static void ath12k_vdev_start_resp_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct wmi_vdev_start_resp_event vdev_start_resp;
	struct ath12k *ar;
	u32 status;

	if (ath12k_pull_vdev_start_resp_tlv(ab, skb, &vdev_start_resp) != 0) {
		ath12k_warn(ab, "failed to extract vdev start resp");
		return;
	}

	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(vdev_start_resp.vdev_id));
	if (!ar) {
		ath12k_warn(ab, "invalid vdev id in vdev start resp ev %d",
			    vdev_start_resp.vdev_id);
		rcu_read_unlock();
		return;
	}

	/* Clear first so a stale failure from a previous start is not kept. */
	ar->last_wmi_vdev_start_status = 0;

	status = le32_to_cpu(vdev_start_resp.status);
	/* A non-zero status is unexpected; warn loudly once and remember it
	 * so the start path can report the failure.
	 */
	if (WARN_ON_ONCE(status)) {
		ath12k_warn(ab, "vdev start resp error status %d (%s)\n",
			    status, ath12k_wmi_vdev_resp_print(status));
		ar->last_wmi_vdev_start_status = status;
	}

	ar->max_allowed_tx_power = (s8)le32_to_cpu(vdev_start_resp.max_allowed_tx_power);

	complete(&ar->vdev_setup_done);

	rcu_read_unlock();

	ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev start resp for vdev id %d",
		   vdev_start_resp.vdev_id);
}

/* Beacon TX status event: currently only validated, the extracted
 * vdev_id/tx_status values are not consumed.
 */
static void ath12k_bcn_tx_status_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	u32 vdev_id, tx_status;

	if (ath12k_pull_bcn_tx_status_ev(ab, skb, &vdev_id, &tx_status) != 0) {
		ath12k_warn(ab, "failed to extract bcn tx status");
		return;
	}
}

/* Firmware confirmation that a vdev stopped: wake ar->vdev_setup_done. */
static void ath12k_vdev_stopped_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct ath12k *ar;
	u32 vdev_id = 0;

	if (ath12k_pull_vdev_stopped_param_tlv(ab, skb, &vdev_id) != 0) {
		ath12k_warn(ab, "failed to extract vdev stopped event");
		return;
	}

	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
	if (!ar) {
		ath12k_warn(ab, "invalid vdev id in vdev stopped ev %d",
			    vdev_id);
		rcu_read_unlock();
		return;
	}

	complete(&ar->vdev_setup_done);

	rcu_read_unlock();

	ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev stopped for vdev id %d", vdev_id);
}

/* Deliver a WMI-reported management frame to mac80211.
 *
 * Builds the ieee80211_rx_status (band/freq from the event, signal from
 * SNR + pdev noise floor, rate index from the reported rate), drops frames
 * during CAC or with decrypt/key-miss/CRC errors, and strips the Protected
 * bit for non-robust PMF-decrypted frames. Beacons are additionally passed
 * to ath12k_mac_handle_beacon().
 *
 * skb ownership: freed on every error path, otherwise handed to mac80211
 * via ieee80211_rx_ni().
 */
static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct ath12k_wmi_mgmt_rx_arg rx_ev = {};
	struct ath12k *ar;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr;
	u16 fc;
	struct ieee80211_supported_band *sband;
	s32 noise_floor;

	if (ath12k_pull_mgmt_rx_params_tlv(ab, skb, &rx_ev) != 0) {
		ath12k_warn(ab, "failed to extract mgmt rx event");
		dev_kfree_skb(skb);
		return;
	}

	memset(status, 0, sizeof(*status));

	ath12k_dbg(ab, ATH12K_DBG_MGMT, "mgmt rx event status %08x\n",
		   rx_ev.status);

	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_pdev_id(ab, rx_ev.pdev_id);

	if (!ar) {
		ath12k_warn(ab, "invalid pdev_id %d in mgmt_rx_event\n",
			    rx_ev.pdev_id);
		dev_kfree_skb(skb);
		goto exit;
	}

	if ((test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) ||
	    (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT |
			     WMI_RX_STATUS_ERR_KEY_CACHE_MISS |
			     WMI_RX_STATUS_ERR_CRC))) {
		dev_kfree_skb(skb);
		goto exit;
	}

	if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
		status->flag |= RX_FLAG_MMIC_ERROR;

	/* 6 GHz is classified by frequency; 2.4/5 GHz by channel number. */
	if (rx_ev.chan_freq >= ATH12K_MIN_6GHZ_FREQ &&
	    rx_ev.chan_freq <= ATH12K_MAX_6GHZ_FREQ) {
		status->band = NL80211_BAND_6GHZ;
		status->freq = rx_ev.chan_freq;
	} else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
		status->band = NL80211_BAND_2GHZ;
	} else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH12K_MAX_5GHZ_CHAN) {
		status->band = NL80211_BAND_5GHZ;
	} else {
		/* Shouldn't happen unless list of advertised channels to
		 * mac80211 has been changed.
		 */
		WARN_ON_ONCE(1);
		dev_kfree_skb(skb);
		goto exit;
	}

	if (rx_ev.phy_mode == MODE_11B &&
	    (status->band == NL80211_BAND_5GHZ || status->band == NL80211_BAND_6GHZ))
		ath12k_dbg(ab, ATH12K_DBG_WMI,
			   "wmi mgmt rx 11b (CCK) on 5/6GHz, band = %d\n", status->band);

	sband = &ar->mac.sbands[status->band];

	/* For non-6 GHz bands the frequency is derived from the channel. */
	if (status->band != NL80211_BAND_6GHZ)
		status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
							      status->band);

	spin_lock_bh(&ar->data_lock);
	noise_floor = ath12k_pdev_get_noise_floor(ar);
	spin_unlock_bh(&ar->data_lock);

	/* SNR is relative to the noise floor; sum gives the signal level. */
	status->signal = rx_ev.snr + noise_floor;
	/* rate is reported in 100 kbit/s units -- TODO confirm */
	status->rate_idx = ath12k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = le16_to_cpu(hdr->frame_control);

	/* Firmware is guaranteed to report all essential management frames via
	 * WMI while it can deliver some extra via HTT. Since there can be
	 * duplicates split the reporting wrt monitor/sniffing.
	 */
	status->flag |= RX_FLAG_SKIP_MONITOR;

	/* In case of PMF, FW delivers decrypted frames with Protected Bit set
	 * including group privacy action frames.
	 */
	if (ieee80211_has_protected(hdr->frame_control)) {
		status->flag |= RX_FLAG_DECRYPTED;

		if (!ieee80211_is_robust_mgmt_frame(skb)) {
			status->flag |= RX_FLAG_IV_STRIPPED |
					RX_FLAG_MMIC_STRIPPED;
			hdr->frame_control = __cpu_to_le16(fc &
					     ~IEEE80211_FCTL_PROTECTED);
		}
	}

	if (ieee80211_is_beacon(hdr->frame_control))
		ath12k_mac_handle_beacon(ar, skb);

	ath12k_dbg(ab, ATH12K_DBG_MGMT,
		   "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
		   skb, skb->len,
		   fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);

	ath12k_dbg(ab, ATH12K_DBG_MGMT,
		   "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
		   status->freq, status->band, status->signal,
		   status->rate_idx);

	ieee80211_rx_ni(ath12k_ar_to_hw(ar), skb);

exit:
	rcu_read_unlock();
}

/* Management TX completion: forward desc_id/status/ack_rssi to the
 * TX completion processing for the owning pdev.
 */
static void ath12k_mgmt_tx_compl_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct wmi_mgmt_tx_compl_event tx_compl_param = {};
	struct ath12k *ar;

	if (ath12k_pull_mgmt_tx_compl_param_tlv(ab, skb, &tx_compl_param) != 0) {
		ath12k_warn(ab, "failed to extract mgmt tx compl event");
		return;
	}

	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(tx_compl_param.pdev_id));
	if (!ar) {
		ath12k_warn(ab, "invalid pdev id %d in mgmt_tx_compl_event\n",
			    tx_compl_param.pdev_id);
		goto exit;
	}

	wmi_process_mgmt_tx_comp(ar, le32_to_cpu(tx_compl_param.desc_id),
				 le32_to_cpu(tx_compl_param.status),
				 le32_to_cpu(tx_compl_param.ack_rssi));

	ath12k_dbg(ab, ATH12K_DBG_MGMT,
		   "mgmt tx compl ev pdev_id %d, desc_id %d, status %d",
		   tx_compl_param.pdev_id, tx_compl_param.desc_id,
		   tx_compl_param.status);

exit:
	rcu_read_unlock();
}

/* Find the ath12k instance whose active scan is in @state on @vdev_id.
 *
 * Must be called within an RCU read-side section (uses rcu_dereference()
 * on pdevs_active). ar->scan state is sampled under ar->data_lock; the
 * returned pointer is valid only while the caller holds the RCU lock.
 */
static struct ath12k *ath12k_get_ar_on_scan_state(struct ath12k_base *ab,
						  u32 vdev_id,
						  enum ath12k_scan_state state)
{
	int i;
	struct ath12k_pdev *pdev;
	struct ath12k *ar;

	for (i = 0; i < ab->num_radios; i++) {
		pdev = rcu_dereference(ab->pdevs_active[i]);
		if (pdev && pdev->ar) {
			ar = pdev->ar;

			spin_lock_bh(&ar->data_lock);
			if (ar->scan.state == state &&
			    ar->scan.arvif &&
			    ar->scan.arvif->vdev_id == vdev_id) {
				spin_unlock_bh(&ar->data_lock);
				return ar;
			}
			spin_unlock_bh(&ar->data_lock);
		}
	}
	return NULL;
}

/* Dispatch a WMI scan event to the scan state machine of the owning radio. */
static void ath12k_scan_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct ath12k *ar;
	struct wmi_scan_event scan_ev = {};

	if (ath12k_pull_scan_ev(ab, skb, &scan_ev) != 0) {
		ath12k_warn(ab, "failed to extract scan event");
		return;
	}

	rcu_read_lock();

	/* In case the scan was cancelled, ex. during interface teardown,
	 * the interface will not be found in active interfaces.
	 * Rather, in such scenarios, iterate over the active pdev's to
	 * search 'ar' if the corresponding 'ar' scan is ABORTING and the
	 * aborting scan's vdev id matches this event info.
	 */
	if (le32_to_cpu(scan_ev.event_type) == WMI_SCAN_EVENT_COMPLETED &&
	    le32_to_cpu(scan_ev.reason) == WMI_SCAN_REASON_CANCELLED) {
		ar = ath12k_get_ar_on_scan_state(ab, le32_to_cpu(scan_ev.vdev_id),
						 ATH12K_SCAN_ABORTING);
		if (!ar)
			ar = ath12k_get_ar_on_scan_state(ab, le32_to_cpu(scan_ev.vdev_id),
							 ATH12K_SCAN_RUNNING);
	} else {
		ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(scan_ev.vdev_id));
	}

	if (!ar) {
		ath12k_warn(ab, "Received scan event for unknown vdev");
		rcu_read_unlock();
		return;
	}

	/* data_lock serializes scan.state against the scan state helpers. */
	spin_lock_bh(&ar->data_lock);

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
		   ath12k_wmi_event_scan_type_str(le32_to_cpu(scan_ev.event_type),
						  le32_to_cpu(scan_ev.reason)),
		   le32_to_cpu(scan_ev.event_type),
		   le32_to_cpu(scan_ev.reason),
		   le32_to_cpu(scan_ev.channel_freq),
		   le32_to_cpu(scan_ev.scan_req_id),
		   le32_to_cpu(scan_ev.scan_id),
		   le32_to_cpu(scan_ev.vdev_id),
		   ath12k_scan_state_str(ar->scan.state), ar->scan.state);

	switch (le32_to_cpu(scan_ev.event_type)) {
	case WMI_SCAN_EVENT_STARTED:
		ath12k_wmi_event_scan_started(ar);
		break;
	case WMI_SCAN_EVENT_COMPLETED:
		ath12k_wmi_event_scan_completed(ar);
		break;
	case WMI_SCAN_EVENT_BSS_CHANNEL:
		ath12k_wmi_event_scan_bss_chan(ar);
		break;
	case WMI_SCAN_EVENT_FOREIGN_CHAN:
		ath12k_wmi_event_scan_foreign_chan(ar, le32_to_cpu(scan_ev.channel_freq));
		break;
	case WMI_SCAN_EVENT_START_FAILED:
		ath12k_warn(ab, "received scan start failure event\n");
		ath12k_wmi_event_scan_start_failed(ar);
		break;
	case WMI_SCAN_EVENT_DEQUEUED:
		__ath12k_mac_scan_finish(ar);
		break;
	case WMI_SCAN_EVENT_PREEMPTED:
	case WMI_SCAN_EVENT_RESTARTED:
	case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
	default:
		break;
	}

	spin_unlock_bh(&ar->data_lock);

	rcu_read_unlock();
}

/* Handle a firmware STA kickout: locate the peer (validating the MLO link
 * id when applicable) and either report beacon miss (inactivity on a STA
 * interface) or a low-ack condition to mac80211.
 *
 * Lock order: rcu_read_lock() then ab->base_lock, released in reverse.
 */
static void ath12k_peer_sta_kickout_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct wmi_peer_sta_kickout_arg arg = {};
	struct ath12k_link_vif *arvif;
	struct ieee80211_sta *sta;
	struct ath12k_peer *peer;
	unsigned int link_id;
	struct ath12k *ar;

	if (ath12k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) {
		ath12k_warn(ab, "failed to extract peer sta kickout event");
		return;
	}

	rcu_read_lock();

	spin_lock_bh(&ab->base_lock);

	peer = ath12k_peer_find_by_addr(ab, arg.mac_addr);

	if (!peer) {
		ath12k_warn(ab, "peer not found %pM\n",
			    arg.mac_addr);
		goto exit;
	}

	arvif = ath12k_mac_get_arvif_by_vdev_id(ab, peer->vdev_id);
	if (!arvif) {
		ath12k_warn(ab, "invalid vdev id in peer sta kickout ev %d",
			    peer->vdev_id);
		goto exit;
	}

	ar = arvif->ar;

	if (peer->mlo) {
		/* Cross-check the mac80211 link id against the peer's
		 * recorded link id to filter stale/mismatched kickouts.
		 */
		sta = ieee80211_find_sta_by_link_addrs(ath12k_ar_to_hw(ar),
						       arg.mac_addr,
						       NULL, &link_id);
		if (peer->link_id != link_id) {
			ath12k_warn(ab,
				    "Spurious quick kickout for MLO STA %pM with invalid link_id, peer: %d, sta: %d\n",
				    arg.mac_addr, peer->link_id, link_id);
			goto exit;
		}
	} else {
		sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar),
						   arg.mac_addr, NULL);
	}
	if (!sta) {
		ath12k_warn(ab, "Spurious quick kickout for %sSTA %pM\n",
			    peer->mlo ? "MLO " : "", arg.mac_addr);
		goto exit;
	}

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "peer sta kickout event %pM reason: %d rssi: %d\n",
		   arg.mac_addr, arg.reason, arg.rssi);

	switch (arg.reason) {
	case WMI_PEER_STA_KICKOUT_REASON_INACTIVITY:
		if (arvif->ahvif->vif->type == NL80211_IFTYPE_STATION) {
			ath12k_mac_handle_beacon_miss(ar, arvif);
			break;
		}
		fallthrough;
	default:
		ieee80211_report_low_ack(sta, 10);
	}

exit:
	spin_unlock_bh(&ab->base_lock);
	rcu_read_unlock();
}

/* Handle a firmware roam event; only BEACON_MISS is acted upon, the other
 * known reasons are logged as not implemented.
 */
static void ath12k_roam_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct ath12k_link_vif *arvif;
	struct wmi_roam_event roam_ev = {};
	struct ath12k *ar;
	u32 vdev_id;
	u8 roam_reason;

	if (ath12k_pull_roam_ev(ab, skb, &roam_ev) != 0) {
		ath12k_warn(ab, "failed to extract roam event");
		return;
	}

	vdev_id = le32_to_cpu(roam_ev.vdev_id);
	roam_reason = u32_get_bits(le32_to_cpu(roam_ev.reason),
				   WMI_ROAM_REASON_MASK);

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "wmi roam event vdev %u reason %d rssi %d\n",
		   vdev_id, roam_reason, roam_ev.rssi);

	/* Scoped RCU read lock, released automatically at function exit. */
	guard(rcu)();
	arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_id);
	if (!arvif) {
		ath12k_warn(ab, "invalid vdev id in roam ev %d", vdev_id);
		return;
	}

	ar = arvif->ar;

	if (roam_reason >= WMI_ROAM_REASON_MAX)
		ath12k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n",
			    roam_reason, vdev_id);

	switch (roam_reason) {
	case WMI_ROAM_REASON_BEACON_MISS:
		ath12k_mac_handle_beacon_miss(ar, arvif);
		break;
	case WMI_ROAM_REASON_BETTER_AP:
	case WMI_ROAM_REASON_LOW_RSSI:
	case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
	case WMI_ROAM_REASON_HO_FAILED:
		ath12k_warn(ab, "ignoring not implemented roam event reason %d on vdev %i\n",
			    roam_reason, vdev_id);
		break;
	}
}

7428 static void ath12k_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb) 7429 { 7430 struct wmi_chan_info_event ch_info_ev = {}; 7431 struct ath12k *ar; 7432 struct survey_info *survey; 7433 int idx; 7434 /* HW channel counters frequency value in hertz */ 7435 u32 cc_freq_hz = ab->cc_freq_hz; 7436 7437 if (ath12k_pull_chan_info_ev(ab, skb, &ch_info_ev) != 0) { 7438 ath12k_warn(ab, "failed to extract chan info event"); 7439 return; 7440 } 7441 7442 ath12k_dbg(ab, ATH12K_DBG_WMI, 7443 "chan info vdev_id %d err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d mac_clk_mhz %d\n", 7444 ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq, 7445 ch_info_ev.cmd_flags, ch_info_ev.noise_floor, 7446 ch_info_ev.rx_clear_count, ch_info_ev.cycle_count, 7447 ch_info_ev.mac_clk_mhz); 7448 7449 if (le32_to_cpu(ch_info_ev.cmd_flags) == WMI_CHAN_INFO_END_RESP) { 7450 ath12k_dbg(ab, ATH12K_DBG_WMI, "chan info report completed\n"); 7451 return; 7452 } 7453 7454 rcu_read_lock(); 7455 ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(ch_info_ev.vdev_id)); 7456 if (!ar) { 7457 ath12k_warn(ab, "invalid vdev id in chan info ev %d", 7458 ch_info_ev.vdev_id); 7459 rcu_read_unlock(); 7460 return; 7461 } 7462 spin_lock_bh(&ar->data_lock); 7463 7464 switch (ar->scan.state) { 7465 case ATH12K_SCAN_IDLE: 7466 case ATH12K_SCAN_STARTING: 7467 ath12k_warn(ab, "received chan info event without a scan request, ignoring\n"); 7468 goto exit; 7469 case ATH12K_SCAN_RUNNING: 7470 case ATH12K_SCAN_ABORTING: 7471 break; 7472 } 7473 7474 idx = freq_to_idx(ar, le32_to_cpu(ch_info_ev.freq)); 7475 if (idx >= ARRAY_SIZE(ar->survey)) { 7476 ath12k_warn(ab, "chan info: invalid frequency %d (idx %d out of bounds)\n", 7477 ch_info_ev.freq, idx); 7478 goto exit; 7479 } 7480 7481 /* If FW provides MAC clock frequency in Mhz, overriding the initialized 7482 * HW channel counters frequency value 7483 */ 7484 if (ch_info_ev.mac_clk_mhz) 7485 cc_freq_hz = 
(le32_to_cpu(ch_info_ev.mac_clk_mhz) * 1000); 7486 7487 if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_START_RESP) { 7488 survey = &ar->survey[idx]; 7489 memset(survey, 0, sizeof(*survey)); 7490 survey->noise = le32_to_cpu(ch_info_ev.noise_floor); 7491 survey->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME | 7492 SURVEY_INFO_TIME_BUSY; 7493 survey->time = div_u64(le32_to_cpu(ch_info_ev.cycle_count), cc_freq_hz); 7494 survey->time_busy = div_u64(le32_to_cpu(ch_info_ev.rx_clear_count), 7495 cc_freq_hz); 7496 } 7497 exit: 7498 spin_unlock_bh(&ar->data_lock); 7499 rcu_read_unlock(); 7500 } 7501 7502 static void 7503 ath12k_pdev_bss_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb) 7504 { 7505 struct wmi_pdev_bss_chan_info_event bss_ch_info_ev = {}; 7506 struct survey_info *survey; 7507 struct ath12k *ar; 7508 u32 cc_freq_hz = ab->cc_freq_hz; 7509 u64 busy, total, tx, rx, rx_bss; 7510 int idx; 7511 7512 if (ath12k_pull_pdev_bss_chan_info_ev(ab, skb, &bss_ch_info_ev) != 0) { 7513 ath12k_warn(ab, "failed to extract pdev bss chan info event"); 7514 return; 7515 } 7516 7517 busy = (u64)(le32_to_cpu(bss_ch_info_ev.rx_clear_count_high)) << 32 | 7518 le32_to_cpu(bss_ch_info_ev.rx_clear_count_low); 7519 7520 total = (u64)(le32_to_cpu(bss_ch_info_ev.cycle_count_high)) << 32 | 7521 le32_to_cpu(bss_ch_info_ev.cycle_count_low); 7522 7523 tx = (u64)(le32_to_cpu(bss_ch_info_ev.tx_cycle_count_high)) << 32 | 7524 le32_to_cpu(bss_ch_info_ev.tx_cycle_count_low); 7525 7526 rx = (u64)(le32_to_cpu(bss_ch_info_ev.rx_cycle_count_high)) << 32 | 7527 le32_to_cpu(bss_ch_info_ev.rx_cycle_count_low); 7528 7529 rx_bss = (u64)(le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_high)) << 32 | 7530 le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_low); 7531 7532 ath12k_dbg(ab, ATH12K_DBG_WMI, 7533 "pdev bss chan info:\n pdev_id: %d freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n", 7534 bss_ch_info_ev.pdev_id, bss_ch_info_ev.freq, 7535 bss_ch_info_ev.noise_floor, busy, 
total, 7536 tx, rx, rx_bss); 7537 7538 rcu_read_lock(); 7539 ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(bss_ch_info_ev.pdev_id)); 7540 7541 if (!ar) { 7542 ath12k_warn(ab, "invalid pdev id %d in bss_chan_info event\n", 7543 bss_ch_info_ev.pdev_id); 7544 rcu_read_unlock(); 7545 return; 7546 } 7547 7548 spin_lock_bh(&ar->data_lock); 7549 idx = freq_to_idx(ar, le32_to_cpu(bss_ch_info_ev.freq)); 7550 if (idx >= ARRAY_SIZE(ar->survey)) { 7551 ath12k_warn(ab, "bss chan info: invalid frequency %d (idx %d out of bounds)\n", 7552 bss_ch_info_ev.freq, idx); 7553 goto exit; 7554 } 7555 7556 survey = &ar->survey[idx]; 7557 7558 survey->noise = le32_to_cpu(bss_ch_info_ev.noise_floor); 7559 survey->time = div_u64(total, cc_freq_hz); 7560 survey->time_busy = div_u64(busy, cc_freq_hz); 7561 survey->time_rx = div_u64(rx_bss, cc_freq_hz); 7562 survey->time_tx = div_u64(tx, cc_freq_hz); 7563 survey->filled |= (SURVEY_INFO_NOISE_DBM | 7564 SURVEY_INFO_TIME | 7565 SURVEY_INFO_TIME_BUSY | 7566 SURVEY_INFO_TIME_RX | 7567 SURVEY_INFO_TIME_TX); 7568 exit: 7569 spin_unlock_bh(&ar->data_lock); 7570 complete(&ar->bss_survey_done); 7571 7572 rcu_read_unlock(); 7573 } 7574 7575 static void ath12k_vdev_install_key_compl_event(struct ath12k_base *ab, 7576 struct sk_buff *skb) 7577 { 7578 struct wmi_vdev_install_key_complete_arg install_key_compl = {}; 7579 struct ath12k *ar; 7580 7581 if (ath12k_pull_vdev_install_key_compl_ev(ab, skb, &install_key_compl) != 0) { 7582 ath12k_warn(ab, "failed to extract install key compl event"); 7583 return; 7584 } 7585 7586 ath12k_dbg(ab, ATH12K_DBG_WMI, 7587 "vdev install key ev idx %d flags %08x macaddr %pM status %d\n", 7588 install_key_compl.key_idx, install_key_compl.key_flags, 7589 install_key_compl.macaddr, install_key_compl.status); 7590 7591 rcu_read_lock(); 7592 ar = ath12k_mac_get_ar_by_vdev_id(ab, install_key_compl.vdev_id); 7593 if (!ar) { 7594 ath12k_warn(ab, "invalid vdev id in install key compl ev %d", 7595 install_key_compl.vdev_id); 7596 
rcu_read_unlock(); 7597 return; 7598 } 7599 7600 ar->install_key_status = 0; 7601 7602 if (install_key_compl.status != WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS) { 7603 ath12k_warn(ab, "install key failed for %pM status %d\n", 7604 install_key_compl.macaddr, install_key_compl.status); 7605 ar->install_key_status = install_key_compl.status; 7606 } 7607 7608 complete(&ar->install_key_done); 7609 rcu_read_unlock(); 7610 } 7611 7612 static int ath12k_wmi_tlv_services_parser(struct ath12k_base *ab, 7613 u16 tag, u16 len, 7614 const void *ptr, 7615 void *data) 7616 { 7617 const struct wmi_service_available_event *ev; 7618 u16 wmi_ext2_service_words; 7619 __le32 *wmi_ext2_service_bitmap; 7620 int i, j; 7621 u16 expected_len; 7622 7623 expected_len = WMI_SERVICE_SEGMENT_BM_SIZE32 * sizeof(u32); 7624 if (len < expected_len) { 7625 ath12k_warn(ab, "invalid length %d for the WMI services available tag 0x%x\n", 7626 len, tag); 7627 return -EINVAL; 7628 } 7629 7630 switch (tag) { 7631 case WMI_TAG_SERVICE_AVAILABLE_EVENT: 7632 ev = (struct wmi_service_available_event *)ptr; 7633 for (i = 0, j = WMI_MAX_SERVICE; 7634 i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE; 7635 i++) { 7636 do { 7637 if (le32_to_cpu(ev->wmi_service_segment_bitmap[i]) & 7638 BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32)) 7639 set_bit(j, ab->wmi_ab.svc_map); 7640 } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32); 7641 } 7642 7643 ath12k_dbg(ab, ATH12K_DBG_WMI, 7644 "wmi_ext_service_bitmap 0x%x 0x%x 0x%x 0x%x", 7645 ev->wmi_service_segment_bitmap[0], 7646 ev->wmi_service_segment_bitmap[1], 7647 ev->wmi_service_segment_bitmap[2], 7648 ev->wmi_service_segment_bitmap[3]); 7649 break; 7650 case WMI_TAG_ARRAY_UINT32: 7651 wmi_ext2_service_bitmap = (__le32 *)ptr; 7652 wmi_ext2_service_words = len / sizeof(u32); 7653 for (i = 0, j = WMI_MAX_EXT_SERVICE; 7654 i < wmi_ext2_service_words && j < WMI_MAX_EXT2_SERVICE; 7655 i++) { 7656 do { 7657 if (__le32_to_cpu(wmi_ext2_service_bitmap[i]) & 7658 BIT(j % 
WMI_AVAIL_SERVICE_BITS_IN_SIZE32)) 7659 set_bit(j, ab->wmi_ab.svc_map); 7660 } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32); 7661 ath12k_dbg(ab, ATH12K_DBG_WMI, 7662 "wmi_ext2_service bitmap 0x%08x\n", 7663 __le32_to_cpu(wmi_ext2_service_bitmap[i])); 7664 } 7665 7666 break; 7667 } 7668 return 0; 7669 } 7670 7671 static int ath12k_service_available_event(struct ath12k_base *ab, struct sk_buff *skb) 7672 { 7673 int ret; 7674 7675 ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len, 7676 ath12k_wmi_tlv_services_parser, 7677 NULL); 7678 return ret; 7679 } 7680 7681 static void ath12k_peer_assoc_conf_event(struct ath12k_base *ab, struct sk_buff *skb) 7682 { 7683 struct wmi_peer_assoc_conf_arg peer_assoc_conf = {}; 7684 struct ath12k *ar; 7685 7686 if (ath12k_pull_peer_assoc_conf_ev(ab, skb, &peer_assoc_conf) != 0) { 7687 ath12k_warn(ab, "failed to extract peer assoc conf event"); 7688 return; 7689 } 7690 7691 ath12k_dbg(ab, ATH12K_DBG_WMI, 7692 "peer assoc conf ev vdev id %d macaddr %pM\n", 7693 peer_assoc_conf.vdev_id, peer_assoc_conf.macaddr); 7694 7695 rcu_read_lock(); 7696 ar = ath12k_mac_get_ar_by_vdev_id(ab, peer_assoc_conf.vdev_id); 7697 7698 if (!ar) { 7699 ath12k_warn(ab, "invalid vdev id in peer assoc conf ev %d", 7700 peer_assoc_conf.vdev_id); 7701 rcu_read_unlock(); 7702 return; 7703 } 7704 7705 complete(&ar->peer_assoc_done); 7706 rcu_read_unlock(); 7707 } 7708 7709 static void 7710 ath12k_wmi_fw_vdev_stats_dump(struct ath12k *ar, 7711 struct ath12k_fw_stats *fw_stats, 7712 char *buf, u32 *length) 7713 { 7714 const struct ath12k_fw_stats_vdev *vdev; 7715 u32 buf_len = ATH12K_FW_STATS_BUF_SIZE; 7716 struct ath12k_link_vif *arvif; 7717 u32 len = *length; 7718 u8 *vif_macaddr; 7719 int i; 7720 7721 len += scnprintf(buf + len, buf_len - len, "\n"); 7722 len += scnprintf(buf + len, buf_len - len, "%30s\n", 7723 "ath12k VDEV stats"); 7724 len += scnprintf(buf + len, buf_len - len, "%30s\n\n", 7725 "================="); 7726 7727 list_for_each_entry(vdev, 
&fw_stats->vdevs, list) { 7728 arvif = ath12k_mac_get_arvif(ar, vdev->vdev_id); 7729 if (!arvif) 7730 continue; 7731 vif_macaddr = arvif->ahvif->vif->addr; 7732 7733 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7734 "VDEV ID", vdev->vdev_id); 7735 len += scnprintf(buf + len, buf_len - len, "%30s %pM\n", 7736 "VDEV MAC address", vif_macaddr); 7737 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7738 "beacon snr", vdev->beacon_snr); 7739 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7740 "data snr", vdev->data_snr); 7741 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7742 "num rx frames", vdev->num_rx_frames); 7743 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7744 "num rts fail", vdev->num_rts_fail); 7745 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7746 "num rts success", vdev->num_rts_success); 7747 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7748 "num rx err", vdev->num_rx_err); 7749 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7750 "num rx discard", vdev->num_rx_discard); 7751 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7752 "num tx not acked", vdev->num_tx_not_acked); 7753 7754 for (i = 0 ; i < WLAN_MAX_AC; i++) 7755 len += scnprintf(buf + len, buf_len - len, 7756 "%25s [%02d] %u\n", 7757 "num tx frames", i, 7758 vdev->num_tx_frames[i]); 7759 7760 for (i = 0 ; i < WLAN_MAX_AC; i++) 7761 len += scnprintf(buf + len, buf_len - len, 7762 "%25s [%02d] %u\n", 7763 "num tx frames retries", i, 7764 vdev->num_tx_frames_retries[i]); 7765 7766 for (i = 0 ; i < WLAN_MAX_AC; i++) 7767 len += scnprintf(buf + len, buf_len - len, 7768 "%25s [%02d] %u\n", 7769 "num tx frames failures", i, 7770 vdev->num_tx_frames_failures[i]); 7771 7772 for (i = 0 ; i < MAX_TX_RATE_VALUES; i++) 7773 len += scnprintf(buf + len, buf_len - len, 7774 "%25s [%02d] 0x%08x\n", 7775 "tx rate history", i, 7776 vdev->tx_rate_history[i]); 7777 for (i = 0 ; i < MAX_TX_RATE_VALUES; i++) 7778 len += scnprintf(buf + 
len, buf_len - len, 7779 "%25s [%02d] %u\n", 7780 "beacon rssi history", i, 7781 vdev->beacon_rssi_history[i]); 7782 7783 len += scnprintf(buf + len, buf_len - len, "\n"); 7784 *length = len; 7785 } 7786 } 7787 7788 static void 7789 ath12k_wmi_fw_bcn_stats_dump(struct ath12k *ar, 7790 struct ath12k_fw_stats *fw_stats, 7791 char *buf, u32 *length) 7792 { 7793 const struct ath12k_fw_stats_bcn *bcn; 7794 u32 buf_len = ATH12K_FW_STATS_BUF_SIZE; 7795 struct ath12k_link_vif *arvif; 7796 u32 len = *length; 7797 size_t num_bcn; 7798 7799 num_bcn = list_count_nodes(&fw_stats->bcn); 7800 7801 len += scnprintf(buf + len, buf_len - len, "\n"); 7802 len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n", 7803 "ath12k Beacon stats", num_bcn); 7804 len += scnprintf(buf + len, buf_len - len, "%30s\n\n", 7805 "==================="); 7806 7807 list_for_each_entry(bcn, &fw_stats->bcn, list) { 7808 arvif = ath12k_mac_get_arvif(ar, bcn->vdev_id); 7809 if (!arvif) 7810 continue; 7811 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7812 "VDEV ID", bcn->vdev_id); 7813 len += scnprintf(buf + len, buf_len - len, "%30s %pM\n", 7814 "VDEV MAC address", arvif->ahvif->vif->addr); 7815 len += scnprintf(buf + len, buf_len - len, "%30s\n\n", 7816 "================"); 7817 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7818 "Num of beacon tx success", bcn->tx_bcn_succ_cnt); 7819 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7820 "Num of beacon tx failures", bcn->tx_bcn_outage_cnt); 7821 7822 len += scnprintf(buf + len, buf_len - len, "\n"); 7823 *length = len; 7824 } 7825 } 7826 7827 static void 7828 ath12k_wmi_fw_pdev_base_stats_dump(const struct ath12k_fw_stats_pdev *pdev, 7829 char *buf, u32 *length, u64 fw_soc_drop_cnt) 7830 { 7831 u32 len = *length; 7832 u32 buf_len = ATH12K_FW_STATS_BUF_SIZE; 7833 7834 len = scnprintf(buf + len, buf_len - len, "\n"); 7835 len += scnprintf(buf + len, buf_len - len, "%30s\n", 7836 "ath12k PDEV stats"); 7837 len += scnprintf(buf + 
len, buf_len - len, "%30s\n\n", 7838 "================="); 7839 7840 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7841 "Channel noise floor", pdev->ch_noise_floor); 7842 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7843 "Channel TX power", pdev->chan_tx_power); 7844 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7845 "TX frame count", pdev->tx_frame_count); 7846 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7847 "RX frame count", pdev->rx_frame_count); 7848 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7849 "RX clear count", pdev->rx_clear_count); 7850 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7851 "Cycle count", pdev->cycle_count); 7852 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7853 "PHY error count", pdev->phy_err_count); 7854 len += scnprintf(buf + len, buf_len - len, "%30s %10llu\n", 7855 "soc drop count", fw_soc_drop_cnt); 7856 7857 *length = len; 7858 } 7859 7860 static void 7861 ath12k_wmi_fw_pdev_tx_stats_dump(const struct ath12k_fw_stats_pdev *pdev, 7862 char *buf, u32 *length) 7863 { 7864 u32 len = *length; 7865 u32 buf_len = ATH12K_FW_STATS_BUF_SIZE; 7866 7867 len += scnprintf(buf + len, buf_len - len, "\n%30s\n", 7868 "ath12k PDEV TX stats"); 7869 len += scnprintf(buf + len, buf_len - len, "%30s\n\n", 7870 "===================="); 7871 7872 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7873 "HTT cookies queued", pdev->comp_queued); 7874 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7875 "HTT cookies disp.", pdev->comp_delivered); 7876 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7877 "MSDU queued", pdev->msdu_enqued); 7878 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7879 "MPDU queued", pdev->mpdu_enqued); 7880 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7881 "MSDUs dropped", pdev->wmm_drop); 7882 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7883 "Local enqued", pdev->local_enqued); 
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Local freed", pdev->local_freed);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "HW queued", pdev->hw_queued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "PPDUs reaped", pdev->hw_reaped);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Num underruns", pdev->underrun);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "PPDUs cleaned", pdev->tx_abort);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MPDUs requeued", pdev->mpdus_requed);
	/* Remaining TX counters are unsigned in the target struct (%10u). */
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Excessive retries", pdev->tx_ko);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "HW rate", pdev->data_rc);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Sched self triggers", pdev->self_triggers);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Dropped due to SW retries",
			 pdev->sw_retry_failure);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Illegal rate phy errors",
			 pdev->illgl_rate_phy_err);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "PDEV continuous xretry", pdev->pdev_cont_xretry);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "TX timeout", pdev->pdev_tx_timeout);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "PDEV resets", pdev->pdev_resets);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Stateless TIDs alloc failures",
			 pdev->stateless_tid_alloc_failure);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "PHY underrun", pdev->phy_underrun);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "MPDU is more than txop limit", pdev->txop_ovf);
	*length = len;
}

/* Append PDEV RX-path stats to @buf, advancing @length. */
static void
ath12k_wmi_fw_pdev_rx_stats_dump(const struct ath12k_fw_stats_pdev *pdev,
				 char *buf, u32 *length)
{
	u32 len = *length;
	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;

	len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
			 "ath12k PDEV RX stats");
	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
			 "====================");

	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Mid PPDU route change",
			 pdev->mid_ppdu_route_change);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Tot. number of statuses", pdev->status_rcvd);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Extra frags on rings 0", pdev->r0_frags);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Extra frags on rings 1", pdev->r1_frags);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Extra frags on rings 2", pdev->r2_frags);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Extra frags on rings 3", pdev->r3_frags);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MSDUs delivered to HTT", pdev->htt_msdus);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MPDUs delivered to HTT", pdev->htt_mpdus);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MSDUs delivered to stack", pdev->loc_msdus);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MPDUs delivered to stack", pdev->loc_mpdus);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Oversized AMSUs", pdev->oversize_amsdu);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "PHY errors", pdev->phy_errs);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "PHY errors drops", pdev->phy_err_drop);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
	*length = len;
}

/* Dump base/TX/RX sections for the first pdev entry in @fw_stats. */
static void
ath12k_wmi_fw_pdev_stats_dump(struct ath12k *ar,
			      struct ath12k_fw_stats
*fw_stats, 7971 char *buf, u32 *length) 7972 { 7973 const struct ath12k_fw_stats_pdev *pdev; 7974 u32 len = *length; 7975 7976 pdev = list_first_entry_or_null(&fw_stats->pdevs, 7977 struct ath12k_fw_stats_pdev, list); 7978 if (!pdev) { 7979 ath12k_warn(ar->ab, "failed to get pdev stats\n"); 7980 return; 7981 } 7982 7983 ath12k_wmi_fw_pdev_base_stats_dump(pdev, buf, &len, 7984 ar->ab->fw_soc_drop_count); 7985 ath12k_wmi_fw_pdev_tx_stats_dump(pdev, buf, &len); 7986 ath12k_wmi_fw_pdev_rx_stats_dump(pdev, buf, &len); 7987 7988 *length = len; 7989 } 7990 7991 void ath12k_wmi_fw_stats_dump(struct ath12k *ar, 7992 struct ath12k_fw_stats *fw_stats, 7993 u32 stats_id, char *buf) 7994 { 7995 u32 len = 0; 7996 u32 buf_len = ATH12K_FW_STATS_BUF_SIZE; 7997 7998 spin_lock_bh(&ar->data_lock); 7999 8000 switch (stats_id) { 8001 case WMI_REQUEST_VDEV_STAT: 8002 ath12k_wmi_fw_vdev_stats_dump(ar, fw_stats, buf, &len); 8003 break; 8004 case WMI_REQUEST_BCN_STAT: 8005 ath12k_wmi_fw_bcn_stats_dump(ar, fw_stats, buf, &len); 8006 break; 8007 case WMI_REQUEST_PDEV_STAT: 8008 ath12k_wmi_fw_pdev_stats_dump(ar, fw_stats, buf, &len); 8009 break; 8010 default: 8011 break; 8012 } 8013 8014 spin_unlock_bh(&ar->data_lock); 8015 8016 if (len >= buf_len) 8017 buf[len - 1] = 0; 8018 else 8019 buf[len] = 0; 8020 8021 ath12k_fw_stats_reset(ar); 8022 } 8023 8024 static void 8025 ath12k_wmi_pull_vdev_stats(const struct wmi_vdev_stats_params *src, 8026 struct ath12k_fw_stats_vdev *dst) 8027 { 8028 int i; 8029 8030 dst->vdev_id = le32_to_cpu(src->vdev_id); 8031 dst->beacon_snr = le32_to_cpu(src->beacon_snr); 8032 dst->data_snr = le32_to_cpu(src->data_snr); 8033 dst->num_rx_frames = le32_to_cpu(src->num_rx_frames); 8034 dst->num_rts_fail = le32_to_cpu(src->num_rts_fail); 8035 dst->num_rts_success = le32_to_cpu(src->num_rts_success); 8036 dst->num_rx_err = le32_to_cpu(src->num_rx_err); 8037 dst->num_rx_discard = le32_to_cpu(src->num_rx_discard); 8038 dst->num_tx_not_acked = 
		le32_to_cpu(src->num_tx_not_acked);

	for (i = 0; i < WLAN_MAX_AC; i++)
		dst->num_tx_frames[i] =
			le32_to_cpu(src->num_tx_frames[i]);

	for (i = 0; i < WLAN_MAX_AC; i++)
		dst->num_tx_frames_retries[i] =
			le32_to_cpu(src->num_tx_frames_retries[i]);

	for (i = 0; i < WLAN_MAX_AC; i++)
		dst->num_tx_frames_failures[i] =
			le32_to_cpu(src->num_tx_frames_failures[i]);

	for (i = 0; i < MAX_TX_RATE_VALUES; i++)
		dst->tx_rate_history[i] =
			le32_to_cpu(src->tx_rate_history[i]);

	/* beacon_rssi_history shares the MAX_TX_RATE_VALUES bound */
	for (i = 0; i < MAX_TX_RATE_VALUES; i++)
		dst->beacon_rssi_history[i] =
			le32_to_cpu(src->beacon_rssi_history[i]);
}

/* Convert a firmware beacon stats TLV into host order. */
static void
ath12k_wmi_pull_bcn_stats(const struct ath12k_wmi_bcn_stats_params *src,
			  struct ath12k_fw_stats_bcn *dst)
{
	dst->vdev_id = le32_to_cpu(src->vdev_id);
	dst->tx_bcn_succ_cnt = le32_to_cpu(src->tx_bcn_succ_cnt);
	dst->tx_bcn_outage_cnt = le32_to_cpu(src->tx_bcn_outage_cnt);
}

/* Convert base PDEV stats; chan_nf is signed, hence a_sle32_to_cpu. */
static void
ath12k_wmi_pull_pdev_stats_base(const struct ath12k_wmi_pdev_base_stats_params *src,
				struct ath12k_fw_stats_pdev *dst)
{
	dst->ch_noise_floor = a_sle32_to_cpu(src->chan_nf);
	dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count);
	dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count);
	dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count);
	dst->cycle_count = __le32_to_cpu(src->cycle_count);
	dst->phy_err_count = __le32_to_cpu(src->phy_err_count);
	dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr);
}

/* Convert PDEV TX-path stats; the first group is signed (a_sle32),
 * the rest unsigned (__le32).
 */
static void
ath12k_wmi_pull_pdev_stats_tx(const struct ath12k_wmi_pdev_tx_stats_params *src,
			      struct ath12k_fw_stats_pdev *dst)
{
	dst->comp_queued = a_sle32_to_cpu(src->comp_queued);
	dst->comp_delivered = a_sle32_to_cpu(src->comp_delivered);
	dst->msdu_enqued = a_sle32_to_cpu(src->msdu_enqued);
	dst->mpdu_enqued = a_sle32_to_cpu(src->mpdu_enqued);
	dst->wmm_drop = a_sle32_to_cpu(src->wmm_drop);
	dst->local_enqued = a_sle32_to_cpu(src->local_enqued);
	dst->local_freed = a_sle32_to_cpu(src->local_freed);
	dst->hw_queued = a_sle32_to_cpu(src->hw_queued);
	dst->hw_reaped = a_sle32_to_cpu(src->hw_reaped);
	dst->underrun = a_sle32_to_cpu(src->underrun);
	dst->tx_abort = a_sle32_to_cpu(src->tx_abort);
	dst->mpdus_requed = a_sle32_to_cpu(src->mpdus_requed);
	dst->tx_ko = __le32_to_cpu(src->tx_ko);
	dst->data_rc = __le32_to_cpu(src->data_rc);
	dst->self_triggers = __le32_to_cpu(src->self_triggers);
	dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure);
	dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err);
	dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry);
	dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout);
	dst->pdev_resets = __le32_to_cpu(src->pdev_resets);
	dst->stateless_tid_alloc_failure =
		__le32_to_cpu(src->stateless_tid_alloc_failure);
	dst->phy_underrun = __le32_to_cpu(src->phy_underrun);
	dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
}

/* Convert PDEV RX-path stats (all signed, a_sle32). */
static void
ath12k_wmi_pull_pdev_stats_rx(const struct ath12k_wmi_pdev_rx_stats_params *src,
			      struct ath12k_fw_stats_pdev *dst)
{
	dst->mid_ppdu_route_change =
		a_sle32_to_cpu(src->mid_ppdu_route_change);
	dst->status_rcvd = a_sle32_to_cpu(src->status_rcvd);
	dst->r0_frags = a_sle32_to_cpu(src->r0_frags);
	dst->r1_frags = a_sle32_to_cpu(src->r1_frags);
	dst->r2_frags = a_sle32_to_cpu(src->r2_frags);
	dst->r3_frags = a_sle32_to_cpu(src->r3_frags);
	dst->htt_msdus = a_sle32_to_cpu(src->htt_msdus);
	dst->htt_mpdus = a_sle32_to_cpu(src->htt_mpdus);
	dst->loc_msdus = a_sle32_to_cpu(src->loc_msdus);
	dst->loc_mpdus = a_sle32_to_cpu(src->loc_mpdus);
	dst->oversize_amsdu = a_sle32_to_cpu(src->oversize_amsdu);
	dst->phy_errs = a_sle32_to_cpu(src->phy_errs);
	dst->phy_err_drop =
		a_sle32_to_cpu(src->phy_err_drop);
	dst->mpdu_errs = a_sle32_to_cpu(src->mpdu_errs);
}

/* Parse the WMI_TAG_ARRAY_BYTE payload of a stats event: walk the packed
 * vdev/bcn/pdev stat records, convert each to host order and queue it on
 * the corresponding list in @parse->stats. Runs under rcu_read_lock()
 * because it dereferences ar/arvif/sta lookups. Returns 0 or -EPROTO /
 * -EINVAL on malformed input.
 */
static int ath12k_wmi_tlv_fw_stats_data_parse(struct ath12k_base *ab,
					      struct wmi_tlv_fw_stats_parse *parse,
					      const void *ptr,
					      u16 len)
{
	const struct wmi_stats_event *ev = parse->ev;
	struct ath12k_fw_stats *stats = parse->stats;
	struct ath12k *ar;
	struct ath12k_link_vif *arvif;
	struct ieee80211_sta *sta;
	struct ath12k_sta *ahsta;
	struct ath12k_link_sta *arsta;
	int i, ret = 0;
	const void *data = ptr;

	if (!ev) {
		ath12k_warn(ab, "failed to fetch update stats ev");
		return -EPROTO;
	}

	if (!stats)
		return -EINVAL;

	rcu_read_lock();

	stats->pdev_id = le32_to_cpu(ev->pdev_id);
	ar = ath12k_mac_get_ar_by_pdev_id(ab, stats->pdev_id);
	if (!ar) {
		ath12k_warn(ab, "invalid pdev id %d in update stats event\n",
			    le32_to_cpu(ev->pdev_id));
		ret = -EPROTO;
		goto exit;
	}

	for (i = 0; i < le32_to_cpu(ev->num_vdev_stats); i++) {
		const struct wmi_vdev_stats_params *src;
		struct ath12k_fw_stats_vdev *dst;

		src = data;
		/* Bounds check before touching the record */
		if (len < sizeof(*src)) {
			ret = -EPROTO;
			goto exit;
		}

		/* Side effect: update the peer's cached beacon RSSI */
		arvif = ath12k_mac_get_arvif(ar, le32_to_cpu(src->vdev_id));
		if (arvif) {
			sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar),
							   arvif->bssid,
							   NULL);
			if (sta) {
				ahsta = ath12k_sta_to_ahsta(sta);
				arsta = &ahsta->deflink;
				arsta->rssi_beacon = le32_to_cpu(src->beacon_snr);
				ath12k_dbg(ab, ATH12K_DBG_WMI,
					   "wmi stats vdev id %d snr %d\n",
					   src->vdev_id, src->beacon_snr);
			} else {
				ath12k_dbg(ab, ATH12K_DBG_WMI,
					   "not found station bssid %pM for vdev stat\n",
					   arvif->bssid);
			}
		}

		data += sizeof(*src);
		len -= sizeof(*src);
		/* GFP_ATOMIC: event path; on failure skip this record */
		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
		if (!dst)
			continue;
		ath12k_wmi_pull_vdev_stats(src, dst);
		stats->stats_id = WMI_REQUEST_VDEV_STAT;
		list_add_tail(&dst->list, &stats->vdevs);
	}
	for (i = 0; i < le32_to_cpu(ev->num_bcn_stats); i++) {
		const struct ath12k_wmi_bcn_stats_params *src;
		struct ath12k_fw_stats_bcn *dst;

		src = data;
		if (len < sizeof(*src)) {
			ret = -EPROTO;
			goto exit;
		}

		data += sizeof(*src);
		len -= sizeof(*src);
		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
		if (!dst)
			continue;
		ath12k_wmi_pull_bcn_stats(src, dst);
		stats->stats_id = WMI_REQUEST_BCN_STAT;
		list_add_tail(&dst->list, &stats->bcn);
	}
	for (i = 0; i < le32_to_cpu(ev->num_pdev_stats); i++) {
		const struct ath12k_wmi_pdev_stats_params *src;
		struct ath12k_fw_stats_pdev *dst;

		src = data;
		if (len < sizeof(*src)) {
			ret = -EPROTO;
			goto exit;
		}

		stats->stats_id = WMI_REQUEST_PDEV_STAT;

		data += sizeof(*src);
		len -= sizeof(*src);

		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
		if (!dst)
			continue;

		ath12k_wmi_pull_pdev_stats_base(&src->base, dst);
		ath12k_wmi_pull_pdev_stats_tx(&src->tx, dst);
		ath12k_wmi_pull_pdev_stats_rx(&src->rx, dst);
		list_add_tail(&dst->list, &stats->pdevs);
	}

exit:
	rcu_read_unlock();
	return ret;
}

/* TLV iterator callback: pull one WMI_TAG_RSSI_STATS record and copy the
 * per-chain beacon RSSI averages into the matching station's chain_signal.
 */
static int ath12k_wmi_tlv_rssi_chain_parse(struct ath12k_base *ab,
					   u16 tag, u16 len,
					   const void *ptr, void *data)
{
	const struct wmi_rssi_stat_params *stats_rssi = ptr;
	struct wmi_tlv_fw_stats_parse *parse = data;
	const struct wmi_stats_event *ev = parse->ev;
	struct ath12k_fw_stats *stats = parse->stats;
	struct ath12k_link_vif *arvif;
	struct ath12k_link_sta *arsta;
	struct ieee80211_sta *sta;
	struct ath12k_sta *ahsta;
	struct ath12k *ar;
	int vdev_id;
	int j;

	if (!ev) {
		ath12k_warn(ab, "failed to fetch update stats ev");
return -EPROTO; 8274 } 8275 8276 if (tag != WMI_TAG_RSSI_STATS) 8277 return -EPROTO; 8278 8279 if (!stats) 8280 return -EINVAL; 8281 8282 stats->pdev_id = le32_to_cpu(ev->pdev_id); 8283 vdev_id = le32_to_cpu(stats_rssi->vdev_id); 8284 guard(rcu)(); 8285 ar = ath12k_mac_get_ar_by_pdev_id(ab, stats->pdev_id); 8286 if (!ar) { 8287 ath12k_warn(ab, "invalid pdev id %d in rssi chain parse\n", 8288 stats->pdev_id); 8289 return -EPROTO; 8290 } 8291 8292 arvif = ath12k_mac_get_arvif(ar, vdev_id); 8293 if (!arvif) { 8294 ath12k_warn(ab, "not found vif for vdev id %d\n", vdev_id); 8295 return -EPROTO; 8296 } 8297 8298 ath12k_dbg(ab, ATH12K_DBG_WMI, 8299 "stats bssid %pM vif %p\n", 8300 arvif->bssid, arvif->ahvif->vif); 8301 8302 sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar), 8303 arvif->bssid, 8304 NULL); 8305 if (!sta) { 8306 ath12k_dbg(ab, ATH12K_DBG_WMI, 8307 "not found station of bssid %pM for rssi chain\n", 8308 arvif->bssid); 8309 return -EPROTO; 8310 } 8311 8312 ahsta = ath12k_sta_to_ahsta(sta); 8313 arsta = &ahsta->deflink; 8314 8315 BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) > 8316 ARRAY_SIZE(stats_rssi->rssi_avg_beacon)); 8317 8318 for (j = 0; j < ARRAY_SIZE(arsta->chain_signal); j++) 8319 arsta->chain_signal[j] = le32_to_cpu(stats_rssi->rssi_avg_beacon[j]); 8320 8321 stats->stats_id = WMI_REQUEST_RSSI_PER_CHAIN_STAT; 8322 8323 return 0; 8324 } 8325 8326 static int ath12k_wmi_tlv_fw_stats_parse(struct ath12k_base *ab, 8327 u16 tag, u16 len, 8328 const void *ptr, void *data) 8329 { 8330 struct wmi_tlv_fw_stats_parse *parse = data; 8331 int ret = 0; 8332 8333 switch (tag) { 8334 case WMI_TAG_STATS_EVENT: 8335 parse->ev = ptr; 8336 break; 8337 case WMI_TAG_ARRAY_BYTE: 8338 ret = ath12k_wmi_tlv_fw_stats_data_parse(ab, parse, ptr, len); 8339 break; 8340 case WMI_TAG_PER_CHAIN_RSSI_STATS: 8341 parse->rssi = ptr; 8342 if (le32_to_cpu(parse->ev->stats_id) & WMI_REQUEST_RSSI_PER_CHAIN_STAT) 8343 parse->rssi_num = le32_to_cpu(parse->rssi->num_per_chain_rssi); 8344 
break; 8345 case WMI_TAG_ARRAY_STRUCT: 8346 if (parse->rssi_num && !parse->chain_rssi_done) { 8347 ret = ath12k_wmi_tlv_iter(ab, ptr, len, 8348 ath12k_wmi_tlv_rssi_chain_parse, 8349 parse); 8350 if (ret) 8351 return ret; 8352 8353 parse->chain_rssi_done = true; 8354 } 8355 break; 8356 default: 8357 break; 8358 } 8359 return ret; 8360 } 8361 8362 static int ath12k_wmi_pull_fw_stats(struct ath12k_base *ab, struct sk_buff *skb, 8363 struct ath12k_fw_stats *stats) 8364 { 8365 struct wmi_tlv_fw_stats_parse parse = {}; 8366 8367 stats->stats_id = 0; 8368 parse.stats = stats; 8369 8370 return ath12k_wmi_tlv_iter(ab, skb->data, skb->len, 8371 ath12k_wmi_tlv_fw_stats_parse, 8372 &parse); 8373 } 8374 8375 static void ath12k_wmi_fw_stats_process(struct ath12k *ar, 8376 struct ath12k_fw_stats *stats) 8377 { 8378 struct ath12k_base *ab = ar->ab; 8379 struct ath12k_pdev *pdev; 8380 bool is_end = true; 8381 size_t total_vdevs_started = 0; 8382 int i; 8383 8384 if (stats->stats_id == WMI_REQUEST_VDEV_STAT) { 8385 if (list_empty(&stats->vdevs)) { 8386 ath12k_warn(ab, "empty vdev stats"); 8387 return; 8388 } 8389 /* FW sends all the active VDEV stats irrespective of PDEV, 8390 * hence limit until the count of all VDEVs started 8391 */ 8392 rcu_read_lock(); 8393 for (i = 0; i < ab->num_radios; i++) { 8394 pdev = rcu_dereference(ab->pdevs_active[i]); 8395 if (pdev && pdev->ar) 8396 total_vdevs_started += pdev->ar->num_started_vdevs; 8397 } 8398 rcu_read_unlock(); 8399 8400 if (total_vdevs_started) 8401 is_end = ((++ar->fw_stats.num_vdev_recvd) == 8402 total_vdevs_started); 8403 8404 list_splice_tail_init(&stats->vdevs, 8405 &ar->fw_stats.vdevs); 8406 8407 if (is_end) 8408 complete(&ar->fw_stats_done); 8409 8410 return; 8411 } 8412 8413 if (stats->stats_id == WMI_REQUEST_BCN_STAT) { 8414 if (list_empty(&stats->bcn)) { 8415 ath12k_warn(ab, "empty beacon stats"); 8416 return; 8417 } 8418 /* Mark end until we reached the count of all started VDEVs 8419 * within the PDEV 8420 */ 8421 if 
		   (ar->num_started_vdevs)
			is_end = ((++ar->fw_stats.num_bcn_recvd) ==
				  ar->num_started_vdevs);

		list_splice_tail_init(&stats->bcn,
				      &ar->fw_stats.bcn);

		if (is_end)
			complete(&ar->fw_stats_done);
	}
}

/* WMI_UPDATE_STATS_EVENTID handler: parse the event, route the result by
 * stats_id and wake any waiter on fw_stats_complete / fw_stats_done.
 */
static void ath12k_update_stats_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct ath12k_fw_stats stats = {};
	struct ath12k *ar;
	int ret;

	INIT_LIST_HEAD(&stats.pdevs);
	INIT_LIST_HEAD(&stats.vdevs);
	INIT_LIST_HEAD(&stats.bcn);

	ret = ath12k_wmi_pull_fw_stats(ab, skb, &stats);
	if (ret) {
		ath12k_warn(ab, "failed to pull fw stats: %d\n", ret);
		goto free;
	}

	ath12k_dbg(ab, ATH12K_DBG_WMI, "event update stats");

	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_pdev_id(ab, stats.pdev_id);
	if (!ar) {
		rcu_read_unlock();
		ath12k_warn(ab, "failed to get ar for pdev_id %d: %d\n",
			    stats.pdev_id, ret);
		goto free;
	}

	spin_lock_bh(&ar->data_lock);

	/* Handle WMI_REQUEST_PDEV_STAT status update */
	if (stats.stats_id == WMI_REQUEST_PDEV_STAT) {
		list_splice_tail_init(&stats.pdevs, &ar->fw_stats.pdevs);
		complete(&ar->fw_stats_done);
		goto complete;
	}

	/* Handle WMI_REQUEST_RSSI_PER_CHAIN_STAT status update */
	if (stats.stats_id == WMI_REQUEST_RSSI_PER_CHAIN_STAT) {
		complete(&ar->fw_stats_done);
		goto complete;
	}

	/* Handle WMI_REQUEST_VDEV_STAT and WMI_REQUEST_BCN_STAT updates. */
	ath12k_wmi_fw_stats_process(ar, &stats);

complete:
	complete(&ar->fw_stats_complete);
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();

	/* Since the stats's pdev, vdev and beacon list are spliced and reinitialised
	 * at this point, no need to free the individual list.
	 */
	return;

free:
	ath12k_fw_stats_free(&stats);
}

/* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned
 * is not part of BDF CTL(Conformance test limits) table entries.
 */
static void ath12k_pdev_ctl_failsafe_check_event(struct ath12k_base *ab,
						 struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_pdev_ctl_failsafe_chk_event *ev;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return;
	}

	ev = tb[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch pdev ctl failsafe check ev");
		kfree(tb);
		return;
	}

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "pdev ctl failsafe check ev status %d\n",
		   ev->ctl_failsafe_status);

	/* If ctl_failsafe_status is set to 1 FW will max out the Transmit power
	 * to 10 dBm else the CTL power entry in the BDF would be picked up.
	 */
	if (ev->ctl_failsafe_status != 0)
		ath12k_warn(ab, "pdev ctl failsafe failure status %d",
			    ev->ctl_failsafe_status);

	kfree(tb);
}

/* Apply a CSA switch-count update to every vdev listed in the event:
 * finish the CSA at zero, otherwise keep mac80211's countdown in sync
 * with the firmware's counter.
 */
static void
ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
					  const struct ath12k_wmi_pdev_csa_event *ev,
					  const u32 *vdev_ids)
{
	u32 current_switch_count = le32_to_cpu(ev->current_switch_count);
	u32 num_vdevs = le32_to_cpu(ev->num_vdevs);
	struct ieee80211_bss_conf *conf;
	struct ath12k_link_vif *arvif;
	struct ath12k_vif *ahvif;
	int i;

	rcu_read_lock();
	for (i = 0; i < num_vdevs; i++) {
		arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_ids[i]);

		if (!arvif) {
			ath12k_warn(ab, "Recvd csa status for unknown vdev %d",
				    vdev_ids[i]);
			continue;
		}
		ahvif = arvif->ahvif;

		if (arvif->link_id >= IEEE80211_MLD_MAX_NUM_LINKS) {
			ath12k_warn(ab, "Invalid CSA switch count even link id: %d\n",
				    arvif->link_id);
			continue;
		}

		conf = rcu_dereference(ahvif->vif->link_conf[arvif->link_id]);
		if (!conf) {
			ath12k_warn(ab, "unable to access bss link conf in process csa for vif %pM link %u\n",
				    ahvif->vif->addr, arvif->link_id);
			continue;
		}

		if (!arvif->is_up || !conf->csa_active)
			continue;

		/* Finish CSA when counter reaches zero */
		if (!current_switch_count) {
			ieee80211_csa_finish(ahvif->vif, arvif->link_id);
			arvif->current_cntdown_counter = 0;
		} else if (current_switch_count > 1) {
			/* If the count in event is not what we expect, don't update the
			 * mac80211 count. Since during beacon Tx failure, count in the
			 * firmware will not decrement and this event will come with the
			 * previous count value again
			 */
			if (current_switch_count != arvif->current_cntdown_counter)
				continue;

			arvif->current_cntdown_counter =
				ieee80211_beacon_update_cntdwn(ahvif->vif,
							       arvif->link_id);
		}
	}
	rcu_read_unlock();
}

/* WMI CSA switch-count status event: unpack the TLVs and hand the vdev
 * list to ath12k_wmi_process_csa_switch_count_event().
 */
static void
ath12k_wmi_pdev_csa_switch_count_status_event(struct ath12k_base *ab,
					      struct sk_buff *skb)
{
	const void **tb;
	const struct ath12k_wmi_pdev_csa_event *ev;
	const u32 *vdev_ids;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return;
	}

	ev = tb[WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT];
	vdev_ids = tb[WMI_TAG_ARRAY_UINT32];

	if (!ev || !vdev_ids) {
		ath12k_warn(ab, "failed to fetch pdev csa switch count ev");
		kfree(tb);
		return;
	}

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "pdev csa switch count %d for pdev %d, num_vdevs %d",
		   ev->current_switch_count, ev->pdev_id,
		   ev->num_vdevs);

	ath12k_wmi_process_csa_switch_count_event(ab, ev, vdev_ids);

	kfree(tb);
}

/* DFS radar detection event: locate the radio and report the radar to
 * mac80211 unless radar events are administratively blocked.
 */
static void
ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	const void **tb;
	struct ath12k_mac_get_any_chanctx_conf_arg arg;
	const struct ath12k_wmi_pdev_radar_event *ev;
	struct ath12k *ar;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return;
	}

	ev = tb[WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT];

	if (!ev) {
		ath12k_warn(ab, "failed to fetch pdev dfs radar detected ev");
		kfree(tb);
		return;
	}

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "pdev dfs radar detected on pdev %d, detection mode %d, chan freq %d, chan_width %d, detector id %d, seg id %d, timestamp %d, chirp %d, freq offset %d, sidx %d",
		   ev->pdev_id, ev->detection_mode, ev->chan_freq, ev->chan_width,
		   ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp,
		   ev->freq_offset, ev->sidx);

	rcu_read_lock();

	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev->pdev_id));

	if (!ar) {
		ath12k_warn(ab, "radar detected in invalid pdev %d\n",
			    ev->pdev_id);
		goto exit;
	}

	/* Find any active channel context to attribute the radar to */
	arg.ar = ar;
	arg.chanctx_conf = NULL;
	ieee80211_iter_chan_contexts_atomic(ath12k_ar_to_hw(ar),
					    ath12k_mac_get_any_chanctx_conf_iter, &arg);
	if (!arg.chanctx_conf) {
		ath12k_warn(ab, "failed to find valid chanctx_conf in radar detected event\n");
		goto exit;
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_REG, "DFS Radar Detected in pdev %d\n",
		   ev->pdev_id);

	/* debugfs knob to suppress radar reporting for testing */
	if (ar->dfs_block_radar_events)
		ath12k_info(ab, "DFS Radar detected, but ignored as requested\n");
	else
		ieee80211_radar_detected(ath12k_ar_to_hw(ar), arg.chanctx_conf);

exit:
	rcu_read_unlock();

	kfree(tb);
}

/* Forward a segmented factory-test-mode (FTM) event payload to the
 * testmode handler.
 */
static void ath12k_tm_wmi_event_segmented(struct ath12k_base *ab, u32 cmd_id,
					  struct sk_buff *skb)
{
	const struct ath12k_wmi_ftm_event *ev;
	const void **tb;
	int ret;
	u16 length;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);

	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse ftm event tlv: %d\n", ret);
		return;
	}

	ev = tb[WMI_TAG_ARRAY_BYTE];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch ftm msg\n");
		kfree(tb);
		return;
	}

	length = skb->len - TLV_HDR_SIZE;
	ath12k_tm_process_event(ab, cmd_id, ev, length);
	kfree(tb);
	tb = NULL;
}

/* PDEV temperature event: currently only validates the pdev id and logs
 * the reading; no thermal action is taken here.
 */
static void
ath12k_wmi_pdev_temperature_event(struct ath12k_base *ab,
				  struct sk_buff *skb)
{
	struct ath12k *ar;
	struct wmi_pdev_temperature_event ev = {};

	if (ath12k_pull_pdev_temp_ev(ab, skb, &ev) != 0) {
		ath12k_warn(ab, "failed to extract pdev temperature event");
		return;
	}

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "pdev temperature ev temp %d pdev_id %d\n", ev.temp, ev.pdev_id);

	rcu_read_lock();

	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev.pdev_id));
	if (!ar) {
		ath12k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev.pdev_id);
		goto exit;
	}

exit:
	rcu_read_unlock();
}

/* FILS discovery event: firmware expected a FILS discovery frame from the
 * host; log it as a warning since the frame was not provided in time.
 */
static void ath12k_fils_discovery_event(struct ath12k_base *ab,
					struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_fils_discovery_event *ev;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab,
			    "failed to parse FILS discovery event tlv %d\n",
			    ret);
		return;
	}

	ev = tb[WMI_TAG_HOST_SWFDA_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch FILS discovery event\n");
		kfree(tb);
		return;
	}

	ath12k_warn(ab,
		    "FILS discovery frame expected from host for vdev_id: %u, transmission scheduled at %u, next TBTT: %u\n",
		    ev->vdev_id, ev->fils_tt, ev->tbtt);

	kfree(tb);
}

/* Probe response TX status event: warn on transmission failure. */
static void ath12k_probe_resp_tx_status_event(struct ath12k_base *ab,
					      struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_probe_resp_tx_status_event *ev;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab,
			    "failed to parse probe response transmission status event tlv: %d\n",
			    ret);
		return;
	}

	ev = tb[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT];
	if (!ev) {
		ath12k_warn(ab,
			    "failed to fetch probe response transmission status event");
		kfree(tb);
		return;
	}

	/* Non-zero tx_status means the offloaded probe response failed */
	if (ev->tx_status)
		ath12k_warn(ab,
			    "Probe response transmission failed for vdev_id %u, status %u\n",
			    ev->vdev_id, ev->tx_status);

	kfree(tb);
}

/* P2P NoA (Notice of Absence) event: push the updated NoA descriptors to
 * the P2P layer for the reported vdev. Returns 0 or a negative errno.
 */
static int ath12k_wmi_p2p_noa_event(struct ath12k_base *ab,
				    struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_p2p_noa_event *ev;
	const struct ath12k_wmi_p2p_noa_info *noa;
	struct ath12k *ar;
	int ret, vdev_id;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse P2P NoA TLV: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_P2P_NOA_EVENT];
	noa = tb[WMI_TAG_P2P_NOA_INFO];

	if (!ev || !noa) {
		ret = -EPROTO;
		goto out;
	}

	vdev_id = __le32_to_cpu(ev->vdev_id);

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "wmi tlv p2p noa vdev_id %i descriptors %u\n",
		   vdev_id, le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_DESC_NUM));

	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
	if (!ar) {
		ath12k_warn(ab, "invalid vdev id %d in P2P NoA event\n",
			    vdev_id);
		ret = -EINVAL;
		goto unlock;
	}

	ath12k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);

	ret = 0;

unlock:
	rcu_read_unlock();
out:
	kfree(tb);
	return ret;
}

/* rfkill state change event: cache the radio state and defer the actual
 * rfkill handling to ab->rfkill_work.
 */
static void ath12k_rfkill_state_change_event(struct ath12k_base *ab,
					     struct sk_buff *skb)
{
	const struct wmi_rfkill_state_change_event *ev;
	const void **tb;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return;
	}

	ev = tb[WMI_TAG_RFKILL_EVENT];
	if (!ev) {
		kfree(tb);
		return;
	}

	ath12k_dbg(ab, ATH12K_DBG_MAC,
		   "wmi tlv rfkill state change gpio %d type %d radio_state %d\n",
		   le32_to_cpu(ev->gpio_pin_num),
		   le32_to_cpu(ev->int_type),
		   le32_to_cpu(ev->radio_state));

	spin_lock_bh(&ab->base_lock);
	ab->rfkill_radio_on = (ev->radio_state == cpu_to_le32(WMI_RFKILL_RADIO_STATE_ON));
	spin_unlock_bh(&ab->base_lock);

	queue_work(ab->workqueue, &ab->rfkill_work);
	kfree(tb);
}

/* Hand the raw diag event payload to the tracing infrastructure. */
static void
ath12k_wmi_diag_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	trace_ath12k_wmi_diag(ab, skb->data, skb->len);
}

/* TWT enable completion event: log the firmware status. */
static void ath12k_wmi_twt_enable_event(struct ath12k_base *ab,
					struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_twt_enable_event *ev;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse wmi twt enable status event tlv: %d\n",
			    ret);
		return;
	}

	ev = tb[WMI_TAG_TWT_ENABLE_COMPLETE_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch twt enable wmi event\n");
		goto exit;
	}

	ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt enable event pdev id %u status %u\n",
		   le32_to_cpu(ev->pdev_id),
		   le32_to_cpu(ev->status));

exit:
	kfree(tb);
}

/* TWT disable completion event: log the firmware status. */
static void ath12k_wmi_twt_disable_event(struct ath12k_base *ab,
					 struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_twt_disable_event *ev;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse wmi twt disable status event tlv: %d\n",
			    ret);
		return;
	}

	ev = tb[WMI_TAG_TWT_DISABLE_COMPLETE_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch twt disable wmi event\n");
		goto exit;
	}

	ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt disable event pdev id %d status %u\n",
		   le32_to_cpu(ev->pdev_id),
		   le32_to_cpu(ev->status));

exit:
	kfree(tb);
}

/* TLV iterator callback for the WoW wakeup-host event: record the wake
 * reason and, for page-fault wakeups, validate and dump the fault buffer.
 */
static int ath12k_wmi_wow_wakeup_host_parse(struct ath12k_base *ab,
					    u16 tag, u16 len,
					    const void *ptr, void *data)
{
	const struct wmi_wow_ev_pg_fault_param *pf_param;
	const struct wmi_wow_ev_param *param;
	struct wmi_wow_ev_arg *arg = data;
	int pf_len;

	switch (tag) {
	case WMI_TAG_WOW_EVENT_INFO:
		param = ptr;
		arg->wake_reason = le32_to_cpu(param->wake_reason);
		ath12k_dbg(ab, ATH12K_DBG_WMI, "wow wakeup host reason %d %s\n",
			   arg->wake_reason, wow_reason(arg->wake_reason));
		break;

	case WMI_TAG_ARRAY_BYTE:
		if (arg && arg->wake_reason == WOW_REASON_PAGE_FAULT) {
			pf_param = ptr;
			pf_len = le32_to_cpu(pf_param->len);
			/* Reject lengths that exceed the TLV or are negative */
			if (pf_len > len - sizeof(pf_len) ||
			    pf_len < 0) {
				ath12k_warn(ab, "invalid wo reason page fault buffer len %d\n",
					    pf_len);
				return -EINVAL;
			}
			ath12k_dbg(ab, ATH12K_DBG_WMI, "wow_reason_page_fault len %d\n",
				   pf_len);
			ath12k_dbg_dump(ab, ATH12K_DBG_WMI,
					"wow_reason_page_fault packet present",
					"wow_pg_fault ",
					pf_param->data,
					pf_len);
		}
		break;
	default:
		break;
	}

	return 0;
}

/* WoW wakeup-host event: parse the TLVs and signal wakeup completion. */
static void ath12k_wmi_event_wow_wakeup_host(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct wmi_wow_ev_arg arg = { };
	int ret;

	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath12k_wmi_wow_wakeup_host_parse,
				  &arg);
	if (ret) {
		ath12k_warn(ab, "failed to parse wmi wow wakeup host event tlv: %d\n",
			    ret);
		return;
	}

	complete(&ab->wow.wakeup_completed);
}

static void ath12k_wmi_gtk_offload_status_event(struct ath12k_base *ab,
						struct sk_buff *skb)
{
	const struct wmi_gtk_offload_status_event *ev;
	struct ath12k_link_vif *arvif;
	__be64
replay_ctr_be; 9020 u64 replay_ctr; 9021 const void **tb; 9022 int ret; 9023 9024 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 9025 if (IS_ERR(tb)) { 9026 ret = PTR_ERR(tb); 9027 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 9028 return; 9029 } 9030 9031 ev = tb[WMI_TAG_GTK_OFFLOAD_STATUS_EVENT]; 9032 if (!ev) { 9033 ath12k_warn(ab, "failed to fetch gtk offload status ev"); 9034 kfree(tb); 9035 return; 9036 } 9037 9038 rcu_read_lock(); 9039 arvif = ath12k_mac_get_arvif_by_vdev_id(ab, le32_to_cpu(ev->vdev_id)); 9040 if (!arvif) { 9041 rcu_read_unlock(); 9042 ath12k_warn(ab, "failed to get arvif for vdev_id:%d\n", 9043 le32_to_cpu(ev->vdev_id)); 9044 kfree(tb); 9045 return; 9046 } 9047 9048 replay_ctr = le64_to_cpu(ev->replay_ctr); 9049 arvif->rekey_data.replay_ctr = replay_ctr; 9050 ath12k_dbg(ab, ATH12K_DBG_WMI, "wmi gtk offload event refresh_cnt %d replay_ctr %llu\n", 9051 le32_to_cpu(ev->refresh_cnt), replay_ctr); 9052 9053 /* supplicant expects big-endian replay counter */ 9054 replay_ctr_be = cpu_to_be64(replay_ctr); 9055 9056 ieee80211_gtk_rekey_notify(arvif->ahvif->vif, arvif->bssid, 9057 (void *)&replay_ctr_be, GFP_ATOMIC); 9058 9059 rcu_read_unlock(); 9060 9061 kfree(tb); 9062 } 9063 9064 static void ath12k_wmi_event_mlo_setup_complete(struct ath12k_base *ab, 9065 struct sk_buff *skb) 9066 { 9067 const struct wmi_mlo_setup_complete_event *ev; 9068 struct ath12k *ar = NULL; 9069 struct ath12k_pdev *pdev; 9070 const void **tb; 9071 int ret, i; 9072 9073 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 9074 if (IS_ERR(tb)) { 9075 ret = PTR_ERR(tb); 9076 ath12k_warn(ab, "failed to parse mlo setup complete event tlv: %d\n", 9077 ret); 9078 return; 9079 } 9080 9081 ev = tb[WMI_TAG_MLO_SETUP_COMPLETE_EVENT]; 9082 if (!ev) { 9083 ath12k_warn(ab, "failed to fetch mlo setup complete event\n"); 9084 kfree(tb); 9085 return; 9086 } 9087 9088 if (le32_to_cpu(ev->pdev_id) > ab->num_radios) 9089 goto skip_lookup; 9090 9091 for (i = 0; i < ab->num_radios; 
i++) { 9092 pdev = &ab->pdevs[i]; 9093 if (pdev && pdev->pdev_id == le32_to_cpu(ev->pdev_id)) { 9094 ar = pdev->ar; 9095 break; 9096 } 9097 } 9098 9099 skip_lookup: 9100 if (!ar) { 9101 ath12k_warn(ab, "invalid pdev_id %d status %u in setup complete event\n", 9102 ev->pdev_id, ev->status); 9103 goto out; 9104 } 9105 9106 ar->mlo_setup_status = le32_to_cpu(ev->status); 9107 complete(&ar->mlo_setup_done); 9108 9109 out: 9110 kfree(tb); 9111 } 9112 9113 static void ath12k_wmi_event_teardown_complete(struct ath12k_base *ab, 9114 struct sk_buff *skb) 9115 { 9116 const struct wmi_mlo_teardown_complete_event *ev; 9117 const void **tb; 9118 int ret; 9119 9120 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 9121 if (IS_ERR(tb)) { 9122 ret = PTR_ERR(tb); 9123 ath12k_warn(ab, "failed to parse teardown complete event tlv: %d\n", ret); 9124 return; 9125 } 9126 9127 ev = tb[WMI_TAG_MLO_TEARDOWN_COMPLETE]; 9128 if (!ev) { 9129 ath12k_warn(ab, "failed to fetch teardown complete event\n"); 9130 kfree(tb); 9131 return; 9132 } 9133 9134 kfree(tb); 9135 } 9136 9137 #ifdef CONFIG_ATH12K_DEBUGFS 9138 static int ath12k_wmi_tpc_stats_copy_buffer(struct ath12k_base *ab, 9139 const void *ptr, u16 tag, u16 len, 9140 struct wmi_tpc_stats_arg *tpc_stats) 9141 { 9142 u32 len1, len2, len3, len4; 9143 s16 *dst_ptr; 9144 s8 *dst_ptr_ctl; 9145 9146 len1 = le32_to_cpu(tpc_stats->max_reg_allowed_power.tpc_reg_pwr.reg_array_len); 9147 len2 = le32_to_cpu(tpc_stats->rates_array1.tpc_rates_array.rate_array_len); 9148 len3 = le32_to_cpu(tpc_stats->rates_array2.tpc_rates_array.rate_array_len); 9149 len4 = le32_to_cpu(tpc_stats->ctl_array.tpc_ctl_pwr.ctl_array_len); 9150 9151 switch (tpc_stats->event_count) { 9152 case ATH12K_TPC_STATS_CONFIG_REG_PWR_EVENT: 9153 if (len1 > len) 9154 return -ENOBUFS; 9155 9156 if (tpc_stats->tlvs_rcvd & WMI_TPC_REG_PWR_ALLOWED) { 9157 dst_ptr = tpc_stats->max_reg_allowed_power.reg_pwr_array; 9158 memcpy(dst_ptr, ptr, len1); 9159 } 9160 break; 9161 case 
ATH12K_TPC_STATS_RATES_EVENT1: 9162 if (len2 > len) 9163 return -ENOBUFS; 9164 9165 if (tpc_stats->tlvs_rcvd & WMI_TPC_RATES_ARRAY1) { 9166 dst_ptr = tpc_stats->rates_array1.rate_array; 9167 memcpy(dst_ptr, ptr, len2); 9168 } 9169 break; 9170 case ATH12K_TPC_STATS_RATES_EVENT2: 9171 if (len3 > len) 9172 return -ENOBUFS; 9173 9174 if (tpc_stats->tlvs_rcvd & WMI_TPC_RATES_ARRAY2) { 9175 dst_ptr = tpc_stats->rates_array2.rate_array; 9176 memcpy(dst_ptr, ptr, len3); 9177 } 9178 break; 9179 case ATH12K_TPC_STATS_CTL_TABLE_EVENT: 9180 if (len4 > len) 9181 return -ENOBUFS; 9182 9183 if (tpc_stats->tlvs_rcvd & WMI_TPC_CTL_PWR_ARRAY) { 9184 dst_ptr_ctl = tpc_stats->ctl_array.ctl_pwr_table; 9185 memcpy(dst_ptr_ctl, ptr, len4); 9186 } 9187 break; 9188 } 9189 return 0; 9190 } 9191 9192 static int ath12k_tpc_get_reg_pwr(struct ath12k_base *ab, 9193 struct wmi_tpc_stats_arg *tpc_stats, 9194 struct wmi_max_reg_power_fixed_params *ev) 9195 { 9196 struct wmi_max_reg_power_allowed_arg *reg_pwr; 9197 u32 total_size; 9198 9199 ath12k_dbg(ab, ATH12K_DBG_WMI, 9200 "Received reg power array type %d length %d for tpc stats\n", 9201 ev->reg_power_type, ev->reg_array_len); 9202 9203 switch (le32_to_cpu(ev->reg_power_type)) { 9204 case TPC_STATS_REG_PWR_ALLOWED_TYPE: 9205 reg_pwr = &tpc_stats->max_reg_allowed_power; 9206 break; 9207 default: 9208 return -EINVAL; 9209 } 9210 9211 /* Each entry is 2 byte hence multiplying the indices with 2 */ 9212 total_size = le32_to_cpu(ev->d1) * le32_to_cpu(ev->d2) * 9213 le32_to_cpu(ev->d3) * le32_to_cpu(ev->d4) * 2; 9214 if (le32_to_cpu(ev->reg_array_len) != total_size) { 9215 ath12k_warn(ab, 9216 "Total size and reg_array_len doesn't match for tpc stats\n"); 9217 return -EINVAL; 9218 } 9219 9220 memcpy(®_pwr->tpc_reg_pwr, ev, sizeof(struct wmi_max_reg_power_fixed_params)); 9221 9222 reg_pwr->reg_pwr_array = kzalloc(le32_to_cpu(reg_pwr->tpc_reg_pwr.reg_array_len), 9223 GFP_ATOMIC); 9224 if (!reg_pwr->reg_pwr_array) 9225 return -ENOMEM; 9226 9227 
tpc_stats->tlvs_rcvd |= WMI_TPC_REG_PWR_ALLOWED; 9228 9229 return 0; 9230 } 9231 9232 static int ath12k_tpc_get_rate_array(struct ath12k_base *ab, 9233 struct wmi_tpc_stats_arg *tpc_stats, 9234 struct wmi_tpc_rates_array_fixed_params *ev) 9235 { 9236 struct wmi_tpc_rates_array_arg *rates_array; 9237 u32 flag = 0, rate_array_len; 9238 9239 ath12k_dbg(ab, ATH12K_DBG_WMI, 9240 "Received rates array type %d length %d for tpc stats\n", 9241 ev->rate_array_type, ev->rate_array_len); 9242 9243 switch (le32_to_cpu(ev->rate_array_type)) { 9244 case ATH12K_TPC_STATS_RATES_ARRAY1: 9245 rates_array = &tpc_stats->rates_array1; 9246 flag = WMI_TPC_RATES_ARRAY1; 9247 break; 9248 case ATH12K_TPC_STATS_RATES_ARRAY2: 9249 rates_array = &tpc_stats->rates_array2; 9250 flag = WMI_TPC_RATES_ARRAY2; 9251 break; 9252 default: 9253 ath12k_warn(ab, 9254 "Received invalid type of rates array for tpc stats\n"); 9255 return -EINVAL; 9256 } 9257 memcpy(&rates_array->tpc_rates_array, ev, 9258 sizeof(struct wmi_tpc_rates_array_fixed_params)); 9259 rate_array_len = le32_to_cpu(rates_array->tpc_rates_array.rate_array_len); 9260 rates_array->rate_array = kzalloc(rate_array_len, GFP_ATOMIC); 9261 if (!rates_array->rate_array) 9262 return -ENOMEM; 9263 9264 tpc_stats->tlvs_rcvd |= flag; 9265 return 0; 9266 } 9267 9268 static int ath12k_tpc_get_ctl_pwr_tbl(struct ath12k_base *ab, 9269 struct wmi_tpc_stats_arg *tpc_stats, 9270 struct wmi_tpc_ctl_pwr_fixed_params *ev) 9271 { 9272 struct wmi_tpc_ctl_pwr_table_arg *ctl_array; 9273 u32 total_size, ctl_array_len, flag = 0; 9274 9275 ath12k_dbg(ab, ATH12K_DBG_WMI, 9276 "Received ctl array type %d length %d for tpc stats\n", 9277 ev->ctl_array_type, ev->ctl_array_len); 9278 9279 switch (le32_to_cpu(ev->ctl_array_type)) { 9280 case ATH12K_TPC_STATS_CTL_ARRAY: 9281 ctl_array = &tpc_stats->ctl_array; 9282 flag = WMI_TPC_CTL_PWR_ARRAY; 9283 break; 9284 default: 9285 ath12k_warn(ab, 9286 "Received invalid type of ctl pwr table for tpc stats\n"); 9287 return 
-EINVAL; 9288 } 9289 9290 total_size = le32_to_cpu(ev->d1) * le32_to_cpu(ev->d2) * 9291 le32_to_cpu(ev->d3) * le32_to_cpu(ev->d4); 9292 if (le32_to_cpu(ev->ctl_array_len) != total_size) { 9293 ath12k_warn(ab, 9294 "Total size and ctl_array_len doesn't match for tpc stats\n"); 9295 return -EINVAL; 9296 } 9297 9298 memcpy(&ctl_array->tpc_ctl_pwr, ev, sizeof(struct wmi_tpc_ctl_pwr_fixed_params)); 9299 ctl_array_len = le32_to_cpu(ctl_array->tpc_ctl_pwr.ctl_array_len); 9300 ctl_array->ctl_pwr_table = kzalloc(ctl_array_len, GFP_ATOMIC); 9301 if (!ctl_array->ctl_pwr_table) 9302 return -ENOMEM; 9303 9304 tpc_stats->tlvs_rcvd |= flag; 9305 return 0; 9306 } 9307 9308 static int ath12k_wmi_tpc_stats_subtlv_parser(struct ath12k_base *ab, 9309 u16 tag, u16 len, 9310 const void *ptr, void *data) 9311 { 9312 struct wmi_tpc_rates_array_fixed_params *tpc_rates_array; 9313 struct wmi_max_reg_power_fixed_params *tpc_reg_pwr; 9314 struct wmi_tpc_ctl_pwr_fixed_params *tpc_ctl_pwr; 9315 struct wmi_tpc_stats_arg *tpc_stats = data; 9316 struct wmi_tpc_config_params *tpc_config; 9317 int ret = 0; 9318 9319 if (!tpc_stats) { 9320 ath12k_warn(ab, "tpc stats memory unavailable\n"); 9321 return -EINVAL; 9322 } 9323 9324 switch (tag) { 9325 case WMI_TAG_TPC_STATS_CONFIG_EVENT: 9326 tpc_config = (struct wmi_tpc_config_params *)ptr; 9327 memcpy(&tpc_stats->tpc_config, tpc_config, 9328 sizeof(struct wmi_tpc_config_params)); 9329 break; 9330 case WMI_TAG_TPC_STATS_REG_PWR_ALLOWED: 9331 tpc_reg_pwr = (struct wmi_max_reg_power_fixed_params *)ptr; 9332 ret = ath12k_tpc_get_reg_pwr(ab, tpc_stats, tpc_reg_pwr); 9333 break; 9334 case WMI_TAG_TPC_STATS_RATES_ARRAY: 9335 tpc_rates_array = (struct wmi_tpc_rates_array_fixed_params *)ptr; 9336 ret = ath12k_tpc_get_rate_array(ab, tpc_stats, tpc_rates_array); 9337 break; 9338 case WMI_TAG_TPC_STATS_CTL_PWR_TABLE_EVENT: 9339 tpc_ctl_pwr = (struct wmi_tpc_ctl_pwr_fixed_params *)ptr; 9340 ret = ath12k_tpc_get_ctl_pwr_tbl(ab, tpc_stats, tpc_ctl_pwr); 9341 break; 
9342 default: 9343 ath12k_warn(ab, 9344 "Received invalid tag for tpc stats in subtlvs\n"); 9345 return -EINVAL; 9346 } 9347 return ret; 9348 } 9349 9350 static int ath12k_wmi_tpc_stats_event_parser(struct ath12k_base *ab, 9351 u16 tag, u16 len, 9352 const void *ptr, void *data) 9353 { 9354 struct wmi_tpc_stats_arg *tpc_stats = (struct wmi_tpc_stats_arg *)data; 9355 int ret; 9356 9357 switch (tag) { 9358 case WMI_TAG_HALPHY_CTRL_PATH_EVENT_FIXED_PARAM: 9359 ret = 0; 9360 /* Fixed param is already processed*/ 9361 break; 9362 case WMI_TAG_ARRAY_STRUCT: 9363 /* len 0 is expected for array of struct when there 9364 * is no content of that type to pack inside that tlv 9365 */ 9366 if (len == 0) 9367 return 0; 9368 ret = ath12k_wmi_tlv_iter(ab, ptr, len, 9369 ath12k_wmi_tpc_stats_subtlv_parser, 9370 tpc_stats); 9371 break; 9372 case WMI_TAG_ARRAY_INT16: 9373 if (len == 0) 9374 return 0; 9375 ret = ath12k_wmi_tpc_stats_copy_buffer(ab, ptr, 9376 WMI_TAG_ARRAY_INT16, 9377 len, tpc_stats); 9378 break; 9379 case WMI_TAG_ARRAY_BYTE: 9380 if (len == 0) 9381 return 0; 9382 ret = ath12k_wmi_tpc_stats_copy_buffer(ab, ptr, 9383 WMI_TAG_ARRAY_BYTE, 9384 len, tpc_stats); 9385 break; 9386 default: 9387 ath12k_warn(ab, "Received invalid tag for tpc stats\n"); 9388 ret = -EINVAL; 9389 break; 9390 } 9391 return ret; 9392 } 9393 9394 void ath12k_wmi_free_tpc_stats_mem(struct ath12k *ar) 9395 { 9396 struct wmi_tpc_stats_arg *tpc_stats = ar->debug.tpc_stats; 9397 9398 lockdep_assert_held(&ar->data_lock); 9399 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "tpc stats mem free\n"); 9400 if (tpc_stats) { 9401 kfree(tpc_stats->max_reg_allowed_power.reg_pwr_array); 9402 kfree(tpc_stats->rates_array1.rate_array); 9403 kfree(tpc_stats->rates_array2.rate_array); 9404 kfree(tpc_stats->ctl_array.ctl_pwr_table); 9405 kfree(tpc_stats); 9406 ar->debug.tpc_stats = NULL; 9407 } 9408 } 9409 9410 static void ath12k_wmi_process_tpc_stats(struct ath12k_base *ab, 9411 struct sk_buff *skb) 9412 { 9413 struct 
ath12k_wmi_pdev_tpc_stats_event_fixed_params *fixed_param;
	struct wmi_tpc_stats_arg *tpc_stats;
	const struct wmi_tlv *tlv;
	void *ptr = skb->data;
	struct ath12k *ar;
	u16 tlv_tag;
	u32 event_count;
	int ret;

	if (!skb->data) {
		ath12k_warn(ab, "No data present in tpc stats event\n");
		return;
	}

	if (skb->len < (sizeof(*fixed_param) + TLV_HDR_SIZE)) {
		ath12k_warn(ab, "TPC stats event size invalid\n");
		return;
	}

	/* The fixed-param TLV must come first; peek it manually before
	 * running the generic TLV iterator.
	 */
	tlv = (struct wmi_tlv *)ptr;
	tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
	ptr += sizeof(*tlv);

	if (tlv_tag != WMI_TAG_HALPHY_CTRL_PATH_EVENT_FIXED_PARAM) {
		ath12k_warn(ab, "TPC stats without fixed param tlv at start\n");
		return;
	}

	fixed_param = (struct ath12k_wmi_pdev_tpc_stats_event_fixed_params *)ptr;
	rcu_read_lock();
	/* NOTE(review): firmware pdev ids appear to be 0-based here while
	 * the host lookup expects 1-based, hence the "+ 1" — confirm
	 * against the firmware interface.
	 */
	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(fixed_param->pdev_id) + 1);
	if (!ar) {
		ath12k_warn(ab, "Failed to get ar for tpc stats\n");
		rcu_read_unlock();
		return;
	}
	/* data_lock protects ar->debug.tpc_stats and tpc_request */
	spin_lock_bh(&ar->data_lock);
	if (!ar->debug.tpc_request) {
		/* Event is received either without request or the
		 * timeout, if memory is already allocated free it
		 */
		if (ar->debug.tpc_stats) {
			ath12k_warn(ab, "Freeing memory for tpc_stats\n");
			ath12k_wmi_free_tpc_stats_mem(ar);
		}
		goto unlock;
	}

	/* event_count == 0 marks the first event of a stats stream; the
	 * accumulation buffer is allocated here and filled across events.
	 */
	event_count = le32_to_cpu(fixed_param->event_count);
	if (event_count == 0) {
		if (ar->debug.tpc_stats) {
			ath12k_warn(ab,
				    "Invalid tpc memory present\n");
			goto unlock;
		}
		ar->debug.tpc_stats =
			kzalloc(sizeof(struct wmi_tpc_stats_arg),
				GFP_ATOMIC);
		if (!ar->debug.tpc_stats) {
			ath12k_warn(ab,
				    "Failed to allocate memory for tpc stats\n");
			goto unlock;
		}
	}

	tpc_stats = ar->debug.tpc_stats;
	if (!tpc_stats) {
		ath12k_warn(ab, "tpc stats memory unavailable\n");
		goto unlock;
	}

	/* Subsequent events must arrive strictly in sequence */
	if (!(event_count == 0)) {
		if (event_count != tpc_stats->event_count + 1) {
			ath12k_warn(ab,
				    "Invalid tpc event received\n");
			goto unlock;
		}
	}
	tpc_stats->pdev_id = le32_to_cpu(fixed_param->pdev_id);
	tpc_stats->end_of_event = le32_to_cpu(fixed_param->end_of_event);
	tpc_stats->event_count = le32_to_cpu(fixed_param->event_count);
	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "tpc stats event_count %d\n",
		   tpc_stats->event_count);
	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath12k_wmi_tpc_stats_event_parser,
				  tpc_stats);
	if (ret) {
		ath12k_wmi_free_tpc_stats_mem(ar);
		ath12k_warn(ab, "failed to parse tpc_stats tlv: %d\n", ret);
		goto unlock;
	}

	if (tpc_stats->end_of_event)
		complete(&ar->debug.tpc_complete);

unlock:
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();
}
#else
/* TPC stats are a debugfs-only feature; no-op without CONFIG_ATH12K_DEBUGFS */
static void ath12k_wmi_process_tpc_stats(struct ath12k_base *ab,
					 struct sk_buff *skb)
{
}
#endif

/* Sub-TLV parser for the RSSI dBm conversion event: extracts either the
 * per-chain noise-floor parameters or the temperature offset into the
 * ath12k_wmi_rssi_dbm_conv_info_arg pointed to by @data.
 */
static int
ath12k_wmi_rssi_dbm_conv_info_evt_subtlv_parser(struct ath12k_base *ab,
						u16 tag, u16 len,
						const void *ptr, void *data)
{
	const struct ath12k_wmi_rssi_dbm_conv_temp_info_params *temp_info;
	const struct ath12k_wmi_rssi_dbm_conv_info_params *param_info;
	struct ath12k_wmi_rssi_dbm_conv_info_arg *rssi_info = data;
	struct ath12k_wmi_rssi_dbm_conv_param_arg param_arg;
	s32 nf_hw_dbm[ATH12K_MAX_NUM_NF_HW_DBM];
	u8 num_20mhz_segments;
	s8 min_nf, *nf_ptr;
	int i, j;

	switch (tag) {
	case WMI_TAG_RSSI_DBM_CONVERSION_PARAMS_INFO:
		if (len < sizeof(*param_info)) {
			ath12k_warn(ab,
				    "RSSI dbm conv subtlv 0x%x invalid len %d rcvd",
				    tag, len);
			return -EINVAL;
		}

		param_info = ptr;

		param_arg.curr_bw = le32_to_cpu(param_info->curr_bw);
		param_arg.curr_rx_chainmask = le32_to_cpu(param_info->curr_rx_chainmask);
		/* The received values pack per-chain, per-20MHz-subband
		 * noise-floor bytes into 32-bit words; unpack them into the
		 * 2D byte-array param_arg.nf_hw_dbm[chain][subband].
		 */
		nf_ptr = &param_arg.nf_hw_dbm[0][0];

		for (i = 0; i < ATH12K_MAX_NUM_NF_HW_DBM; i++) {
			nf_hw_dbm[i] = a_sle32_to_cpu(param_info->nf_hw_dbm[i]);

			for (j = 0; j < 4; j++) {
				*nf_ptr = (nf_hw_dbm[i] >> (j * 8)) & 0xFF;
				nf_ptr++;
			}
		}

		/* Number of 20 MHz subbands covered by the current bandwidth */
		switch (param_arg.curr_bw) {
		case WMI_CHAN_WIDTH_20:
			num_20mhz_segments = 1;
			break;
		case WMI_CHAN_WIDTH_40:
			num_20mhz_segments = 2;
			break;
		case WMI_CHAN_WIDTH_80:
			num_20mhz_segments = 4;
			break;
		case WMI_CHAN_WIDTH_160:
			num_20mhz_segments = 8;
			break;
		case WMI_CHAN_WIDTH_320:
			num_20mhz_segments = 16;
			break;
		default:
			ath12k_warn(ab, "Invalid current bandwidth %d in RSSI dbm event",
				    param_arg.curr_bw);
			/* In error case, still consider the primary 20 MHz
			 * segment since that is much better than dropping
			 * the whole event
			 */
			num_20mhz_segments = 1;
		}

		/* Track the lowest noise floor across active chains/subbands */
		min_nf = ATH12K_DEFAULT_NOISE_FLOOR;

		for (i = 0; i < ATH12K_MAX_NUM_ANTENNA; i++) {
			if (!(param_arg.curr_rx_chainmask & BIT(i)))
				continue;

			for (j = 0; j < num_20mhz_segments; j++) {
				if (param_arg.nf_hw_dbm[i][j] < min_nf)
					min_nf = param_arg.nf_hw_dbm[i][j];
			}
		}

		rssi_info->min_nf_dbm = min_nf;
		rssi_info->nf_dbm_present = true;
		break;
	case WMI_TAG_RSSI_DBM_CONVERSION_TEMP_OFFSET_INFO:
		if (len < sizeof(*temp_info)) {
			ath12k_warn(ab,
				    "RSSI dbm conv subtlv 0x%x invalid len %d rcvd",
				    tag, len);
			return -EINVAL;
		}

		temp_info = ptr;
		rssi_info->temp_offset = a_sle32_to_cpu(temp_info->offset);
		rssi_info->temp_offset_present = true;
		break;
	default:
		ath12k_dbg(ab, ATH12K_DBG_WMI,
			   "Unknown subtlv 0x%x in RSSI dbm conversion event\n", tag);
	}

	return 0;
}

/* Top-level TLV iterator for the RSSI dBm conversion event: descends into
 * the struct array carrying the conversion sub-TLVs.
 */
static int
ath12k_wmi_rssi_dbm_conv_info_event_parser(struct ath12k_base *ab,
					   u16 tag, u16 len,
					   const void *ptr, void *data)
{
	int ret = 0;

	switch (tag) {
	case WMI_TAG_RSSI_DBM_CONVERSION_PARAMS_INFO_FIXED_PARAM:
		/* Fixed param is already processed*/
		break;
	case WMI_TAG_ARRAY_STRUCT:
		/* len 0 is expected for array of struct when there
		 * is no content of that type inside that tlv
		 */
		if (len == 0)
			return 0;

		ret = ath12k_wmi_tlv_iter(ab, ptr, len,
					  ath12k_wmi_rssi_dbm_conv_info_evt_subtlv_parser,
					  data);
		break;
	default:
		ath12k_dbg(ab, ATH12K_DBG_WMI,
			   "Received invalid tag 0x%x for RSSI dbm conv info event\n",
			   tag);
		break;
	}

	return ret;
}

/* Validate the leading fixed-param TLV of an RSSI dBm conversion event
 * and extract the pdev id it applies to.
 *
 * Returns 0 on success, -EINVAL on a short or malformed event.
 */
static int
ath12k_wmi_rssi_dbm_conv_info_process_fixed_param(struct ath12k_base *ab, u8 *ptr,
						  size_t len, int *pdev_id)
{
	struct ath12k_wmi_rssi_dbm_conv_info_fixed_params *fixed_param;
	const struct wmi_tlv *tlv;
	u16 tlv_tag;

	if (len < (sizeof(*fixed_param) + TLV_HDR_SIZE)) {
		ath12k_warn(ab, "invalid RSSI dbm conv event size %zu\n", len);
		return -EINVAL;
	}

	tlv = (struct wmi_tlv *)ptr;
	tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
	ptr += sizeof(*tlv);

	if (tlv_tag != WMI_TAG_RSSI_DBM_CONVERSION_PARAMS_INFO_FIXED_PARAM) {
		ath12k_warn(ab, "RSSI dbm conv event received without fixed param tlv\n");
		return -EINVAL;
	}

	fixed_param = (struct ath12k_wmi_rssi_dbm_conv_info_fixed_params *)ptr;
	*pdev_id = le32_to_cpu(fixed_param->pdev_id);

	return 0;
}

/* Fold freshly parsed conversion parameters into the pdev's cached RSSI
 * offsets and recompute the effective noise floor.
 * Caller must hold ar->data_lock.
 */
static void
ath12k_wmi_update_rssi_offsets(struct ath12k *ar,
			       struct ath12k_wmi_rssi_dbm_conv_info_arg *rssi_info)
{
	struct ath12k_pdev_rssi_offsets *info = &ar->rssi_info;

	lockdep_assert_held(&ar->data_lock);

	if
(rssi_info->temp_offset_present) 9692 info->temp_offset = rssi_info->temp_offset; 9693 9694 if (rssi_info->nf_dbm_present) 9695 info->min_nf_dbm = rssi_info->min_nf_dbm; 9696 9697 info->noise_floor = info->min_nf_dbm + info->temp_offset; 9698 } 9699 9700 static void 9701 ath12k_wmi_rssi_dbm_conversion_params_info_event(struct ath12k_base *ab, 9702 struct sk_buff *skb) 9703 { 9704 struct ath12k_wmi_rssi_dbm_conv_info_arg rssi_info; 9705 struct ath12k *ar; 9706 s32 noise_floor; 9707 u32 pdev_id; 9708 int ret; 9709 9710 ret = ath12k_wmi_rssi_dbm_conv_info_process_fixed_param(ab, skb->data, skb->len, 9711 &pdev_id); 9712 if (ret) { 9713 ath12k_warn(ab, "failed to parse fixed param in RSSI dbm conv event: %d\n", 9714 ret); 9715 return; 9716 } 9717 9718 rcu_read_lock(); 9719 ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id); 9720 /* If pdev is not active, ignore the event */ 9721 if (!ar) 9722 goto out_unlock; 9723 9724 ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len, 9725 ath12k_wmi_rssi_dbm_conv_info_event_parser, 9726 &rssi_info); 9727 if (ret) { 9728 ath12k_warn(ab, "unable to parse RSSI dbm conversion event\n"); 9729 goto out_unlock; 9730 } 9731 9732 spin_lock_bh(&ar->data_lock); 9733 ath12k_wmi_update_rssi_offsets(ar, &rssi_info); 9734 noise_floor = ath12k_pdev_get_noise_floor(ar); 9735 spin_unlock_bh(&ar->data_lock); 9736 9737 ath12k_dbg(ab, ATH12K_DBG_WMI, 9738 "RSSI noise floor updated, new value is %d dbm\n", noise_floor); 9739 out_unlock: 9740 rcu_read_unlock(); 9741 } 9742 9743 static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb) 9744 { 9745 struct wmi_cmd_hdr *cmd_hdr; 9746 enum wmi_tlv_event_id id; 9747 9748 cmd_hdr = (struct wmi_cmd_hdr *)skb->data; 9749 id = le32_get_bits(cmd_hdr->cmd_id, WMI_CMD_HDR_CMD_ID); 9750 9751 if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr))) 9752 goto out; 9753 9754 switch (id) { 9755 /* Process all the WMI events here */ 9756 case WMI_SERVICE_READY_EVENTID: 9757 ath12k_service_ready_event(ab, skb); 9758 break; 
9759 case WMI_SERVICE_READY_EXT_EVENTID: 9760 ath12k_service_ready_ext_event(ab, skb); 9761 break; 9762 case WMI_SERVICE_READY_EXT2_EVENTID: 9763 ath12k_service_ready_ext2_event(ab, skb); 9764 break; 9765 case WMI_REG_CHAN_LIST_CC_EXT_EVENTID: 9766 ath12k_reg_chan_list_event(ab, skb); 9767 break; 9768 case WMI_READY_EVENTID: 9769 ath12k_ready_event(ab, skb); 9770 break; 9771 case WMI_PEER_DELETE_RESP_EVENTID: 9772 ath12k_peer_delete_resp_event(ab, skb); 9773 break; 9774 case WMI_VDEV_START_RESP_EVENTID: 9775 ath12k_vdev_start_resp_event(ab, skb); 9776 break; 9777 case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID: 9778 ath12k_bcn_tx_status_event(ab, skb); 9779 break; 9780 case WMI_VDEV_STOPPED_EVENTID: 9781 ath12k_vdev_stopped_event(ab, skb); 9782 break; 9783 case WMI_MGMT_RX_EVENTID: 9784 ath12k_mgmt_rx_event(ab, skb); 9785 /* mgmt_rx_event() owns the skb now! */ 9786 return; 9787 case WMI_MGMT_TX_COMPLETION_EVENTID: 9788 ath12k_mgmt_tx_compl_event(ab, skb); 9789 break; 9790 case WMI_SCAN_EVENTID: 9791 ath12k_scan_event(ab, skb); 9792 break; 9793 case WMI_PEER_STA_KICKOUT_EVENTID: 9794 ath12k_peer_sta_kickout_event(ab, skb); 9795 break; 9796 case WMI_ROAM_EVENTID: 9797 ath12k_roam_event(ab, skb); 9798 break; 9799 case WMI_CHAN_INFO_EVENTID: 9800 ath12k_chan_info_event(ab, skb); 9801 break; 9802 case WMI_PDEV_BSS_CHAN_INFO_EVENTID: 9803 ath12k_pdev_bss_chan_info_event(ab, skb); 9804 break; 9805 case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID: 9806 ath12k_vdev_install_key_compl_event(ab, skb); 9807 break; 9808 case WMI_SERVICE_AVAILABLE_EVENTID: 9809 ath12k_service_available_event(ab, skb); 9810 break; 9811 case WMI_PEER_ASSOC_CONF_EVENTID: 9812 ath12k_peer_assoc_conf_event(ab, skb); 9813 break; 9814 case WMI_UPDATE_STATS_EVENTID: 9815 ath12k_update_stats_event(ab, skb); 9816 break; 9817 case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID: 9818 ath12k_pdev_ctl_failsafe_check_event(ab, skb); 9819 break; 9820 case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID: 9821 
ath12k_wmi_pdev_csa_switch_count_status_event(ab, skb); 9822 break; 9823 case WMI_PDEV_TEMPERATURE_EVENTID: 9824 ath12k_wmi_pdev_temperature_event(ab, skb); 9825 break; 9826 case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID: 9827 ath12k_wmi_pdev_dma_ring_buf_release_event(ab, skb); 9828 break; 9829 case WMI_HOST_FILS_DISCOVERY_EVENTID: 9830 ath12k_fils_discovery_event(ab, skb); 9831 break; 9832 case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID: 9833 ath12k_probe_resp_tx_status_event(ab, skb); 9834 break; 9835 case WMI_RFKILL_STATE_CHANGE_EVENTID: 9836 ath12k_rfkill_state_change_event(ab, skb); 9837 break; 9838 case WMI_TWT_ENABLE_EVENTID: 9839 ath12k_wmi_twt_enable_event(ab, skb); 9840 break; 9841 case WMI_TWT_DISABLE_EVENTID: 9842 ath12k_wmi_twt_disable_event(ab, skb); 9843 break; 9844 case WMI_P2P_NOA_EVENTID: 9845 ath12k_wmi_p2p_noa_event(ab, skb); 9846 break; 9847 case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID: 9848 ath12k_wmi_pdev_dfs_radar_detected_event(ab, skb); 9849 break; 9850 case WMI_VDEV_DELETE_RESP_EVENTID: 9851 ath12k_vdev_delete_resp_event(ab, skb); 9852 break; 9853 case WMI_DIAG_EVENTID: 9854 ath12k_wmi_diag_event(ab, skb); 9855 break; 9856 case WMI_WOW_WAKEUP_HOST_EVENTID: 9857 ath12k_wmi_event_wow_wakeup_host(ab, skb); 9858 break; 9859 case WMI_GTK_OFFLOAD_STATUS_EVENTID: 9860 ath12k_wmi_gtk_offload_status_event(ab, skb); 9861 break; 9862 case WMI_MLO_SETUP_COMPLETE_EVENTID: 9863 ath12k_wmi_event_mlo_setup_complete(ab, skb); 9864 break; 9865 case WMI_MLO_TEARDOWN_COMPLETE_EVENTID: 9866 ath12k_wmi_event_teardown_complete(ab, skb); 9867 break; 9868 case WMI_HALPHY_STATS_CTRL_PATH_EVENTID: 9869 ath12k_wmi_process_tpc_stats(ab, skb); 9870 break; 9871 case WMI_11D_NEW_COUNTRY_EVENTID: 9872 ath12k_reg_11d_new_cc_event(ab, skb); 9873 break; 9874 case WMI_PDEV_RSSI_DBM_CONVERSION_PARAMS_INFO_EVENTID: 9875 ath12k_wmi_rssi_dbm_conversion_params_info_event(ab, skb); 9876 break; 9877 /* add Unsupported events (rare) here */ 9878 case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID: 9879 
case WMI_PEER_OPER_MODE_CHANGE_EVENTID: 9880 case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID: 9881 ath12k_dbg(ab, ATH12K_DBG_WMI, 9882 "ignoring unsupported event 0x%x\n", id); 9883 break; 9884 /* add Unsupported events (frequent) here */ 9885 case WMI_PDEV_GET_HALPHY_CAL_STATUS_EVENTID: 9886 case WMI_MGMT_RX_FW_CONSUMED_EVENTID: 9887 case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID: 9888 /* debug might flood hence silently ignore (no-op) */ 9889 break; 9890 case WMI_PDEV_UTF_EVENTID: 9891 if (test_bit(ATH12K_FLAG_FTM_SEGMENTED, &ab->dev_flags)) 9892 ath12k_tm_wmi_event_segmented(ab, id, skb); 9893 else 9894 ath12k_tm_wmi_event_unsegmented(ab, id, skb); 9895 break; 9896 default: 9897 ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id); 9898 break; 9899 } 9900 9901 out: 9902 dev_kfree_skb(skb); 9903 } 9904 9905 static int ath12k_connect_pdev_htc_service(struct ath12k_base *ab, 9906 u32 pdev_idx) 9907 { 9908 int status; 9909 static const u32 svc_id[] = { 9910 ATH12K_HTC_SVC_ID_WMI_CONTROL, 9911 ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1, 9912 ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2 9913 }; 9914 struct ath12k_htc_svc_conn_req conn_req = {}; 9915 struct ath12k_htc_svc_conn_resp conn_resp = {}; 9916 9917 /* these fields are the same for all service endpoints */ 9918 conn_req.ep_ops.ep_tx_complete = ath12k_wmi_htc_tx_complete; 9919 conn_req.ep_ops.ep_rx_complete = ath12k_wmi_op_rx; 9920 conn_req.ep_ops.ep_tx_credits = ath12k_wmi_op_ep_tx_credits; 9921 9922 /* connect to control service */ 9923 conn_req.service_id = svc_id[pdev_idx]; 9924 9925 status = ath12k_htc_connect_service(&ab->htc, &conn_req, &conn_resp); 9926 if (status) { 9927 ath12k_warn(ab, "failed to connect to WMI CONTROL service status: %d\n", 9928 status); 9929 return status; 9930 } 9931 9932 ab->wmi_ab.wmi_endpoint_id[pdev_idx] = conn_resp.eid; 9933 ab->wmi_ab.wmi[pdev_idx].eid = conn_resp.eid; 9934 ab->wmi_ab.max_msg_len[pdev_idx] = conn_resp.max_msg_len; 9935 9936 return 0; 9937 } 9938 9939 static int 9940 
ath12k_wmi_send_unit_test_cmd(struct ath12k *ar, 9941 struct wmi_unit_test_cmd ut_cmd, 9942 u32 *test_args) 9943 { 9944 struct ath12k_wmi_pdev *wmi = ar->wmi; 9945 struct wmi_unit_test_cmd *cmd; 9946 struct sk_buff *skb; 9947 struct wmi_tlv *tlv; 9948 void *ptr; 9949 u32 *ut_cmd_args; 9950 int buf_len, arg_len; 9951 int ret; 9952 int i; 9953 9954 arg_len = sizeof(u32) * le32_to_cpu(ut_cmd.num_args); 9955 buf_len = sizeof(ut_cmd) + arg_len + TLV_HDR_SIZE; 9956 9957 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len); 9958 if (!skb) 9959 return -ENOMEM; 9960 9961 cmd = (struct wmi_unit_test_cmd *)skb->data; 9962 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_UNIT_TEST_CMD, 9963 sizeof(ut_cmd)); 9964 9965 cmd->vdev_id = ut_cmd.vdev_id; 9966 cmd->module_id = ut_cmd.module_id; 9967 cmd->num_args = ut_cmd.num_args; 9968 cmd->diag_token = ut_cmd.diag_token; 9969 9970 ptr = skb->data + sizeof(ut_cmd); 9971 9972 tlv = ptr; 9973 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, arg_len); 9974 9975 ptr += TLV_HDR_SIZE; 9976 9977 ut_cmd_args = ptr; 9978 for (i = 0; i < le32_to_cpu(ut_cmd.num_args); i++) 9979 ut_cmd_args[i] = test_args[i]; 9980 9981 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 9982 "WMI unit test : module %d vdev %d n_args %d token %d\n", 9983 cmd->module_id, cmd->vdev_id, cmd->num_args, 9984 cmd->diag_token); 9985 9986 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_UNIT_TEST_CMDID); 9987 9988 if (ret) { 9989 ath12k_warn(ar->ab, "failed to send WMI_UNIT_TEST CMD :%d\n", 9990 ret); 9991 dev_kfree_skb(skb); 9992 } 9993 9994 return ret; 9995 } 9996 9997 int ath12k_wmi_simulate_radar(struct ath12k *ar) 9998 { 9999 struct ath12k_link_vif *arvif; 10000 u32 dfs_args[DFS_MAX_TEST_ARGS]; 10001 struct wmi_unit_test_cmd wmi_ut; 10002 bool arvif_found = false; 10003 10004 list_for_each_entry(arvif, &ar->arvifs, list) { 10005 if (arvif->is_started && arvif->ahvif->vdev_type == WMI_VDEV_TYPE_AP) { 10006 arvif_found = true; 10007 break; 10008 } 10009 } 10010 10011 if (!arvif_found) 10012 
return -EINVAL; 10013 10014 dfs_args[DFS_TEST_CMDID] = 0; 10015 dfs_args[DFS_TEST_PDEV_ID] = ar->pdev->pdev_id; 10016 /* Currently we could pass segment_id(b0 - b1), chirp(b2) 10017 * freq offset (b3 - b10) to unit test. For simulation 10018 * purpose this can be set to 0 which is valid. 10019 */ 10020 dfs_args[DFS_TEST_RADAR_PARAM] = 0; 10021 10022 wmi_ut.vdev_id = cpu_to_le32(arvif->vdev_id); 10023 wmi_ut.module_id = cpu_to_le32(DFS_UNIT_TEST_MODULE); 10024 wmi_ut.num_args = cpu_to_le32(DFS_MAX_TEST_ARGS); 10025 wmi_ut.diag_token = cpu_to_le32(DFS_UNIT_TEST_TOKEN); 10026 10027 ath12k_dbg(ar->ab, ATH12K_DBG_REG, "Triggering Radar Simulation\n"); 10028 10029 return ath12k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args); 10030 } 10031 10032 int ath12k_wmi_send_tpc_stats_request(struct ath12k *ar, 10033 enum wmi_halphy_ctrl_path_stats_id tpc_stats_type) 10034 { 10035 struct wmi_request_halphy_ctrl_path_stats_cmd_fixed_params *cmd; 10036 struct ath12k_wmi_pdev *wmi = ar->wmi; 10037 struct sk_buff *skb; 10038 struct wmi_tlv *tlv; 10039 __le32 *pdev_id; 10040 u32 buf_len; 10041 void *ptr; 10042 int ret; 10043 10044 buf_len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(u32) + TLV_HDR_SIZE + TLV_HDR_SIZE; 10045 10046 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len); 10047 if (!skb) 10048 return -ENOMEM; 10049 cmd = (struct wmi_request_halphy_ctrl_path_stats_cmd_fixed_params *)skb->data; 10050 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HALPHY_CTRL_PATH_CMD_FIXED_PARAM, 10051 sizeof(*cmd)); 10052 10053 cmd->stats_id_mask = cpu_to_le32(WMI_REQ_CTRL_PATH_PDEV_TX_STAT); 10054 cmd->action = cpu_to_le32(WMI_REQUEST_CTRL_PATH_STAT_GET); 10055 cmd->subid = cpu_to_le32(tpc_stats_type); 10056 10057 ptr = skb->data + sizeof(*cmd); 10058 10059 /* The below TLV arrays optionally follow this fixed param TLV structure 10060 * 1. ARRAY_UINT32 pdev_ids[] 10061 * If this array is present and non-zero length, stats should only 10062 * be provided from the pdevs identified in the array. 
	 * 2. ARRAY_UNIT32 vdev_ids[]
	 *    If this array is present and non-zero length, stats should only
	 *    be provided from the vdevs identified in the array.
	 * 3. ath12k_wmi_mac_addr_params peer_macaddr[];
	 *    If this array is present and non-zero length, stats should only
	 *    be provided from the peers with the MAC addresses specified
	 *    in the array
	 */
	/* pdev_ids[]: single entry — restrict stats to our target pdev */
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, sizeof(u32));
	ptr += TLV_HDR_SIZE;

	pdev_id = ptr;
	*pdev_id = cpu_to_le32(ath12k_mac_get_target_pdev_id(ar));
	ptr += sizeof(*pdev_id);

	/* vdev_ids[]: empty — no vdev filtering */
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
	ptr += TLV_HDR_SIZE;

	/* peer_macaddr[]: empty — no peer filtering */
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, 0);
	ptr += TLV_HDR_SIZE;

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_REQUEST_HALPHY_CTRL_PATH_STATS_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to submit WMI_REQUEST_STATS_CTRL_PATH_CMDID\n");
		dev_kfree_skb(skb);
		return ret;
	}
	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI get TPC STATS sent on pdev %d\n",
		   ar->pdev->pdev_id);

	return ret;
}

/* Connect one WMI HTC service endpoint per pdev.
 *
 * NOTE(review): returns -1 rather than a -errno value when the endpoint
 * count exceeds the number of supported radios; callers appear to treat
 * it as a plain error flag — confirm before changing.
 */
int ath12k_wmi_connect(struct ath12k_base *ab)
{
	u32 i;
	u8 wmi_ep_count;

	wmi_ep_count = ab->htc.wmi_ep_count;
	if (wmi_ep_count > ab->hw_params->max_radios)
		return -1;

	for (i = 0; i < wmi_ep_count; i++)
		ath12k_connect_pdev_htc_service(ab, i);

	return 0;
}

/* Tear down per-pdev WMI state (currently nothing to release) */
static void ath12k_wmi_pdev_detach(struct ath12k_base *ab, u8 pdev_id)
{
	if (WARN_ON(pdev_id >= MAX_RADIOS))
		return;

	/* TODO: Deinit any pdev specific wmi resource */
}

/* Initialize the per-pdev WMI handle for @pdev_id */
int ath12k_wmi_pdev_attach(struct ath12k_base *ab,
			   u8 pdev_id)
{
	struct ath12k_wmi_pdev *wmi_handle;

	if (pdev_id >= ab->hw_params->max_radios)
		return -EINVAL;

	wmi_handle = &ab->wmi_ab.wmi[pdev_id];

	wmi_handle->wmi_ab = &ab->wmi_ab;

	ab->wmi_ab.ab = ab;
	/* TODO: Init remaining resource specific to pdev */

	return 0;
}

/* SoC-level WMI init: attach pdev 0, pick the initial preferred HW mode
 * and set up the service/unified ready completions.
 */
int ath12k_wmi_attach(struct ath12k_base *ab)
{
	int ret;

	ret = ath12k_wmi_pdev_attach(ab, 0);
	if (ret)
		return ret;

	ab->wmi_ab.ab = ab;
	ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;

	/* It's overwritten when service_ext_ready is handled */
	if (ab->hw_params->single_pdev_only)
		ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE;

	/* TODO: Init remaining wmi soc resources required */
	init_completion(&ab->wmi_ab.service_ready);
	init_completion(&ab->wmi_ab.unified_ready);

	return 0;
}

/* SoC-level WMI teardown: detach every connected pdev and free cached
 * DMA ring capabilities.
 */
void ath12k_wmi_detach(struct ath12k_base *ab)
{
	int i;

	/* TODO: Deinit wmi resource specific to SOC as required */

	for (i = 0; i < ab->htc.wmi_ep_count; i++)
		ath12k_wmi_pdev_detach(ab, i);

	ath12k_wmi_free_dbring_caps(ab);
}

/* Program the firmware hardware data filter for a vdev. When disabling,
 * all filter modes are set (see comment below).
 */
int ath12k_wmi_hw_data_filter_cmd(struct ath12k *ar, struct wmi_hw_data_filter_arg *arg)
{
	struct wmi_hw_data_filter_cmd *cmd;
	struct sk_buff *skb;
	int len;

	len = sizeof(*cmd);
	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);

	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_hw_data_filter_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HW_DATA_FILTER_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	cmd->enable = cpu_to_le32(arg->enable ?
	1 : 0);

	/* Set all modes in case of disable */
	if (arg->enable)
		cmd->hw_filter_bitmap = cpu_to_le32(arg->hw_filter_bitmap);
	else
		cmd->hw_filter_bitmap = cpu_to_le32((u32)~0U);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi hw data filter enable %d filter_bitmap 0x%x\n",
		   arg->enable, arg->hw_filter_bitmap);

	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_HW_DATA_FILTER_CMDID);
}

/* Tell firmware the host is awake again after WoW suspend */
int ath12k_wmi_wow_host_wakeup_ind(struct ath12k *ar)
{
	struct wmi_wow_host_wakeup_cmd *cmd;
	struct sk_buff *skb;
	size_t len;

	len = sizeof(*cmd);
	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_wow_host_wakeup_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD,
						 sizeof(*cmd));

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow host wakeup ind\n");

	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID);
}

/* Enable WoW mode in firmware, pausing the interface while suspended */
int ath12k_wmi_wow_enable(struct ath12k *ar)
{
	struct wmi_wow_enable_cmd *cmd;
	struct sk_buff *skb;
	int len;

	len = sizeof(*cmd);
	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_wow_enable_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ENABLE_CMD,
						 sizeof(*cmd));

	cmd->enable = cpu_to_le32(1);
	cmd->pause_iface_config = cpu_to_le32(WOW_IFACE_PAUSE_ENABLED);
	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow enable\n");

	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_CMDID);
}

/* Add (or delete, per @enable) a single WoW wakeup event for @vdev_id.
 * The event is communicated to firmware as a one-bit bitmap.
 */
int ath12k_wmi_wow_add_wakeup_event(struct ath12k *ar, u32 vdev_id,
				    enum wmi_wow_wakeup_event event,
				    u32 enable)
{
	struct wmi_wow_add_del_event_cmd *cmd;
	struct sk_buff *skb;
	size_t len;

	len = sizeof(*cmd);
	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_wow_add_del_event_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_DEL_EVT_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->is_add = cpu_to_le32(enable);
	cmd->event_bitmap = cpu_to_le32((1 << event));

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
		   wow_wakeup_event(event), enable, vdev_id);

	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID);
}

/* Install a WoW bitmap wakeup pattern on @vdev_id. The command carries
 * one bitmap pattern plus several mandatory-but-empty TLV arrays
 * (ipv4/ipv6 sync, magic, timeout) and a ratelimit interval.
 *
 * NOTE(review): pattern and mask are both copied with @pattern_len
 * bytes into fixed-size patternbuf/bitmaskbuf fields — callers are
 * presumably bounded by WOW_MAX_PATTERN_SIZE; confirm at call sites.
 */
int ath12k_wmi_wow_add_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id,
			       const u8 *pattern, const u8 *mask,
			       int pattern_len, int pattern_offset)
{
	struct wmi_wow_add_pattern_cmd *cmd;
	struct wmi_wow_bitmap_pattern_params *bitmap;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	size_t len;

	len = sizeof(*cmd) +
	      sizeof(*tlv) +			/* array struct */
	      sizeof(*bitmap) +			/* bitmap */
	      sizeof(*tlv) +			/* empty ipv4 sync */
	      sizeof(*tlv) +			/* empty ipv6 sync */
	      sizeof(*tlv) +			/* empty magic */
	      sizeof(*tlv) +			/* empty info timeout */
	      sizeof(*tlv) + sizeof(u32);	/* ratelimit interval */

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	/* cmd */
	ptr = skb->data;
	cmd = ptr;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_PATTERN_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->pattern_id = cpu_to_le32(pattern_id);
	cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN);

	ptr += sizeof(*cmd);

	/* bitmap */
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, sizeof(*bitmap));

	ptr += sizeof(*tlv);
	bitmap = ptr;
	bitmap->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_BITMAP_PATTERN_T,
						    sizeof(*bitmap));
	memcpy(bitmap->patternbuf, pattern, pattern_len);
	memcpy(bitmap->bitmaskbuf, mask, pattern_len);
	bitmap->pattern_offset = cpu_to_le32(pattern_offset);
	bitmap->pattern_len = cpu_to_le32(pattern_len);
	bitmap->bitmask_len = cpu_to_le32(pattern_len);
	bitmap->pattern_id = cpu_to_le32(pattern_id);

	ptr += sizeof(*bitmap);

	/* ipv4 sync */
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);

	ptr += sizeof(*tlv);

	/* ipv6 sync */
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);

	ptr += sizeof(*tlv);

	/* magic */
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);

	ptr += sizeof(*tlv);

	/* pattern info timeout */
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);

	ptr += sizeof(*tlv);

	/* ratelimit interval */
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, sizeof(u32));

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d pattern_offset %d pattern_len %d\n",
		   vdev_id, pattern_id, pattern_offset, pattern_len);

	ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow pattern: ",
			bitmap->patternbuf, pattern_len);
	ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow bitmask: ",
			bitmap->bitmaskbuf, pattern_len);

	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ADD_WAKE_PATTERN_CMDID);
}

/* Remove a previously installed WoW bitmap pattern from @vdev_id */
int ath12k_wmi_wow_del_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id)
{
	struct wmi_wow_del_pattern_cmd *cmd;
	struct sk_buff *skb;
	size_t len;

	len = sizeof(*cmd);
	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_wow_del_pattern_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_DEL_PATTERN_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->pattern_id = cpu_to_le32(pattern_id);
	cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
		   vdev_id, pattern_id);

	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_DEL_WAKE_PATTERN_CMDID);
}

/* Build (but do not send) a WMI NLO-config command that starts PNO
 * (preferred network offload) scanning for @vdev_id. The command body is
 * the fixed NLO config followed by an array of per-network entries and a
 * u32 channel list taken from the first network entry.
 * Returns the skb on success or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *
ath12k_wmi_op_gen_config_pno_start(struct ath12k *ar, u32 vdev_id,
				   struct wmi_pno_scan_req_arg *pno)
{
	struct nlo_configured_params *nlo_list;
	size_t len, nlo_list_len, channel_list_len;
	struct wmi_wow_nlo_config_cmd *cmd;
	__le32 *channel_list;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	u32 i;

	len = sizeof(*cmd) +
	      sizeof(*tlv) +
	      /* TLV place holder for array of structures
	       * nlo_configured_params(nlo_list)
	       */
	      sizeof(*tlv);
	      /* TLV place holder for array of uint32 channel_list */

	/* channel list is shared across networks: only entry 0 is used */
	channel_list_len = sizeof(u32) * pno->a_networks[0].channel_count;
	len += channel_list_len;

	nlo_list_len = sizeof(*nlo_list) * pno->uc_networks_count;
	len += nlo_list_len;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = skb->data;
	cmd = ptr;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(pno->vdev_id);
	cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN);

	/* current FW does not support min-max range for dwell time */
	cmd->active_dwell_time = cpu_to_le32(pno->active_max_time);
	cmd->passive_dwell_time = cpu_to_le32(pno->passive_max_time);
	if (pno->do_passive_scan)
		cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SCAN_PASSIVE);

	cmd->fast_scan_period = cpu_to_le32(pno->fast_scan_period);
	cmd->slow_scan_period = cpu_to_le32(pno->slow_scan_period);
	cmd->fast_scan_max_cycles = cpu_to_le32(pno->fast_scan_max_cycles);
	cmd->delay_start_time = cpu_to_le32(pno->delay_start_time);

	/* MAC randomization for probe requests sent during PNO scans */
	if (pno->enable_pno_scan_randomization) {
		cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
					  WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ);
		ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
		ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
	}

	ptr += sizeof(*cmd);

	/* nlo_configured_params(nlo_list) */
	cmd->no_of_ssids = cpu_to_le32(pno->uc_networks_count);
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, nlo_list_len);

	ptr += sizeof(*tlv);
	nlo_list = ptr;
	for (i = 0; i < pno->uc_networks_count; i++) {
		tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
		tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_BYTE,
						     sizeof(*nlo_list));

		nlo_list[i].ssid.valid = cpu_to_le32(1);
		nlo_list[i].ssid.ssid.ssid_len =
			cpu_to_le32(pno->a_networks[i].ssid.ssid_len);
		memcpy(nlo_list[i].ssid.ssid.ssid,
		       pno->a_networks[i].ssid.ssid,
		       le32_to_cpu(nlo_list[i].ssid.ssid.ssid_len));

		/* rssi_threshold of 0 means "no threshold"; values at or
		 * below -300 are treated as invalid
		 */
		if (pno->a_networks[i].rssi_threshold &&
		    pno->a_networks[i].rssi_threshold > -300) {
			nlo_list[i].rssi_cond.valid = cpu_to_le32(1);
			nlo_list[i].rssi_cond.rssi =
				cpu_to_le32(pno->a_networks[i].rssi_threshold);
		}

		nlo_list[i].bcast_nw_type.valid = cpu_to_le32(1);
		nlo_list[i].bcast_nw_type.bcast_nw_type =
			cpu_to_le32(pno->a_networks[i].bcast_nw_type);
	}

	ptr += nlo_list_len;
	cmd->num_of_channels = cpu_to_le32(pno->a_networks[0].channel_count);
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, channel_list_len);
	ptr += sizeof(*tlv);
	channel_list = ptr;

	for (i = 0; i < pno->a_networks[0].channel_count; i++)
		channel_list[i] = cpu_to_le32(pno->a_networks[0].channels[i]);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n",
		   vdev_id);

	return skb;
}

/* Build (but do not send) a WMI NLO-config command that stops PNO
 * scanning on @vdev_id. Returns the skb or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *ath12k_wmi_op_gen_config_pno_stop(struct ath12k *ar,
							 u32 vdev_id)
{
	struct wmi_wow_nlo_config_cmd *cmd;
	struct sk_buff *skb;
	size_t len;

	len = sizeof(*cmd);
	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	cmd = (struct wmi_wow_nlo_config_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, len);

	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_STOP);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi tlv stop pno config vdev_id %d\n", vdev_id);
	return skb;
}

/* Start or stop PNO on @vdev_id depending on @pno_scan->enable */
int ath12k_wmi_wow_config_pno(struct ath12k *ar, u32 vdev_id,
			      struct wmi_pno_scan_req_arg *pno_scan)
{
	struct sk_buff *skb;

	if (pno_scan->enable)
		skb = ath12k_wmi_op_gen_config_pno_start(ar, vdev_id, pno_scan);
	else
		skb = ath12k_wmi_op_gen_config_pno_stop(ar, vdev_id);

	if (IS_ERR_OR_NULL(skb))
		return -ENOMEM;

	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID);
}

/* Append an NS (neighbour solicitation) offload TLV array at *@ptr and
 * advance it. When @ext is false the first WMI_MAX_NS_OFFLOADS tuples
 * are written; when @ext is true the remaining ipv6_count -
 * WMI_MAX_NS_OFFLOADS "extended" tuples are written. With @enable false
 * the tuples are emitted but left unpopulated.
 */
static void ath12k_wmi_fill_ns_offload(struct ath12k *ar,
				       struct wmi_arp_ns_offload_arg *offload,
				       void **ptr,
				       bool enable,
				       bool ext)
{
	struct wmi_ns_offload_params *ns;
	struct wmi_tlv *tlv;
	void *buf_ptr = *ptr;
	u32 ns_cnt, ns_ext_tuples;
	int i, max_offloads;

	ns_cnt = offload->ipv6_count;

	tlv = buf_ptr;

	if (ext) {
		/* extended pass: tuples beyond the first WMI_MAX_NS_OFFLOADS */
		ns_ext_tuples = offload->ipv6_count - WMI_MAX_NS_OFFLOADS;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
						 ns_ext_tuples * sizeof(*ns));
		i = WMI_MAX_NS_OFFLOADS;
		max_offloads = offload->ipv6_count;
	} else {
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
						 WMI_MAX_NS_OFFLOADS * sizeof(*ns));
		i = 0;
		max_offloads = WMI_MAX_NS_OFFLOADS;
	}

	buf_ptr += sizeof(*tlv);

	for (; i < max_offloads; i++) {
		ns = buf_ptr;
		ns->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NS_OFFLOAD_TUPLE,
							sizeof(*ns));

		if (enable) {
			/* slots past ns_cnt stay present but invalid */
			if (i < ns_cnt)
				ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_VALID);

			memcpy(ns->target_ipaddr[0], offload->ipv6_addr[i], 16);
			memcpy(ns->solicitation_ipaddr, offload->self_ipv6_addr[i], 16);

			if (offload->ipv6_type[i])
				ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_IS_IPV6_ANYCAST);

			memcpy(ns->target_mac.addr, offload->mac_addr, ETH_ALEN);

			if (!is_zero_ether_addr(ns->target_mac.addr))
				ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_MAC_VALID);

			ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
				   "wmi index %d ns_solicited %pI6 target %pI6",
				   i, ns->solicitation_ipaddr,
				   ns->target_ipaddr[0]);
		}

		buf_ptr += sizeof(*ns);
	}

	*ptr = buf_ptr;
}

/* Append the ARP offload TLV array (always WMI_MAX_ARP_OFFLOADS tuples)
 * at *@ptr and advance it. Only the first ipv4_count tuples are marked
 * valid, and only when @enable is true.
 */
static void ath12k_wmi_fill_arp_offload(struct ath12k *ar,
					struct wmi_arp_ns_offload_arg *offload,
					void **ptr,
					bool enable)
{
	struct wmi_arp_offload_params *arp;
	struct wmi_tlv *tlv;
	void *buf_ptr = *ptr;
	int i;

	/* fill arp tuple */
	tlv = buf_ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
					 WMI_MAX_ARP_OFFLOADS * sizeof(*arp));
	buf_ptr += sizeof(*tlv);

	for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) {
		arp = buf_ptr;
		arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARP_OFFLOAD_TUPLE,
							 sizeof(*arp));

		if (enable && i < offload->ipv4_count) {
			/* Copy the target ip addr and flags */
			arp->flags = cpu_to_le32(WMI_ARPOL_FLAGS_VALID);
			memcpy(arp->target_ipaddr, offload->ipv4_addr[i], 4);

			ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi arp offload address %pI4",
				   arp->target_ipaddr);
		}

		buf_ptr += sizeof(*arp);
	}

	*ptr = buf_ptr;
}

/* Program ARP and NS offload tuples for @arvif. The command carries the
 * fixed params, an NS tuple array, an ARP tuple array, and — when
 * ipv6_count exceeds WMI_MAX_NS_OFFLOADS — an extra extended NS array.
 */
int ath12k_wmi_arp_ns_offload(struct ath12k *ar,
			      struct ath12k_link_vif *arvif,
			      struct wmi_arp_ns_offload_arg *offload,
			      bool enable)
{
	struct wmi_set_arp_ns_offload_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *buf_ptr;
	size_t len;
	u8 ns_cnt, ns_ext_tuples = 0;

	ns_cnt = offload->ipv6_count;

	len = sizeof(*cmd) +
	      sizeof(*tlv) +
	      WMI_MAX_NS_OFFLOADS * sizeof(struct wmi_ns_offload_params) +
	      sizeof(*tlv) +
	      WMI_MAX_ARP_OFFLOADS * sizeof(struct wmi_arp_offload_params);

	if (ns_cnt > WMI_MAX_NS_OFFLOADS) {
		ns_ext_tuples = ns_cnt - WMI_MAX_NS_OFFLOADS;
		len += sizeof(*tlv) +
		       ns_ext_tuples * sizeof(struct wmi_ns_offload_params);
	}

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	buf_ptr = skb->data;
	cmd = buf_ptr;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_ARP_NS_OFFLOAD_CMD,
						 sizeof(*cmd));
	cmd->flags = cpu_to_le32(0);
	cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
	cmd->num_ns_ext_tuples = cpu_to_le32(ns_ext_tuples);

	buf_ptr += sizeof(*cmd);

	ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, 0);
	ath12k_wmi_fill_arp_offload(ar, offload, &buf_ptr, enable);

	if (ns_ext_tuples)
		ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, 1);

	return ath12k_wmi_cmd_send(ar->wmi,
				   skb, WMI_SET_ARP_NS_OFFLOAD_CMDID);
}

/* Enable or disable GTK rekey offload for @arvif. When enabling, the
 * cached KCK/KEK and replay counter from arvif->rekey_data are handed
 * to firmware.
 */
int ath12k_wmi_gtk_rekey_offload(struct ath12k *ar,
				 struct ath12k_link_vif *arvif, bool enable)
{
	struct ath12k_rekey_data *rekey_data = &arvif->rekey_data;
	struct wmi_gtk_rekey_offload_cmd *cmd;
	struct sk_buff *skb;
	__le64 replay_ctr;
	int len;

	len = sizeof(*cmd);
	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(arvif->vdev_id);

	if (enable) {
		cmd->flags = cpu_to_le32(GTK_OFFLOAD_ENABLE_OPCODE);

		/* the length in rekey_data and cmd is equal */
		memcpy(cmd->kck, rekey_data->kck, sizeof(cmd->kck));
		memcpy(cmd->kek, rekey_data->kek, sizeof(cmd->kek));

		/* replay counter goes out little-endian via a byte copy */
		replay_ctr = cpu_to_le64(rekey_data->replay_ctr);
		memcpy(cmd->replay_ctr, &replay_ctr,
		       sizeof(replay_ctr));
	} else {
		cmd->flags = cpu_to_le32(GTK_OFFLOAD_DISABLE_OPCODE);
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "offload gtk rekey vdev: %d %d\n",
		   arvif->vdev_id, enable);
	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
}

/* Ask firmware for the current GTK rekey offload status of @arvif
 * (replied asynchronously via a WMI event).
 */
int ath12k_wmi_gtk_rekey_getinfo(struct ath12k *ar,
				 struct ath12k_link_vif *arvif)
{
	struct wmi_gtk_rekey_offload_cmd *cmd;
	struct sk_buff *skb;
	int len;

	len = sizeof(*cmd);
	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
	cmd->flags = cpu_to_le32(GTK_OFFLOAD_REQUEST_STATUS_OPCODE);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "get gtk rekey vdev_id: %d\n",
		   arvif->vdev_id);
	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
}

/* Configure STA keepalive for a vdev: fixed params plus one ARP
 * response TLV, which is only populated for the ARP-based keepalive
 * methods.
 */
int ath12k_wmi_sta_keepalive(struct ath12k *ar,
			     const struct wmi_sta_keepalive_arg *arg)
{
	struct wmi_sta_keepalive_arp_resp_params *arp;
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_sta_keepalive_cmd *cmd;
	struct sk_buff *skb;
	size_t len;

	len = sizeof(*cmd) + sizeof(*arp);
	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_sta_keepalive_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALIVE_CMD, sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	cmd->enabled = cpu_to_le32(arg->enabled);
	cmd->interval = cpu_to_le32(arg->interval);
	cmd->method = cpu_to_le32(arg->method);

	/* ARP response TLV sits directly after the fixed params */
	arp = (struct wmi_sta_keepalive_arp_resp_params *)(cmd + 1);
	arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALVE_ARP_RESPONSE,
						 sizeof(*arp));
	if (arg->method == WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE ||
	    arg->method == WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST) {
		arp->src_ip4_addr = cpu_to_le32(arg->src_ip4_addr);
		arp->dest_ip4_addr = cpu_to_le32(arg->dest_ip4_addr);
		ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi sta keepalive vdev %d enabled %d method %d interval %d\n",
		   arg->vdev_id, arg->enabled, arg->method, arg->interval);

	return ath12k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID);
}

/* Send the MLO setup command for this pdev, listing the hw_link_ids of
 * the partner links in a TLV u32 array.
 */
int ath12k_wmi_mlo_setup(struct ath12k *ar, struct wmi_mlo_setup_arg *mlo_params)
{
	struct wmi_mlo_setup_cmd *cmd;
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	u32 *partner_links, num_links;
	int i, ret, buf_len, arg_len;
	struct sk_buff *skb;
	struct wmi_tlv *tlv;
	void *ptr;

	num_links = mlo_params->num_partner_links;
	arg_len = num_links * sizeof(u32);
	buf_len = sizeof(*cmd) + TLV_HDR_SIZE + arg_len;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_mlo_setup_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_SETUP_CMD,
						 sizeof(*cmd));
	/* NOTE(review): group_id is copied without cpu_to_le32() here —
	 * presumably already stored little-endian in the arg struct;
	 * verify against the struct definition.
	 */
	cmd->mld_group_id = mlo_params->group_id;
	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
	ptr = skb->data + sizeof(*cmd);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, arg_len);
	ptr += TLV_HDR_SIZE;

	partner_links = ptr;
	for (i = 0; i < num_links; i++)
		partner_links[i] = mlo_params->partner_link_id[i];

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_SETUP_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_MLO_SETUP_CMDID command: %d\n",
			    ret);
		dev_kfree_skb(skb);
		return ret;
	}

	return 0;
}

/* Signal to firmware that the host has finished MLO setup on this pdev */
int ath12k_wmi_mlo_ready(struct ath12k *ar)
{
	struct wmi_mlo_ready_cmd *cmd;
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);
	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_mlo_ready_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_READY_CMD,
						 sizeof(*cmd));
	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_READY_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_MLO_READY_CMDID command: %d\n",
			    ret);
		dev_kfree_skb(skb);
		return ret;
	}

	return 0;
}

/* Tear down MLO on this pdev (SSR reason code) */
int ath12k_wmi_mlo_teardown(struct ath12k *ar)
{
	struct wmi_mlo_teardown_cmd *cmd;
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);
	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_mlo_teardown_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_TEARDOWN_CMD,
						 sizeof(*cmd));
	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
	/* NOTE(review): reason_code assigned without cpu_to_le32() —
	 * presumably the constant is defined pre-converted; confirm.
	 */
	cmd->reason_code = WMI_MLO_TEARDOWN_SSR_REASON;

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_TEARDOWN_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI MLO teardown command: %d\n",
			    ret);
		dev_kfree_skb(skb);
		return ret;
	}

	return 0;
}

/* True when firmware advertises the 6 GHz extended country-code event
 * and this radio supports 6 GHz.
 */
bool ath12k_wmi_supports_6ghz_cc_ext(struct ath12k *ar)
{
	return test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT,
			ar->ab->wmi_ab.svc_map) && ar->supports_6ghz;
}

/* Push per-channel TPC (transmit power control) limits for a vdev:
 * fixed params followed by a TLV array with one entry per power level.
 */
int ath12k_wmi_send_vdev_set_tpc_power(struct ath12k *ar,
				       u32 vdev_id,
				       struct ath12k_reg_tpc_power_info *param)
{
	struct wmi_vdev_set_tpc_power_cmd *cmd;
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_ch_power_params *ch;
	int i, ret, len, array_len;
	struct sk_buff *skb;
	struct wmi_tlv *tlv;
	u8 *ptr;

	array_len = sizeof(*ch) * param->num_pwr_levels;
	len = sizeof(*cmd) + TLV_HDR_SIZE + array_len;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	ptr = skb->data;

	cmd = (struct wmi_vdev_set_tpc_power_cmd *)ptr;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_TPC_POWER_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->psd_power = cpu_to_le32(param->is_psd_power);
	cmd->eirp_power = cpu_to_le32(param->eirp_power);
	cmd->power_type_6ghz = cpu_to_le32(param->ap_power_type);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "tpc vdev id %d is psd power %d eirp power %d 6 ghz power type %d\n",
		   vdev_id, param->is_psd_power, param->eirp_power, param->ap_power_type);

	ptr += sizeof(*cmd);
	tlv = (struct wmi_tlv *)ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, array_len);

	ptr += TLV_HDR_SIZE;
	ch = (struct wmi_vdev_ch_power_params *)ptr;

	for (i = 0; i < param->num_pwr_levels; i++, ch++) {
		ch->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_CH_POWER_INFO,
							sizeof(*ch));
		ch->chan_cfreq = cpu_to_le32(param->chan_power_info[i].chan_cfreq);
		ch->tx_power = cpu_to_le32(param->chan_power_info[i].tx_power);

		/* NOTE(review): ch->chan_cfreq/tx_power are __le32 printed
		 * with %d — misleading on big-endian hosts; debug only.
		 */
		ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "tpc chan freq %d TX power %d\n",
			   ch->chan_cfreq, ch->tx_power);
	}

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_TPC_POWER_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_VDEV_SET_TPC_POWER_CMDID\n");
		dev_kfree_skb(skb);
		return ret;
	}

	return 0;
}

/* Serialize arg->disallow_bmap[] into the firmware's disallowed-MLO-mode
 * bitmap TLVs at @dislw_bmap. Each entry packs four ieee link ids into
 * one le32. Returns -EINVAL if the arg array count exceeds its storage.
 */
static int
ath12k_wmi_fill_disallowed_bmap(struct ath12k_base *ab,
				struct wmi_disallowed_mlo_mode_bitmap_params *dislw_bmap,
				struct wmi_mlo_link_set_active_arg *arg)
{
	struct wmi_ml_disallow_mode_bmap_arg *dislw_bmap_arg;
	u8 i;

	if (arg->num_disallow_mode_comb >
	    ARRAY_SIZE(arg->disallow_bmap)) {
		ath12k_warn(ab, "invalid num_disallow_mode_comb: %d",
			    arg->num_disallow_mode_comb);
		return -EINVAL;
	}

	dislw_bmap_arg = &arg->disallow_bmap[0];
	for (i = 0; i < arg->num_disallow_mode_comb; i++) {
		/* tag 0 is intentional for this TLV per the wire format */
		dislw_bmap->tlv_header =
			ath12k_wmi_tlv_cmd_hdr(0, sizeof(*dislw_bmap));
		dislw_bmap->disallowed_mode_bitmap =
			cpu_to_le32(dislw_bmap_arg->disallowed_mode);
		dislw_bmap->ieee_link_id_comb =
			le32_encode_bits(dislw_bmap_arg->ieee_link_id[0],
					 WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_1) |
			le32_encode_bits(dislw_bmap_arg->ieee_link_id[1],
					 WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_2) |
			le32_encode_bits(dislw_bmap_arg->ieee_link_id[2],
					 WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_3) |
			le32_encode_bits(dislw_bmap_arg->ieee_link_id[3],
					 WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_4);

		ath12k_dbg(ab, ATH12K_DBG_WMI,
			   "entry %d disallowed_mode %d ieee_link_id_comb 0x%x",
			   i, dislw_bmap_arg->disallowed_mode,
			   dislw_bmap_arg->ieee_link_id_comb);
		dislw_bmap++;
		dislw_bmap_arg++;
	}

	return 0;
}

int ath12k_wmi_send_mlo_link_set_active_cmd(struct ath12k_base *ab,
					    struct wmi_mlo_link_set_active_arg *arg)
{
	struct wmi_disallowed_mlo_mode_bitmap_params *disallowed_mode_bmap;
	struct wmi_mlo_set_active_link_number_params *link_num_param;
	u32 num_link_num_param = 0, num_vdev_bitmap = 0;
	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
	struct wmi_mlo_link_set_active_cmd *cmd;
	u32 num_inactive_vdev_bitmap = 0;
	u32 num_disallow_mode_comb = 0;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	__le32 *vdev_bitmap;
	void *buf_ptr;
	int i, ret;
	u32 len;

	if (!arg->num_vdev_bitmap && !arg->num_link_entry) {
		ath12k_warn(ab, "Invalid num_vdev_bitmap and num_link_entry");
		return -EINVAL;
	}

	/* Which optional arrays are populated depends on the force mode */
	switch (arg->force_mode) {
	case WMI_MLO_LINK_FORCE_MODE_ACTIVE_LINK_NUM:
	case WMI_MLO_LINK_FORCE_MODE_INACTIVE_LINK_NUM:
		num_link_num_param = arg->num_link_entry;
		fallthrough;
	case WMI_MLO_LINK_FORCE_MODE_ACTIVE:
	case WMI_MLO_LINK_FORCE_MODE_INACTIVE:
	case WMI_MLO_LINK_FORCE_MODE_NO_FORCE:
		num_vdev_bitmap = arg->num_vdev_bitmap;
		break;
	case WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE:
		num_vdev_bitmap = arg->num_vdev_bitmap;
		num_inactive_vdev_bitmap = arg->num_inactive_vdev_bitmap;
		break;
	default:
		ath12k_warn(ab, "Invalid force mode: %u",
arg->force_mode); 11027 return -EINVAL; 11028 } 11029 11030 num_disallow_mode_comb = arg->num_disallow_mode_comb; 11031 len = sizeof(*cmd) + 11032 TLV_HDR_SIZE + sizeof(*link_num_param) * num_link_num_param + 11033 TLV_HDR_SIZE + sizeof(*vdev_bitmap) * num_vdev_bitmap + 11034 TLV_HDR_SIZE + TLV_HDR_SIZE + TLV_HDR_SIZE + 11035 TLV_HDR_SIZE + sizeof(*disallowed_mode_bmap) * num_disallow_mode_comb; 11036 if (arg->force_mode == WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE) 11037 len += sizeof(*vdev_bitmap) * num_inactive_vdev_bitmap; 11038 11039 skb = ath12k_wmi_alloc_skb(wmi_ab, len); 11040 if (!skb) 11041 return -ENOMEM; 11042 11043 cmd = (struct wmi_mlo_link_set_active_cmd *)skb->data; 11044 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_LINK_SET_ACTIVE_CMD, 11045 sizeof(*cmd)); 11046 cmd->force_mode = cpu_to_le32(arg->force_mode); 11047 cmd->reason = cpu_to_le32(arg->reason); 11048 ath12k_dbg(ab, ATH12K_DBG_WMI, 11049 "mode %d reason %d num_link_num_param %d num_vdev_bitmap %d inactive %d num_disallow_mode_comb %d", 11050 arg->force_mode, arg->reason, num_link_num_param, 11051 num_vdev_bitmap, num_inactive_vdev_bitmap, 11052 num_disallow_mode_comb); 11053 11054 buf_ptr = skb->data + sizeof(*cmd); 11055 tlv = buf_ptr; 11056 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 11057 sizeof(*link_num_param) * num_link_num_param); 11058 buf_ptr += TLV_HDR_SIZE; 11059 11060 if (num_link_num_param) { 11061 cmd->ctrl_flags = 11062 le32_encode_bits(arg->ctrl_flags.dync_force_link_num ? 
1 : 0, 11063 CRTL_F_DYNC_FORCE_LINK_NUM); 11064 11065 link_num_param = buf_ptr; 11066 for (i = 0; i < num_link_num_param; i++) { 11067 link_num_param->tlv_header = 11068 ath12k_wmi_tlv_cmd_hdr(0, sizeof(*link_num_param)); 11069 link_num_param->num_of_link = 11070 cpu_to_le32(arg->link_num[i].num_of_link); 11071 link_num_param->vdev_type = 11072 cpu_to_le32(arg->link_num[i].vdev_type); 11073 link_num_param->vdev_subtype = 11074 cpu_to_le32(arg->link_num[i].vdev_subtype); 11075 link_num_param->home_freq = 11076 cpu_to_le32(arg->link_num[i].home_freq); 11077 ath12k_dbg(ab, ATH12K_DBG_WMI, 11078 "entry %d num_of_link %d vdev type %d subtype %d freq %d control_flags %d", 11079 i, arg->link_num[i].num_of_link, 11080 arg->link_num[i].vdev_type, 11081 arg->link_num[i].vdev_subtype, 11082 arg->link_num[i].home_freq, 11083 __le32_to_cpu(cmd->ctrl_flags)); 11084 link_num_param++; 11085 } 11086 11087 buf_ptr += sizeof(*link_num_param) * num_link_num_param; 11088 } 11089 11090 tlv = buf_ptr; 11091 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 11092 sizeof(*vdev_bitmap) * num_vdev_bitmap); 11093 buf_ptr += TLV_HDR_SIZE; 11094 11095 if (num_vdev_bitmap) { 11096 vdev_bitmap = buf_ptr; 11097 for (i = 0; i < num_vdev_bitmap; i++) { 11098 vdev_bitmap[i] = cpu_to_le32(arg->vdev_bitmap[i]); 11099 ath12k_dbg(ab, ATH12K_DBG_WMI, "entry %d vdev_id_bitmap 0x%x", 11100 i, arg->vdev_bitmap[i]); 11101 } 11102 11103 buf_ptr += sizeof(*vdev_bitmap) * num_vdev_bitmap; 11104 } 11105 11106 if (arg->force_mode == WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE) { 11107 tlv = buf_ptr; 11108 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 11109 sizeof(*vdev_bitmap) * 11110 num_inactive_vdev_bitmap); 11111 buf_ptr += TLV_HDR_SIZE; 11112 11113 if (num_inactive_vdev_bitmap) { 11114 vdev_bitmap = buf_ptr; 11115 for (i = 0; i < num_inactive_vdev_bitmap; i++) { 11116 vdev_bitmap[i] = 11117 cpu_to_le32(arg->inactive_vdev_bitmap[i]); 11118 ath12k_dbg(ab, ATH12K_DBG_WMI, 11119 "entry %d 
inactive_vdev_id_bitmap 0x%x", 11120 i, arg->inactive_vdev_bitmap[i]); 11121 } 11122 11123 buf_ptr += sizeof(*vdev_bitmap) * num_inactive_vdev_bitmap; 11124 } 11125 } else { 11126 /* add empty vdev bitmap2 tlv */ 11127 tlv = buf_ptr; 11128 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0); 11129 buf_ptr += TLV_HDR_SIZE; 11130 } 11131 11132 /* add empty ieee_link_id_bitmap tlv */ 11133 tlv = buf_ptr; 11134 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0); 11135 buf_ptr += TLV_HDR_SIZE; 11136 11137 /* add empty ieee_link_id_bitmap2 tlv */ 11138 tlv = buf_ptr; 11139 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0); 11140 buf_ptr += TLV_HDR_SIZE; 11141 11142 tlv = buf_ptr; 11143 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 11144 sizeof(*disallowed_mode_bmap) * 11145 arg->num_disallow_mode_comb); 11146 buf_ptr += TLV_HDR_SIZE; 11147 11148 ret = ath12k_wmi_fill_disallowed_bmap(ab, buf_ptr, arg); 11149 if (ret) 11150 goto free_skb; 11151 11152 ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_MLO_LINK_SET_ACTIVE_CMDID); 11153 if (ret) { 11154 ath12k_warn(ab, 11155 "failed to send WMI_MLO_LINK_SET_ACTIVE_CMDID: %d\n", ret); 11156 goto free_skb; 11157 } 11158 11159 ath12k_dbg(ab, ATH12K_DBG_WMI, "WMI mlo link set active cmd"); 11160 11161 return ret; 11162 11163 free_skb: 11164 dev_kfree_skb(skb); 11165 return ret; 11166 } 11167